Path: blob/main/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <[email protected]>
 * Copyright (c) 2018 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2018, loli10K <[email protected]>
 * Copyright (c) 2021, Colm Buckley <[email protected]>
 * Copyright (c) 2021, 2023, Klara Inc.
 * Copyright (c) 2025 Hewlett Packard Enterprise Development LP.
 */

#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_sysfs.h>
#include <sys/vdev_disk.h>
#include <sys/types.h>
#include <dlfcn.h>
#include <libzutil.h>
#include <fcntl.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static boolean_t zpool_vdev_is_interior(const char *name);

typedef struct prop_flags {
	unsigned int create:1;	/* Validate property on creation */
	unsigned int import:1;	/* Validate property on import */
	unsigned int vdevprop:1; /* Validate property as a VDEV property */
} prop_flags_t;

/*
 * ====================================================================
 *	zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zhp->zpool_n_propnames > 0) {
		nvlist_t *innvl = fnvlist_alloc();
		fnvlist_add_string_array(innvl, ZPOOL_GET_PROPS_NAMES,
		    zhp->zpool_propnames, zhp->zpool_n_propnames);
		zcmd_write_src_nvlist(hdl, &zc, innvl);
		fnvlist_free(innvl);
	}

	zcmd_alloc_dst_nvlist(hdl, &zc, 0);

	while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM)
			zcmd_expand_dst_nvlist(hdl, &zc);
		else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}
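
/*
 * Illustrative usage sketch (added for exposition; not part of the
 * original source): callers that may hold stale cached properties can
 * refresh before reading.  Note the old nvlist is only freed when the
 * refresh succeeds, so the handle stays usable on failure:
 *
 *	if (zpool_props_refresh(zhp) != 0)
 *		return (-1);
 */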

static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	const char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
		value = fnvlist_lookup_string(nv, ZPROP_VALUE);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
		value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}
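
/*
 * Example for exposition (not in the original file): both mappers return
 * pointers to static, localized strings that must not be freed, e.g.
 *
 *	const char *s = zpool_state_to_name(VDEV_STATE_CANT_OPEN,
 *	    VDEV_AUX_SPLIT_POOL);	(yields "SPLIT")
 */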
gettext("SUSPENDED");270} else {271nvlist_t *nvroot = fnvlist_lookup_nvlist(272zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);273uint_t vsc;274vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(275nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);276str = zpool_state_to_name(vs->vs_state, vs->vs_aux);277}278return (str);279}280281/*282* Get a zpool property value for 'prop' and return the value in283* a pre-allocated buffer.284*/285int286zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,287size_t len, zprop_source_t *srctype, boolean_t literal)288{289uint64_t intval;290const char *strval;291zprop_source_t src = ZPROP_SRC_NONE;292293if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {294switch (prop) {295case ZPOOL_PROP_NAME:296(void) strlcpy(buf, zpool_get_name(zhp), len);297break;298299case ZPOOL_PROP_HEALTH:300(void) strlcpy(buf, zpool_get_state_str(zhp), len);301break;302303case ZPOOL_PROP_GUID:304intval = zpool_get_prop_int(zhp, prop, &src);305(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);306break;307308case ZPOOL_PROP_ALTROOT:309case ZPOOL_PROP_CACHEFILE:310case ZPOOL_PROP_COMMENT:311case ZPOOL_PROP_COMPATIBILITY:312if (zhp->zpool_props != NULL ||313zpool_get_all_props(zhp) == 0) {314(void) strlcpy(buf,315zpool_get_prop_string(zhp, prop, &src),316len);317break;318}319zfs_fallthrough;320default:321(void) strlcpy(buf, "-", len);322break;323}324325if (srctype != NULL)326*srctype = src;327return (0);328}329330/*331* ZPOOL_PROP_DEDUPCACHED can be fetched by name only using332* the ZPOOL_GET_PROPS_NAMES mechanism333*/334if (prop == ZPOOL_PROP_DEDUPCACHED) {335zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);336(void) zpool_props_refresh(zhp);337}338339if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&340prop != ZPOOL_PROP_NAME)341return (-1);342343switch (zpool_prop_get_type(prop)) {344case PROP_TYPE_STRING:345(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),346len);347break;348349case PROP_TYPE_NUMBER:350intval = zpool_get_prop_int(zhp, prop, &src);351352switch (prop) {353case ZPOOL_PROP_DEDUP_TABLE_QUOTA:354/*355* If dedup quota is 0, we translate this into 'none'356* (unless literal is set). And if it is UINT64_MAX357* we translate that as 'automatic' (limit to size of358* the dedicated dedup VDEV. Otherwise, fall throught359* into the regular number formating.360*/361if (intval == 0) {362(void) strlcpy(buf, literal ? 
"0" : "none",363len);364break;365} else if (intval == UINT64_MAX) {366(void) strlcpy(buf, "auto", len);367break;368}369zfs_fallthrough;370371case ZPOOL_PROP_SIZE:372case ZPOOL_PROP_ALLOCATED:373case ZPOOL_PROP_FREE:374case ZPOOL_PROP_FREEING:375case ZPOOL_PROP_LEAKED:376case ZPOOL_PROP_ASHIFT:377case ZPOOL_PROP_MAXBLOCKSIZE:378case ZPOOL_PROP_MAXDNODESIZE:379case ZPOOL_PROP_BCLONESAVED:380case ZPOOL_PROP_BCLONEUSED:381case ZPOOL_PROP_DEDUP_TABLE_SIZE:382case ZPOOL_PROP_DEDUPCACHED:383if (literal)384(void) snprintf(buf, len, "%llu",385(u_longlong_t)intval);386else387(void) zfs_nicenum(intval, buf, len);388break;389390case ZPOOL_PROP_EXPANDSZ:391case ZPOOL_PROP_CHECKPOINT:392if (intval == 0) {393(void) strlcpy(buf, "-", len);394} else if (literal) {395(void) snprintf(buf, len, "%llu",396(u_longlong_t)intval);397} else {398(void) zfs_nicebytes(intval, buf, len);399}400break;401402case ZPOOL_PROP_CAPACITY:403if (literal) {404(void) snprintf(buf, len, "%llu",405(u_longlong_t)intval);406} else {407(void) snprintf(buf, len, "%llu%%",408(u_longlong_t)intval);409}410break;411412case ZPOOL_PROP_FRAGMENTATION:413if (intval == UINT64_MAX) {414(void) strlcpy(buf, "-", len);415} else if (literal) {416(void) snprintf(buf, len, "%llu",417(u_longlong_t)intval);418} else {419(void) snprintf(buf, len, "%llu%%",420(u_longlong_t)intval);421}422break;423424case ZPOOL_PROP_BCLONERATIO:425case ZPOOL_PROP_DEDUPRATIO:426if (literal)427(void) snprintf(buf, len, "%llu.%02llu",428(u_longlong_t)(intval / 100),429(u_longlong_t)(intval % 100));430else431(void) snprintf(buf, len, "%llu.%02llux",432(u_longlong_t)(intval / 100),433(u_longlong_t)(intval % 100));434break;435436case ZPOOL_PROP_HEALTH:437(void) strlcpy(buf, zpool_get_state_str(zhp), len);438break;439case ZPOOL_PROP_VERSION:440if (intval >= SPA_VERSION_FEATURES) {441(void) snprintf(buf, len, "-");442break;443}444zfs_fallthrough;445default:446(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);447}448break;449450case PROP_TYPE_INDEX:451intval = zpool_get_prop_int(zhp, prop, &src);452if (zpool_prop_index_to_string(prop, intval, &strval)453!= 0)454return (-1);455(void) strlcpy(buf, strval, len);456break;457458default:459abort();460}461462if (srctype)463*srctype = src;464465return (0);466}467468/*469* Get a zpool property value for 'propname' and return the value in470* a pre-allocated buffer.471*/472int473zpool_get_userprop(zpool_handle_t *zhp, const char *propname, char *buf,474size_t len, zprop_source_t *srctype)475{476nvlist_t *nv;477uint64_t ival;478const char *value;479zprop_source_t source = ZPROP_SRC_LOCAL;480481if (zhp->zpool_props == NULL)482zpool_get_all_props(zhp);483484if (nvlist_lookup_nvlist(zhp->zpool_props, propname, &nv) == 0) {485if (nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0)486source = ival;487verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);488} else {489source = ZPROP_SRC_DEFAULT;490value = "-";491}492493if (srctype)494*srctype = source;495496(void) strlcpy(buf, value, len);497498return (0);499}500501/*502* Check if the bootfs name has the same pool name as it is set to.503* Assuming bootfs is a valid dataset name.504*/505static boolean_t506bootfs_name_valid(const char *pool, const char *bootfs)507{508int len = strlen(pool);509if (bootfs[0] == '\0')510return (B_TRUE);511512if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))513return (B_FALSE);514515if (strncmp(pool, bootfs, len) == 0 &&516(bootfs[len] == '/' || bootfs[len] == '\0'))517return (B_TRUE);518519return (B_FALSE);520}521522/*523* Given an nvlist 

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	const char *strval;
	uint64_t intval;
	const char *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	char *parent, *slash;
	char report[1024];

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		if (flags.vdevprop && zpool_prop_vdev(propname)) {
			vdev_prop_t vprop = vdev_name_to_prop(propname);

			if (vdev_prop_readonly(vprop)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is readonly"), propname);
				(void) zfs_error(hdl, EZFS_PROPREADONLY,
				    errbuf);
				goto error;
			}

			if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,
			    retprops, &strval, &intval, errbuf) != 0)
				goto error;

			continue;
		} else if (flags.vdevprop && vdev_prop_user(propname)) {
			if (nvlist_add_nvpair(retprops, elem) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		} else if (flags.vdevprop) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property: '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		prop = zpool_name_to_prop(propname);
		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "feature '%s' unsupported by kernel"),
				    fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (!flags.create &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'disabled' at creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		} else if (prop == ZPOOL_PROP_INVAL &&
		    zfs_prop_user(propname)) {
			/*
			 * This is a user property: make sure it's a
			 * string, and that it's less than ZAP_MAXNAMELEN.
			 */
			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property name '%s' is too long"),
				    propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);

			if (strlen(strval) >= ZFS_MAXPROPLEN) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property value '%s' is too long"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_string(retprops, propname,
			    strval) != 0) {
				(void) no_memory(hdl);
				goto error;
			}

			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPOOL_PROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (!flags.create && zpool_prop_setonce(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s' can only be set at "
			    "creation time"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (unsigned long long)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (intval != 0 &&
			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid, "
				    "only values between %" PRId32 " and %"
				    PRId32 " are allowed."),
				    propname, (unsigned long long)intval,
				    ASHIFT_MIN, ASHIFT_MAX);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to reside in the pool it is
			 * set on.
			 */
			if (!bootfs_name_valid(poolname, strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			parent = strdup(strval);
			if (parent == NULL) {
				(void) zfs_error(hdl, EZFS_NOMEM, errbuf);
				goto error;
			}
			slash = strrchr(parent, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), parent);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				free(parent);
				goto error;
			}

			*slash = '\0';

			if (parent[0] != '\0' &&
			    (stat64(parent, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    parent);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				free(parent);
				goto error;
			}
			free(parent);

			break;

		case ZPOOL_PROP_COMPATIBILITY:
			switch (zpool_load_compat(strval, NULL, report, 1024)) {
			case ZPOOL_COMPATIBILITY_OK:
			case ZPOOL_COMPATIBILITY_WARNTOKEN:
				break;
			case ZPOOL_COMPATIBILITY_BADFILE:
			case ZPOOL_COMPATIBILITY_BADTOKEN:
			case ZPOOL_COMPATIBILITY_NOFILES:
				zfs_error_aux(hdl, "%s", report);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_MULTIHOST:
			if (get_system_hostid() == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "requires a non-zero system hostid"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_DEDUPDITTO:
			printf("Note: property '%s' no longer has "
			    "any effect\n", propname);
			break;

		default:
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
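
/*
 * Illustrative sketch (added; not original code): callers pass a plain
 * string nvlist, as zpool_set_prop() below does.  For example:
 *
 *	nvlist_t *nvl = fnvlist_alloc();
 *	fnvlist_add_string(nvl, "comment", "rack 12 pool");
 *	nvlist_t *checked = zpool_valid_proplist(hdl, poolname, nvl,
 *	    version, flags, errbuf);
 */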
zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);936937zcmd_free_nvlists(&zc);938nvlist_free(nvl);939940if (ret)941(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);942else943(void) zpool_props_refresh(zhp);944945return (ret);946}947948int949zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,950zfs_type_t type, boolean_t literal)951{952libzfs_handle_t *hdl = zhp->zpool_hdl;953zprop_list_t *entry;954char buf[ZFS_MAXPROPLEN];955nvlist_t *features = NULL;956nvpair_t *nvp;957zprop_list_t **last;958boolean_t firstexpand = (NULL == *plp);959int i;960961if (zprop_expand_list(hdl, plp, type) != 0)962return (-1);963964if (type == ZFS_TYPE_VDEV)965return (0);966967last = plp;968while (*last != NULL)969last = &(*last)->pl_next;970971if ((*plp)->pl_all)972features = zpool_get_features(zhp);973974if ((*plp)->pl_all && firstexpand) {975/* Handle userprops in the all properties case */976if (zhp->zpool_props == NULL && zpool_props_refresh(zhp))977return (-1);978979nvp = NULL;980while ((nvp = nvlist_next_nvpair(zhp->zpool_props, nvp)) !=981NULL) {982const char *propname = nvpair_name(nvp);983984if (!zfs_prop_user(propname))985continue;986987entry = zfs_alloc(hdl, sizeof (zprop_list_t));988entry->pl_prop = ZPROP_USERPROP;989entry->pl_user_prop = zfs_strdup(hdl, propname);990entry->pl_width = strlen(entry->pl_user_prop);991entry->pl_all = B_TRUE;992993*last = entry;994last = &entry->pl_next;995}996997for (i = 0; i < SPA_FEATURES; i++) {998entry = zfs_alloc(hdl, sizeof (zprop_list_t));999entry->pl_prop = ZPROP_USERPROP;1000entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",1001spa_feature_table[i].fi_uname);1002entry->pl_width = strlen(entry->pl_user_prop);1003entry->pl_all = B_TRUE;10041005*last = entry;1006last = &entry->pl_next;1007}1008}10091010/* add any unsupported features */1011for (nvp = nvlist_next_nvpair(features, NULL);1012nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {1013char *propname;1014boolean_t found;10151016if (zfeature_is_supported(nvpair_name(nvp)))1017continue;10181019propname = zfs_asprintf(hdl, "unsupported@%s",1020nvpair_name(nvp));10211022/*1023* Before adding the property to the list make sure that no1024* other pool already added the same property.1025*/1026found = B_FALSE;1027entry = *plp;1028while (entry != NULL) {1029if (entry->pl_user_prop != NULL &&1030strcmp(propname, entry->pl_user_prop) == 0) {1031found = B_TRUE;1032break;1033}1034entry = entry->pl_next;1035}1036if (found) {1037free(propname);1038continue;1039}10401041entry = zfs_alloc(hdl, sizeof (zprop_list_t));1042entry->pl_prop = ZPROP_USERPROP;1043entry->pl_user_prop = propname;1044entry->pl_width = strlen(entry->pl_user_prop);1045entry->pl_all = B_TRUE;10461047*last = entry;1048last = &entry->pl_next;1049}10501051for (entry = *plp; entry != NULL; entry = entry->pl_next) {1052if (entry->pl_fixed && !literal)1053continue;10541055if (entry->pl_prop != ZPROP_USERPROP &&1056zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),1057NULL, literal) == 0) {1058if (strlen(buf) > entry->pl_width)1059entry->pl_width = strlen(buf);1060} else if (entry->pl_prop == ZPROP_INVAL &&1061zfs_prop_user(entry->pl_user_prop) &&1062zpool_get_userprop(zhp, entry->pl_user_prop, buf,1063sizeof (buf), NULL) == 0) {1064if (strlen(buf) > entry->pl_width)1065entry->pl_width = strlen(buf);1066}1067}10681069return (0);1070}10711072int1073vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,1074zprop_list_t **plp)1075{1076zprop_list_t *entry;1077char buf[ZFS_MAXPROPLEN];1078const char *strval = 

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
    zfs_type_t type, boolean_t literal)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, type) != 0)
		return (-1);

	if (type == ZFS_TYPE_VDEV)
		return (0);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		/* Handle userprops in the all properties case */
		if (zhp->zpool_props == NULL && zpool_props_refresh(zhp))
			return (-1);

		nvp = NULL;
		while ((nvp = nvlist_next_nvpair(zhp->zpool_props, nvp)) !=
		    NULL) {
			const char *propname = nvpair_name(nvp);

			if (!zfs_prop_user(propname))
				continue;

			entry = zfs_alloc(hdl, sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_USERPROP;
			entry->pl_user_prop = zfs_strdup(hdl, propname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}

		for (i = 0; i < SPA_FEATURES; i++) {
			entry = zfs_alloc(hdl, sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_USERPROP;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_USERPROP;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed && !literal)
			continue;

		if (entry->pl_prop != ZPROP_USERPROP &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, literal) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		} else if (entry->pl_prop == ZPROP_INVAL &&
		    zfs_prop_user(entry->pl_user_prop) &&
		    zpool_get_userprop(zhp, entry->pl_user_prop, buf,
		    sizeof (buf), NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

int
vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
    zprop_list_t **plp)
{
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	const char *strval = NULL;
	int err = 0;
	nvpair_t *elem = NULL;
	nvlist_t *vprops = NULL;
	nvlist_t *propval = NULL;
	const char *propname;
	vdev_prop_t prop;
	zprop_list_t **last;

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed)
			continue;

		if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
		    entry->pl_user_prop, buf, sizeof (buf), NULL,
		    B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
		if (entry->pl_prop == VDEV_PROP_NAME &&
		    strlen(vdevname) > entry->pl_width)
			entry->pl_width = strlen(vdevname);
	}

	/* Handle the all properties case */
	last = plp;
	if (*last != NULL && (*last)->pl_all == B_TRUE) {
		while (*last != NULL)
			last = &(*last)->pl_next;

		err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
		if (err != 0)
			return (err);

		while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
			propname = nvpair_name(elem);

			/* Skip properties that are not user defined */
			if ((prop = vdev_name_to_prop(propname)) !=
			    VDEV_PROP_USERPROP)
				continue;

			if (nvpair_value_nvlist(elem, &propval) != 0)
				continue;

			strval = fnvlist_lookup_string(propval, ZPROP_VALUE);

			entry = zfs_alloc(zhp->zpool_hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = prop;
			entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
			    propname);
			entry->pl_width = strlen(strval);
			entry->pl_all = B_TRUE;
			*last = entry;
			last = &entry->pl_next;
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
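
/*
 * Example for exposition (added): querying a feature state.  The buffer
 * receives "disabled", "enabled", or "active", or "-" with ENOTSUP for
 * an unknown feature:
 *
 *	char state[64];
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 */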

/*
 * Validate the given pool name, optionally putting an extended error message in
 * 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "draid", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
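
/*
 * Usage sketch (illustrative): unlike zpool_open(), this variant returns
 * a handle even for a FAULTED pool, so its status can still be examined:
 *
 *	zpool_handle_t *zhp = zpool_open_canfail(hdl, "tank");
 *	if (zhp != NULL) {
 *		(void) puts(zpool_get_state_str(zhp));
 *		zpool_close(zhp);
 *	}
 */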

/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Check if vdev list contains a dRAID vdev
 */
static boolean_t
zpool_has_draid_vdev(nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (uint_t c = 0; c < children; c++) {
			const char *type;

			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_TYPE, &type) == 0 &&
			    strcmp(type, VDEV_TYPE_DRAID) == 0) {
				return (B_TRUE);
			}
		}
	}
	return (B_FALSE);
}

/*
 * Output a dRAID top-level vdev name in to the provided buffer.
 */
static char *
zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
    uint64_t spares, uint64_t children)
{
	snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
	    VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
	    (u_longlong_t)children, (u_longlong_t)spares);

	return (name);
}

/*
 * Return B_TRUE if the provided name is a dRAID spare name.
 */
boolean_t
zpool_is_draid_spare(const char *name)
{
	uint64_t spare_id, parity, vdev_id;

	if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
	    (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
	    (u_longlong_t *)&spare_id) == 3) {
		return (B_TRUE);
	}

	return (B_FALSE);
}
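
/*
 * For exposition (added comment): with data=8, parity=2, spares=2 and
 * children=64, zpool_draid_name() renders "draid2:8d:64c:2s", while
 * distributed spare names follow the "draid<parity>-<vdev>-<spare>"
 * pattern (e.g. "draid2-0-1") matched by zpool_is_draid_spare().
 */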

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	nvlist_t *hidden_args = NULL;
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char errbuf[ERRBUFLEN];
	int ret = -1;

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

	zcmd_write_conf_nvlist(hdl, &zc, nvroot);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, errbuf)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		const char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, errbuf)) == NULL) {
			goto create_failed;
		}

		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
				goto create_failed;

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)
				goto create_failed;

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
			    hidden_args) != 0)
				goto create_failed;
		}
	}

	if (zc_props)
		zcmd_write_src_nvlist(hdl, &zc, zc_props);

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		nvlist_free(hidden_args);
		if (wkeydata != NULL)
			free(wkeydata);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.  This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, errbuf));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, errbuf));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, errbuf));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, errbuf));

		case EINVAL:
			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
				return (zfs_error(hdl, EZFS_BADDEV, errbuf));
			} else {
				return (zpool_standard_error(hdl, errno,
				    errbuf));
			}

		default:
			return (zpool_standard_error(hdl, errno, errbuf));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	nvlist_free(hidden_args);
	if (wkeydata != NULL)
		free(wkeydata);
	return (ret);
}

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Create a checkpoint in the given pool.
 */
int
zpool_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];
	int error;

	error = lzc_pool_checkpoint(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot checkpoint '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, errbuf);
		return (-1);
	}

	return (0);
}

/*
 * Discard the checkpoint from the given pool.
 */
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];
	int error;

	error = lzc_pool_checkpoint_discard(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot discard checkpoint in '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, errbuf);
		return (-1);
	}

	return (0);
}
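
/*
 * Usage sketch (illustrative): checkpoints are taken and discarded as a
 * pair of calls; each reports its own error on failure:
 *
 *	if (zpool_checkpoint(zhp) == 0) {
 *		... perform the risky operation ...
 *		(void) zpool_discard_checkpoint(zhp);
 *	}
 */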
lzc_pool_prefetch(zhp->zpool_name, type);1746if (error != 0) {1747(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,1748"cannot prefetch %s in '%s'"),1749type == ZPOOL_PREFETCH_DDT ? "ddt" : "", zhp->zpool_name);1750(void) zpool_standard_error(hdl, error, msg);1751return (-1);1752}17531754return (0);1755}17561757/*1758* Add the given vdevs to the pool. The caller must have already performed the1759* necessary verification to ensure that the vdev specification is well-formed.1760*/1761int1762zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot, boolean_t check_ashift)1763{1764zfs_cmd_t zc = {"\0"};1765int ret;1766libzfs_handle_t *hdl = zhp->zpool_hdl;1767char errbuf[ERRBUFLEN];1768nvlist_t **spares, **l2cache;1769uint_t nspares, nl2cache;17701771(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,1772"cannot add to '%s'"), zhp->zpool_name);17731774if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <1775SPA_VERSION_SPARES &&1776nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,1777&spares, &nspares) == 0) {1778zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "1779"upgraded to add hot spares"));1780return (zfs_error(hdl, EZFS_BADVERSION, errbuf));1781}17821783if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <1784SPA_VERSION_L2CACHE &&1785nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,1786&l2cache, &nl2cache) == 0) {1787zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "1788"upgraded to add cache devices"));1789return (zfs_error(hdl, EZFS_BADVERSION, errbuf));1790}17911792zcmd_write_conf_nvlist(hdl, &zc, nvroot);1793(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));1794zc.zc_flags = check_ashift;17951796if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {1797switch (errno) {1798case EBUSY:1799/*1800* This can happen if the user has specified the same1801* device multiple times. We can't reliably detect this1802* until we try to add it and see we already have a1803* label.1804*/1805zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1806"one or more vdevs refer to the same device"));1807(void) zfs_error(hdl, EZFS_BADDEV, errbuf);1808break;18091810case EINVAL:18111812if (zpool_has_draid_vdev(nvroot) &&1813zfeature_lookup_name("draid", NULL) != 0) {1814zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1815"dRAID vdevs are unsupported by the "1816"kernel"));1817} else {1818zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1819"invalid config; a pool with removing/"1820"removed vdevs does not support adding "1821"raidz or dRAID vdevs"));1822}18231824(void) zfs_error(hdl, EZFS_BADDEV, errbuf);1825break;18261827case EOVERFLOW:1828/*1829* This occurs when one of the devices is below1830* SPA_MINDEVSIZE. Unfortunately, we can't detect which1831* device was the problem device since there's no1832* reliable way to determine device size from userland.1833*/1834{1835char buf[64];18361837zfs_nicebytes(SPA_MINDEVSIZE, buf,1838sizeof (buf));18391840zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1841"device is less than the minimum "1842"size (%s)"), buf);1843}1844(void) zfs_error(hdl, EZFS_BADDEV, errbuf);1845break;18461847case ENOTSUP:1848zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1849"pool must be upgraded to add these vdevs"));1850(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);1851break;18521853default:1854(void) zpool_standard_error(hdl, errno, errbuf);1855}18561857ret = -1;1858} else {1859ret = 0;1860}18611862zcmd_free_nvlists(&zc);18631864return (ret);1865}18661867/*1868* Exports the pool from the system. 

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error_fmt(zhp->zpool_hdl,
			    EZFS_ACTIVE_SPARE,
			    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
			    zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
			    zhp->zpool_name));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
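
/*
 * Usage sketch (illustrative): a plain export, recording the invoking
 * command in the pool history:
 *
 *	if (zpool_export(zhp, B_FALSE, "zpool export tank") != 0)
 *		return (-1);
 */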
"Would discard" : "Discarded",1961(longlong_t)loss);1962(void) printf(dgettext(TEXT_DOMAIN,1963"seconds of transactions.\n"));1964}1965}1966}19671968void1969zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,1970nvlist_t *config, char *buf, size_t size)1971{1972nvlist_t *nv = NULL;1973int64_t loss = -1;1974uint64_t edata = UINT64_MAX;1975uint64_t rewindto;1976struct tm t;1977char timestr[128], temp[1024];19781979if (!hdl->libzfs_printerr)1980return;19811982/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */1983if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||1984nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||1985nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)1986goto no_info;19871988(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);1989(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,1990&edata);19911992(void) snprintf(buf, size, dgettext(TEXT_DOMAIN,1993"Recovery is possible, but will result in some data loss.\n"));19941995if (localtime_r((time_t *)&rewindto, &t) != NULL &&1996ctime_r((time_t *)&rewindto, timestr) != NULL) {1997timestr[24] = 0;1998(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,1999"\tReturning the pool to its state as of %s\n"2000"\tshould correct the problem. "), timestr);2001(void) strlcat(buf, temp, size);2002} else {2003(void) strlcat(buf, dgettext(TEXT_DOMAIN,2004"\tReverting the pool to an earlier state "2005"should correct the problem.\n\t"), size);2006}20072008if (loss > 120) {2009(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,2010"Approximately %lld minutes of data\n"2011"\tmust be discarded, irreversibly. "),2012((longlong_t)loss + 30) / 60);2013(void) strlcat(buf, temp, size);2014} else if (loss > 0) {2015(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,2016"Approximately %lld seconds of data\n"2017"\tmust be discarded, irreversibly. "),2018(longlong_t)loss);2019(void) strlcat(buf, temp, size);2020}2021if (edata != 0 && edata != UINT64_MAX) {2022if (edata == 1) {2023(void) strlcat(buf, dgettext(TEXT_DOMAIN,2024"After rewind, at least\n"2025"\tone persistent user-data error will remain. "),2026size);2027} else {2028(void) strlcat(buf, dgettext(TEXT_DOMAIN,2029"After rewind, several\n"2030"\tpersistent user-data errors will remain. "),2031size);2032}2033}2034(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,2035"Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),2036reason >= 0 ? "clear" : "import", name);2037(void) strlcat(buf, temp, size);20382039(void) strlcat(buf, dgettext(TEXT_DOMAIN,2040"A scrub of the pool\n"2041"\tis strongly recommended after recovery.\n"), size);2042return;20432044no_info:2045(void) strlcat(buf, dgettext(TEXT_DOMAIN,2046"Destroy and re-create the pool from\n\ta backup source.\n"), size);2047}20482049/*2050* zpool_import() is a contracted interface. 

/*
 * zpool_import() is a contracted interface.  Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_collect_unsup_feat(nvlist_t *config, char *buf, size_t size)
{
	nvlist_t *nvinfo, *unsup_feat;
	char temp[512];

	nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
	unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		const char *desc = fnvpair_value_string(nvp);
		if (strlen(desc) > 0) {
			(void) snprintf(temp, 512, "\t%s (%s)\n",
			    nvpair_name(nvp), desc);
			(void) strlcat(buf, temp, size);
		} else {
			(void) snprintf(temp, 512, "\t%s\n", nvpair_name(nvp));
			(void) strlcat(buf, temp, size);
		}
	}
}
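
/*
 * Illustrative sketch (added): callers zero the buffer first, since this
 * function only appends, as zpool_import_props() does below:
 *
 *	char buf[2048] = { 0 };
 *	zpool_collect_unsup_feat(config, buf, sizeof (buf));
 *	(void) printf("%s", buf);
 */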

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_load_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	const char *thename;
	const char *origname;
	int ret;
	int error = 0;
	char buf[2048];
	char errbuf[ERRBUFLEN];

	origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		zcmd_write_src_nvlist(hdl, &zc, props);
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);

	zcmd_write_conf_nvlist(hdl, &zc, config);
	zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2);

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM)
		zcmd_expand_dst_nvlist(hdl, &zc);
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_load_policy(config, &policy);

	if (error) {
		char desc[1024];
		char aux[256];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				memset(buf, 0, 2048);
				zpool_collect_unsup_feat(nv, buf, 2048);
				(void) printf("%s", buf);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EREMOTEIO:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
				const char *hostname = "<unknown>";
				uint64_t hostid = 0;
				mmp_state_t mmp_state;

				mmp_state = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_STATE);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME))
					hostname = fnvlist_lookup_string(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTNAME);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID))
					hostid = fnvlist_lookup_uint64(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTID);

				if (mmp_state == MMP_STATE_ACTIVE) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool is imp"
					    "orted on host '%s' (hostid=%lx).\n"
					    "Export the pool on the other "
					    "system, then run 'zpool import'."),
					    hostname, (unsigned long)hostid);
				} else if (mmp_state == MMP_STATE_NO_HOSTID) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool has "
					    "the multihost property on and "
					    "the\nsystem's hostid is not set. "
					    "Set a unique system hostid with "
					    "the zgenhostid(8) command.\n"));
				}

				(void) zfs_error_aux(hdl, "%s", aux);
			}
			(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing or "
				    "corrupted, use '-m' to import the pool "
				    "anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;
		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;
		default:
			(void) zpool_standard_error(hdl, error, desc);
			memset(buf, 0, 2048);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv,
			    buf, 2048);
			(void) printf("\t%s", buf);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
	}

	return (ret);
}

/*
 * Translate vdev names to guids.  If a vdev_path is determined to be
 * unsuitable then a vd_errlist is allocated and the vdev path and errno
 * are added to it.
 */
static int
zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
    nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
{
	nvlist_t *errlist = NULL;
	int error = 0;

	for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vds, elem)) {
		boolean_t spare, cache;

		const char *vd_path = nvpair_name(elem);
		nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
		    NULL);

		if ((tgt == NULL) || cache || spare) {
			if (errlist == NULL) {
				errlist = fnvlist_alloc();
				error = EINVAL;
			}

			uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
			    (spare ?
EZFS_ISSPARE : EZFS_ISL2CACHE);2399fnvlist_add_int64(errlist, vd_path, err);2400continue;2401}24022403uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);2404fnvlist_add_uint64(vdev_guids, vd_path, guid);24052406char msg[MAXNAMELEN];2407(void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);2408fnvlist_add_string(guids_to_paths, msg, vd_path);2409}24102411if (error != 0) {2412verify(errlist != NULL);2413if (vd_errlist != NULL)2414*vd_errlist = errlist;2415else2416fnvlist_free(errlist);2417}24182419return (error);2420}24212422static int2423xlate_init_err(int err)2424{2425switch (err) {2426case ENODEV:2427return (EZFS_NODEVICE);2428case EINVAL:2429case EROFS:2430return (EZFS_BADDEV);2431case EBUSY:2432return (EZFS_INITIALIZING);2433case ESRCH:2434return (EZFS_NO_INITIALIZE);2435}2436return (err);2437}24382439int2440zpool_initialize_one(zpool_handle_t *zhp, void *data)2441{2442int error;2443libzfs_handle_t *hdl = zpool_get_handle(zhp);2444const char *pool_name = zpool_get_name(zhp);2445if (zpool_open_silent(hdl, pool_name, &zhp) != 0)2446return (-1);2447initialize_cbdata_t *cb = data;2448nvlist_t *vdevs = fnvlist_alloc();24492450nvlist_t *config = zpool_get_config(zhp, NULL);2451nvlist_t *nvroot = fnvlist_lookup_nvlist(config,2452ZPOOL_CONFIG_VDEV_TREE);2453zpool_collect_leaves(zhp, nvroot, vdevs);2454if (cb->wait)2455error = zpool_initialize_wait(zhp, cb->cmd_type, vdevs);2456else2457error = zpool_initialize(zhp, cb->cmd_type, vdevs);2458fnvlist_free(vdevs);24592460return (error);2461}24622463/*2464* Begin, suspend, cancel, or uninit (clear) the initialization (initializing2465* of all free blocks) for the given vdevs in the given pool.2466*/2467static int2468zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,2469nvlist_t *vds, boolean_t wait)2470{2471int err;24722473nvlist_t *vdev_guids = fnvlist_alloc();2474nvlist_t *guids_to_paths = fnvlist_alloc();2475nvlist_t *vd_errlist = NULL;2476nvlist_t *errlist;2477nvpair_t *elem;24782479err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,2480guids_to_paths, &vd_errlist);24812482if (err != 0) {2483verify(vd_errlist != NULL);2484goto list_errors;2485}24862487err = lzc_initialize(zhp->zpool_name, cmd_type,2488vdev_guids, &errlist);24892490if (err != 0) {2491if (errlist != NULL && nvlist_lookup_nvlist(errlist,2492ZPOOL_INITIALIZE_VDEVS, &vd_errlist) == 0) {2493goto list_errors;2494}24952496if (err == EINVAL && cmd_type == POOL_INITIALIZE_UNINIT) {2497zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,2498"uninitialize is not supported by kernel"));2499}25002501(void) zpool_standard_error(zhp->zpool_hdl, err,2502dgettext(TEXT_DOMAIN, "operation failed"));2503goto out;2504}25052506if (wait) {2507for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;2508elem = nvlist_next_nvpair(vdev_guids, elem)) {25092510uint64_t guid = fnvpair_value_uint64(elem);25112512err = lzc_wait_tag(zhp->zpool_name,2513ZPOOL_WAIT_INITIALIZE, guid, NULL);2514if (err != 0) {2515(void) zpool_standard_error_fmt(zhp->zpool_hdl,2516err, dgettext(TEXT_DOMAIN, "error "2517"waiting for '%s' to initialize"),2518nvpair_name(elem));25192520goto out;2521}2522}2523}2524goto out;25252526list_errors:2527for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;2528elem = nvlist_next_nvpair(vd_errlist, elem)) {2529int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));2530const char *path;25312532if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),2533&path) != 0)2534path = nvpair_name(elem);25352536(void) 
zfs_error_fmt(zhp->zpool_hdl, vd_error,2537"cannot initialize '%s'", path);2538}25392540out:2541fnvlist_free(vdev_guids);2542fnvlist_free(guids_to_paths);25432544if (vd_errlist != NULL)2545fnvlist_free(vd_errlist);25462547return (err == 0 ? 0 : -1);2548}25492550int2551zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,2552nvlist_t *vds)2553{2554return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));2555}25562557int2558zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,2559nvlist_t *vds)2560{2561return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));2562}25632564static int2565xlate_trim_err(int err)2566{2567switch (err) {2568case ENODEV:2569return (EZFS_NODEVICE);2570case EINVAL:2571case EROFS:2572return (EZFS_BADDEV);2573case EBUSY:2574return (EZFS_TRIMMING);2575case ESRCH:2576return (EZFS_NO_TRIM);2577case EOPNOTSUPP:2578return (EZFS_TRIM_NOTSUP);2579}2580return (err);2581}25822583void2584zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)2585{2586libzfs_handle_t *hdl = zhp->zpool_hdl;2587uint_t children = 0;2588nvlist_t **child;2589uint_t i;25902591(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,2592&child, &children);25932594if (children == 0) {2595char *path = zpool_vdev_name(hdl, zhp, nvroot,2596VDEV_NAME_PATH);25972598if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&2599strcmp(path, VDEV_TYPE_HOLE) != 0)2600fnvlist_add_boolean(res, path);26012602free(path);2603return;2604}26052606for (i = 0; i < children; i++) {2607zpool_collect_leaves(zhp, child[i], res);2608}2609}26102611int2612zpool_trim_one(zpool_handle_t *zhp, void *data)2613{2614int error;2615libzfs_handle_t *hdl = zpool_get_handle(zhp);2616const char *pool_name = zpool_get_name(zhp);2617if (zpool_open_silent(hdl, pool_name, &zhp) != 0)2618return (-1);26192620trim_cbdata_t *cb = data;2621nvlist_t *vdevs = fnvlist_alloc();26222623/* no individual leaf vdevs specified, so add them all */2624nvlist_t *config = zpool_get_config(zhp, NULL);2625nvlist_t *nvroot = fnvlist_lookup_nvlist(config,2626ZPOOL_CONFIG_VDEV_TREE);26272628zpool_collect_leaves(zhp, nvroot, vdevs);2629error = zpool_trim(zhp, cb->cmd_type, vdevs, &cb->trim_flags);2630fnvlist_free(vdevs);26312632return (error);2633}26342635static int2636zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)2637{2638int err;2639nvpair_t *elem;26402641for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;2642elem = nvlist_next_nvpair(vdev_guids, elem)) {26432644uint64_t guid = fnvpair_value_uint64(elem);26452646err = lzc_wait_tag(zhp->zpool_name,2647ZPOOL_WAIT_TRIM, guid, NULL);2648if (err != 0) {2649(void) zpool_standard_error_fmt(zhp->zpool_hdl,2650err, dgettext(TEXT_DOMAIN, "error "2651"waiting to trim '%s'"), nvpair_name(elem));26522653return (err);2654}2655}2656return (0);2657}26582659/*2660* Check errlist and report any errors, omitting ones which should be2661* suppressed. 
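 * (This is the reporting half of the TRIM path; for context, a hedged
 * sketch of the caller side, with hypothetical variable names, and
 * assuming POOL_TRIM_START from the pool_trim_func_t enum:
 *
 *	nvlist_t *vds = fnvlist_alloc();
 *	zpool_collect_leaves(zhp, nvroot, vds);
 *	trimflags_t tf = { .fullpool = B_TRUE, .secure = B_FALSE,
 *	    .wait = B_FALSE, .rate = 0 };
 *	int err = zpool_trim(zhp, POOL_TRIM_START, vds, &tf);
 *	fnvlist_free(vds);
 *
 * With .fullpool set and .secure unset, the per-vdev "trim not
 * supported" errors are exactly the ones suppressed here.)
 *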
Returns B_TRUE if any errors were reported.2662*/2663static boolean_t2664check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,2665nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)2666{2667nvpair_t *elem;2668boolean_t reported_errs = B_FALSE;2669int num_vds = 0;2670int num_suppressed_errs = 0;26712672for (elem = nvlist_next_nvpair(vds, NULL);2673elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {2674num_vds++;2675}26762677for (elem = nvlist_next_nvpair(errlist, NULL);2678elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {2679int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));2680const char *path;26812682/*2683* If only the pool was specified, and it was not a secure2684* trim then suppress warnings for individual vdevs which2685* do not support trimming.2686*/2687if (vd_error == EZFS_TRIM_NOTSUP &&2688trim_flags->fullpool &&2689!trim_flags->secure) {2690num_suppressed_errs++;2691continue;2692}26932694reported_errs = B_TRUE;2695if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),2696&path) != 0)2697path = nvpair_name(elem);26982699(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,2700"cannot trim '%s'", path);2701}27022703if (num_suppressed_errs == num_vds) {2704(void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,2705"no devices in pool support trim operations"));2706(void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,2707dgettext(TEXT_DOMAIN, "cannot trim")));2708reported_errs = B_TRUE;2709}27102711return (reported_errs);2712}27132714/*2715* Begin, suspend, or cancel the TRIM (discarding of all free blocks) for2716* the given vdevs in the given pool.2717*/2718int2719zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,2720trimflags_t *trim_flags)2721{2722int err;2723int retval = 0;27242725nvlist_t *vdev_guids = fnvlist_alloc();2726nvlist_t *guids_to_paths = fnvlist_alloc();2727nvlist_t *errlist = NULL;27282729err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,2730guids_to_paths, &errlist);2731if (err != 0) {2732check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);2733retval = -1;2734goto out;2735}27362737err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,2738trim_flags->secure, vdev_guids, &errlist);2739if (err != 0) {2740nvlist_t *vd_errlist;2741if (errlist != NULL && nvlist_lookup_nvlist(errlist,2742ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {2743if (check_trim_errs(zhp, trim_flags, guids_to_paths,2744vds, vd_errlist)) {2745retval = -1;2746goto out;2747}2748} else {2749char errbuf[ERRBUFLEN];27502751(void) snprintf(errbuf, sizeof (errbuf),2752dgettext(TEXT_DOMAIN, "operation failed"));2753zpool_standard_error(zhp->zpool_hdl, err, errbuf);2754retval = -1;2755goto out;2756}2757}275827592760if (trim_flags->wait)2761retval = zpool_trim_wait(zhp, vdev_guids);27622763out:2764if (errlist != NULL)2765fnvlist_free(errlist);2766fnvlist_free(vdev_guids);2767fnvlist_free(guids_to_paths);2768return (retval);2769}27702771/*2772* Scan the pool.2773*/2774int2775zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd) {2776return (zpool_scan_range(zhp, func, cmd, 0, 0));2777}27782779int2780zpool_scan_range(zpool_handle_t *zhp, pool_scan_func_t func,2781pool_scrub_cmd_t cmd, time_t date_start, time_t date_end)2782{2783char errbuf[ERRBUFLEN];2784int err;2785libzfs_handle_t *hdl = zhp->zpool_hdl;27862787nvlist_t *args = fnvlist_alloc();2788fnvlist_add_uint64(args, "scan_type", (uint64_t)func);2789fnvlist_add_uint64(args, "scan_command", (uint64_t)cmd);2790fnvlist_add_uint64(args, "scan_date_start", 
(uint64_t)date_start);2791fnvlist_add_uint64(args, "scan_date_end", (uint64_t)date_end);27922793err = lzc_scrub(ZFS_IOC_POOL_SCRUB, zhp->zpool_name, args, NULL);2794fnvlist_free(args);27952796if (err == 0) {2797return (0);2798} else if (err == ZFS_ERR_IOC_CMD_UNAVAIL) {2799zfs_cmd_t zc = {"\0"};2800(void) strlcpy(zc.zc_name, zhp->zpool_name,2801sizeof (zc.zc_name));2802zc.zc_cookie = func;2803zc.zc_flags = cmd;28042805if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)2806return (0);2807}28082809/*2810* An ECANCELED on a scrub means one of the following:2811* 1. we resumed a paused scrub.2812* 2. we resumed a paused error scrub.2813* 3. Error scrub is not run because of no error log.2814*2815* Note that we no longer return ECANCELED in case 1 or 2. However, in2816* order to prevent problems where we have a newer userland than2817* kernel, we keep this check in place. That prevents erroneous2818* failures when an older kernel returns ECANCELED in those cases.2819*/2820if (err == ECANCELED && (func == POOL_SCAN_SCRUB ||2821func == POOL_SCAN_ERRORSCRUB) && cmd == POOL_SCRUB_NORMAL)2822return (0);2823/*2824* The following cases have been handled here:2825* 1. Paused a scrub/error scrub if there is none in progress.2826*/2827if (err == ENOENT && func != POOL_SCAN_NONE && cmd ==2828POOL_SCRUB_PAUSE) {2829return (0);2830}28312832ASSERT3U(func, >=, POOL_SCAN_NONE);2833ASSERT3U(func, <, POOL_SCAN_FUNCS);28342835if (func == POOL_SCAN_SCRUB || func == POOL_SCAN_ERRORSCRUB) {2836if (cmd == POOL_SCRUB_PAUSE) {2837(void) snprintf(errbuf, sizeof (errbuf),2838dgettext(TEXT_DOMAIN, "cannot pause scrubbing %s"),2839zhp->zpool_name);2840} else {2841assert(cmd == POOL_SCRUB_NORMAL);2842(void) snprintf(errbuf, sizeof (errbuf),2843dgettext(TEXT_DOMAIN, "cannot scrub %s"),2844zhp->zpool_name);2845}2846} else if (func == POOL_SCAN_RESILVER) {2847assert(cmd == POOL_SCRUB_NORMAL);2848(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,2849"cannot restart resilver on %s"), zhp->zpool_name);2850} else if (func == POOL_SCAN_NONE) {2851(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,2852"cannot cancel scrubbing %s"), zhp->zpool_name);2853} else {2854assert(!"unexpected result");2855}28562857/*2858* With EBUSY, six cases are possible:2859*2860* Current state Requested2861* 1. Normal Scrub Running Normal Scrub or Error Scrub2862* 2. Normal Scrub Paused Error Scrub2863* 3. Normal Scrub Paused Pause Normal Scrub2864* 4. Error Scrub Running Normal Scrub or Error Scrub2865* 5. Error Scrub Paused Pause Error Scrub2866* 6. 
Resilvering			Anything else
	 */
	if (err == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
		    ps->pss_state == DSS_SCANNING) {
			if (ps->pss_pass_scrub_pause == 0) {
				/* handles case 1 */
				assert(cmd == POOL_SCRUB_NORMAL);
				return (zfs_error(hdl, EZFS_SCRUBBING,
				    errbuf));
			} else {
				if (func == POOL_SCAN_ERRORSCRUB) {
					/* handles case 2 */
					ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
					return (zfs_error(hdl,
					    EZFS_SCRUB_PAUSED_TO_CANCEL,
					    errbuf));
				} else {
					/* handles case 3 */
					ASSERT3U(func, ==, POOL_SCAN_SCRUB);
					ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
					return (zfs_error(hdl,
					    EZFS_SCRUB_PAUSED, errbuf));
				}
			}
		} else if (ps &&
		    ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
		    ps->pss_error_scrub_state == DSS_ERRORSCRUBBING) {
			if (ps->pss_pass_error_scrub_pause == 0) {
				/* handles case 4 */
				ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
				return (zfs_error(hdl, EZFS_ERRORSCRUBBING,
				    errbuf));
			} else {
				/* handles case 5 */
				ASSERT3U(func, ==, POOL_SCAN_ERRORSCRUB);
				ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
				return (zfs_error(hdl, EZFS_ERRORSCRUB_PAUSED,
				    errbuf));
			}
		} else {
			/* handles case 6 */
			return (zfs_error(hdl, EZFS_RESILVERING, errbuf));
		}
	} else if (err == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, errbuf));
	} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
		return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, errbuf));
	} else {
		return (zpool_standard_error(hdl, err, errbuf));
	}
}

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 *
 * If 'return_parent' is set, then return the *parent* of the vdev you're
 * searching for rather than the vdev itself.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	const char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
	const char *tmp = NULL;
	boolean_t is_root;

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &tmp);
	if (strcmp(tmp, "root") == 0)
		is_root = B_TRUE;
	else
		is_root = B_FALSE;

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval = fnvpair_value_uint64(pair);
			uint64_t theguid = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_GUID);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		const char *srchval, *val;

		srchval = fnvpair_value_string(pair);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "-part1", or "p1".
The suffix is hidden from the user,2985* but included in the string, so this matches around it.2986* - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()2987* is used to check all possible expanded paths.2988* - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).2989*2990* Otherwise, all other searches are simple string compares.2991*/2992if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {2993uint64_t wholedisk = 0;29942995(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,2996&wholedisk);2997if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)2998return (nv);29993000} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0) {3001char *type, *idx, *end, *p;3002uint64_t id, vdev_id;30033004/*3005* Determine our vdev type, keeping in mind3006* that the srchval is composed of a type and3007* vdev id pair (i.e. mirror-4).3008*/3009if ((type = strdup(srchval)) == NULL)3010return (NULL);30113012if ((p = strrchr(type, '-')) == NULL) {3013free(type);3014break;3015}3016idx = p + 1;3017*p = '\0';30183019/*3020* draid names are presented like: draid2:4d:6c:0s3021* We match them up to the first ':' so we can still3022* do the parity check below, but the other params3023* are ignored.3024*/3025if ((p = strchr(type, ':')) != NULL) {3026if (strncmp(type, VDEV_TYPE_DRAID,3027strlen(VDEV_TYPE_DRAID)) == 0)3028*p = '\0';3029}30303031/*3032* If the types don't match then keep looking.3033*/3034if (strncmp(val, type, strlen(val)) != 0) {3035free(type);3036break;3037}30383039verify(zpool_vdev_is_interior(type));30403041id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);3042errno = 0;3043vdev_id = strtoull(idx, &end, 10);30443045/*3046* If we are looking for a raidz and a parity is3047* specified, make sure it matches.3048*/3049int rzlen = strlen(VDEV_TYPE_RAIDZ);3050assert(rzlen == strlen(VDEV_TYPE_DRAID));3051int typlen = strlen(type);3052if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||3053strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&3054typlen != rzlen) {3055uint64_t vdev_parity;3056int parity = *(type + rzlen) - '0';30573058if (parity <= 0 || parity > 3 ||3059(typlen - rzlen) != 1) {3060/*3061* Nonsense parity specified, can3062* never match3063*/3064free(type);3065return (NULL);3066}3067vdev_parity = fnvlist_lookup_uint64(nv,3068ZPOOL_CONFIG_NPARITY);3069if ((int)vdev_parity != parity) {3070free(type);3071break;3072}3073}30743075free(type);3076if (errno != 0)3077return (NULL);30783079/*3080* Now verify that we have the correct vdev id.3081*/3082if (vdev_id == id)3083return (nv);3084}30853086/*3087* Common case3088*/3089if (strcmp(srchval, val) == 0)3090return (nv);3091break;3092}30933094default:3095break;3096}30973098if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,3099&child, &children) != 0)3100return (NULL);31013102for (c = 0; c < children; c++) {3103if ((ret = vdev_to_nvlist_iter(child[c], search,3104avail_spare, l2cache, NULL, return_parent)) != NULL) {3105/*3106* The 'is_log' value is only set for the toplevel3107* vdev, not the leaf vdevs. So we always lookup the3108* log device from the root of the vdev tree (where3109* 'log' is non-NULL).3110*/3111if (log != NULL &&3112nvlist_lookup_uint64(child[c],3113ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&3114is_log) {3115*log = B_TRUE;3116}3117return (ret && return_parent && !is_root ? 
nv : ret);3118}3119}31203121if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,3122&child, &children) == 0) {3123for (c = 0; c < children; c++) {3124if ((ret = vdev_to_nvlist_iter(child[c], search,3125avail_spare, l2cache, NULL, return_parent))3126!= NULL) {3127*avail_spare = B_TRUE;3128return (ret && return_parent &&3129!is_root ? nv : ret);3130}3131}3132}31333134if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,3135&child, &children) == 0) {3136for (c = 0; c < children; c++) {3137if ((ret = vdev_to_nvlist_iter(child[c], search,3138avail_spare, l2cache, NULL, return_parent))3139!= NULL) {3140*l2cache = B_TRUE;3141return (ret && return_parent &&3142!is_root ? nv : ret);3143}3144}3145}31463147return (NULL);3148}31493150/*3151* Given a physical path or guid, find the associated vdev.3152*/3153nvlist_t *3154zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,3155boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)3156{3157nvlist_t *search, *nvroot, *ret;3158uint64_t guid;3159char *end;31603161search = fnvlist_alloc();31623163guid = strtoull(ppath, &end, 0);3164if (guid != 0 && *end == '\0') {3165fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);3166} else {3167fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath);3168}31693170nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,3171ZPOOL_CONFIG_VDEV_TREE);31723173*avail_spare = B_FALSE;3174*l2cache = B_FALSE;3175if (log != NULL)3176*log = B_FALSE;3177ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,3178B_FALSE);3179fnvlist_free(search);31803181return (ret);3182}31833184/*3185* Determine if we have an "interior" top-level vdev (i.e mirror/raidz).3186*/3187static boolean_t3188zpool_vdev_is_interior(const char *name)3189{3190if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||3191strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||3192strncmp(name,3193VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||3194strncmp(name, VDEV_TYPE_ROOT, strlen(VDEV_TYPE_ROOT)) == 0 ||3195strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)3196return (B_TRUE);31973198if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&3199!zpool_is_draid_spare(name))3200return (B_TRUE);32013202return (B_FALSE);3203}32043205/*3206* Lookup the nvlist for a given vdev or vdev's parent (depending on3207* if 'return_parent' is set).3208*/3209static nvlist_t *3210__zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,3211boolean_t *l2cache, boolean_t *log, boolean_t return_parent)3212{3213char *end;3214nvlist_t *nvroot, *search, *ret;3215uint64_t guid;3216boolean_t __avail_spare, __l2cache, __log;32173218search = fnvlist_alloc();32193220guid = strtoull(path, &end, 0);3221if (guid != 0 && *end == '\0') {3222fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);3223} else if (zpool_vdev_is_interior(path)) {3224fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path);3225} else {3226fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path);3227}32283229nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,3230ZPOOL_CONFIG_VDEV_TREE);32313232/*3233* User can pass NULL for avail_spare, l2cache, and log, but3234* we still need to provide variables to vdev_to_nvlist_iter(), so3235* just point them to junk variables here.3236*/3237if (!avail_spare)3238avail_spare = &__avail_spare;3239if (!l2cache)3240l2cache = &__l2cache;3241if (!log)3242log = &__log;32433244*avail_spare = B_FALSE;3245*l2cache = B_FALSE;3246if (log != NULL)3247*log = B_FALSE;3248ret = vdev_to_nvlist_iter(nvroot, 
search, avail_spare, l2cache, log,3249return_parent);3250fnvlist_free(search);32513252return (ret);3253}32543255nvlist_t *3256zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,3257boolean_t *l2cache, boolean_t *log)3258{3259return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,3260B_FALSE));3261}32623263/* Given a vdev path, return its parent's nvlist */3264nvlist_t *3265zpool_find_parent_vdev(zpool_handle_t *zhp, const char *path,3266boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)3267{3268return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,3269B_TRUE));3270}32713272/*3273* Convert a vdev path to a GUID. Returns GUID or 0 on error.3274*3275* If is_spare, is_l2cache, or is_log is non-NULL, then store within it3276* if the VDEV is a spare, l2cache, or log device. If they're NULL then3277* ignore them.3278*/3279static uint64_t3280zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,3281boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)3282{3283boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;3284nvlist_t *tgt;32853286if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,3287&log)) == NULL)3288return (0);32893290if (is_spare != NULL)3291*is_spare = spare;3292if (is_l2cache != NULL)3293*is_l2cache = l2cache;3294if (is_log != NULL)3295*is_log = log;32963297return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID));3298}32993300/* Convert a vdev path to a GUID. Returns GUID or 0 on error. */3301uint64_t3302zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)3303{3304return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));3305}33063307/*3308* Bring the specified vdev online. The 'flags' parameter is a set of the3309* ZFS_ONLINE_* flags.3310*/3311int3312zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,3313vdev_state_t *newstate)3314{3315zfs_cmd_t zc = {"\0"};3316char errbuf[ERRBUFLEN];3317nvlist_t *tgt;3318boolean_t avail_spare, l2cache, islog;3319libzfs_handle_t *hdl = zhp->zpool_hdl;33203321if (flags & ZFS_ONLINE_EXPAND) {3322(void) snprintf(errbuf, sizeof (errbuf),3323dgettext(TEXT_DOMAIN, "cannot expand %s"), path);3324} else {3325(void) snprintf(errbuf, sizeof (errbuf),3326dgettext(TEXT_DOMAIN, "cannot online %s"), path);3327}33283329(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));3330if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,3331&islog)) == NULL)3332return (zfs_error(hdl, EZFS_NODEVICE, errbuf));33333334zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);33353336if (!(flags & ZFS_ONLINE_SPARE) && avail_spare)3337return (zfs_error(hdl, EZFS_ISSPARE, errbuf));33383339#ifndef __FreeBSD__3340const char *pathname;3341if ((flags & ZFS_ONLINE_EXPAND ||3342zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&3343nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {3344uint64_t wholedisk = 0;33453346(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,3347&wholedisk);33483349/*3350* XXX - L2ARC 1.0 devices can't support expansion.3351*/3352if (l2cache) {3353zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3354"cannot expand cache devices"));3355return (zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf));3356}33573358if (wholedisk) {3359const char *fullpath = path;3360char buf[MAXPATHLEN];3361int error;33623363if (path[0] != '/') {3364error = zfs_resolve_shortname(path, buf,3365sizeof (buf));3366if (error != 0)3367return (zfs_error(hdl, EZFS_NODEVICE,3368errbuf));33693370fullpath = buf;3371}33723373error = zpool_relabel_disk(hdl, 
fullpath, errbuf);3374if (error != 0)3375return (error);3376}3377}3378#endif33793380zc.zc_cookie = VDEV_STATE_ONLINE;3381zc.zc_obj = flags;33823383if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {3384if (errno == EINVAL) {3385zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "3386"from this pool into a new one. Use '%s' "3387"instead"), "zpool detach");3388return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, errbuf));3389}3390return (zpool_standard_error(hdl, errno, errbuf));3391}33923393*newstate = zc.zc_cookie;3394return (0);3395}33963397/*3398* Take the specified vdev offline3399*/3400int3401zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)3402{3403zfs_cmd_t zc = {"\0"};3404char errbuf[ERRBUFLEN];3405nvlist_t *tgt;3406boolean_t avail_spare, l2cache;3407libzfs_handle_t *hdl = zhp->zpool_hdl;34083409(void) snprintf(errbuf, sizeof (errbuf),3410dgettext(TEXT_DOMAIN, "cannot offline %s"), path);34113412(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));3413if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,3414NULL)) == NULL)3415return (zfs_error(hdl, EZFS_NODEVICE, errbuf));34163417zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);34183419if (avail_spare)3420return (zfs_error(hdl, EZFS_ISSPARE, errbuf));34213422zc.zc_cookie = VDEV_STATE_OFFLINE;3423zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;34243425if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)3426return (0);34273428switch (errno) {3429case EBUSY:34303431/*3432* There are no other replicas of this device.3433*/3434return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));34353436case EEXIST:3437/*3438* The log device has unplayed logs3439*/3440return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf));34413442default:3443return (zpool_standard_error(hdl, errno, errbuf));3444}3445}34463447/*3448* Remove the specified vdev asynchronously from the configuration, so3449* that it may come ONLINE if reinserted. 
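 * A hedged usage sketch (the device path is hypothetical; errors are
 * reported through the pool handle):
 *
 *	if (zpool_vdev_remove_wanted(zhp, "/dev/sdf") != 0)
 *		...
 *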
 * This is called from zed on a udev remove event.
 * Note: We also have a similar function zpool_vdev_remove() that
 * removes the vdev from the pool.
 */
int
zpool_vdev_remove_wanted(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);

	zc.zc_cookie = VDEV_STATE_REMOVED;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, errbuf));
}

/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));

	default:
		return (zpool_standard_error(hdl, errno, errbuf));
	}
}

/*
 * Generic set vdev state function
 */
static int
zpool_vdev_set_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux,
    vdev_state_t state)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set %s %llu"),
	    zpool_state_to_name(state, aux), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = state;
	zc.zc_obj = aux;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, errbuf));
}

/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_DEGRADED));
}

/*
 * Mark the given vdev as in a removed state (as if the device does not exist).
 *
 * This is different than zpool_vdev_remove() which does a removal of a device
 * from the pool (but the device does exist).
 */
int
zpool_vdev_set_removed_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_REMOVED));
}

/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		const char *type = fnvlist_lookup_string(search,
		    ZPOOL_CONFIG_TYPE);
		if ((strcmp(type, VDEV_TYPE_SPARE) == 0
||3578strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&3579children == 2 && child[which] == tgt)3580return (B_TRUE);35813582for (c = 0; c < children; c++)3583if (is_replacing_spare(child[c], tgt, which))3584return (B_TRUE);3585}35863587return (B_FALSE);3588}35893590/*3591* Attach new_disk (fully described by nvroot) to old_disk.3592* If 'replacing' is specified, the new disk will replace the old one.3593*/3594int3595zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,3596const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)3597{3598zfs_cmd_t zc = {"\0"};3599char errbuf[ERRBUFLEN];3600int ret;3601nvlist_t *tgt;3602boolean_t avail_spare, l2cache, islog;3603uint64_t val;3604char *newname;3605const char *type;3606nvlist_t **child;3607uint_t children;3608nvlist_t *config_root;3609libzfs_handle_t *hdl = zhp->zpool_hdl;36103611if (replacing)3612(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,3613"cannot replace %s with %s"), old_disk, new_disk);3614else3615(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,3616"cannot attach %s to %s"), new_disk, old_disk);36173618(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));3619if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,3620&islog)) == NULL)3621return (zfs_error(hdl, EZFS_NODEVICE, errbuf));36223623if (avail_spare)3624return (zfs_error(hdl, EZFS_ISSPARE, errbuf));36253626if (l2cache)3627return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));36283629zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);3630zc.zc_cookie = replacing;3631zc.zc_simple = rebuild;36323633if (rebuild &&3634zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {3635zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3636"the loaded zfs module doesn't support device rebuilds"));3637return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));3638}36393640type = fnvlist_lookup_string(tgt, ZPOOL_CONFIG_TYPE);3641if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 &&3642zfeature_lookup_guid("org.openzfs:raidz_expansion", NULL) != 0) {3643zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3644"the loaded zfs module doesn't support raidz expansion"));3645return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));3646}36473648if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,3649&child, &children) != 0 || children != 1) {3650zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3651"new device must be a single disk"));3652return (zfs_error(hdl, EZFS_INVALCONFIG, errbuf));3653}36543655config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),3656ZPOOL_CONFIG_VDEV_TREE);36573658if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)3659return (-1);36603661/*3662* If the target is a hot spare that has been swapped in, we can only3663* replace it with another hot spare.3664*/3665if (replacing &&3666nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&3667(zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,3668NULL) == NULL || !avail_spare) &&3669is_replacing_spare(config_root, tgt, 1)) {3670zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3671"can only be replaced by another hot spare"));3672free(newname);3673return (zfs_error(hdl, EZFS_BADTARGET, errbuf));3674}36753676free(newname);36773678zcmd_write_conf_nvlist(hdl, &zc, nvroot);36793680ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);36813682zcmd_free_nvlists(&zc);36833684if (ret == 0)3685return (0);36863687switch (errno) {3688case ENOTSUP:3689/*3690* Can't attach to or replace this type of vdev.3691*/3692if (replacing) {3693uint64_t version = zpool_get_prop_int(zhp,3694ZPOOL_PROP_VERSION, 
			    NULL);

			if (islog) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			} else if (rebuild) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "only mirror and dRAID vdevs support "
				    "sequential reconstruction"));
			} else if (zpool_is_draid_spare(new_disk)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID spares can only replace child "
				    "devices in their parent's dRAID vdev"));
			} else if (version >= SPA_VERSION_MULTI_REPLACE) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
			}
		} else if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "raidz_expansion feature must be enabled "
			    "in order to attach a device to raidz"));
		} else {
			char status[64] = {0};
			zpool_prop_get_feature(zhp,
			    "feature@device_rebuild", status, 63);
			if (rebuild &&
			    strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device_rebuild feature must be enabled "
				    "in order to use sequential "
				    "reconstruction"));
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "can only attach to mirrors and top-level "
				    "disks"));
			}
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
		break;

	case EDOM:
		/*
		 * The new device has a different optimal sector size.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device has a different optimal sector size; use the "
		    "option '-o ashift=N' to override the optimal size"));
		(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		break;

	case ENXIO:
		/*
		 * The existing raidz vdev has offline children.
		 */
		if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "raidz vdev has devices that are offline or "
			    "being replaced"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}
		break;

	case EADDRINUSE:
		/*
		 * The boot reserved area is already being used (FreeBSD).
		 */
		if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "the reserved boot area needed for the expansion "
			    "is already being used by a boot loader"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}
		break;

	case ZFS_ERR_ASHIFT_MISMATCH:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "The new device cannot have a higher alignment requirement "
		    "than the top-level vdev."));
		(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
		break;
	default:
		(void) zpool_standard_error(hdl,
errno, errbuf);3816}38173818return (-1);3819}38203821/*3822* Detach the specified device.3823*/3824int3825zpool_vdev_detach(zpool_handle_t *zhp, const char *path)3826{3827zfs_cmd_t zc = {"\0"};3828char errbuf[ERRBUFLEN];3829nvlist_t *tgt;3830boolean_t avail_spare, l2cache;3831libzfs_handle_t *hdl = zhp->zpool_hdl;38323833(void) snprintf(errbuf, sizeof (errbuf),3834dgettext(TEXT_DOMAIN, "cannot detach %s"), path);38353836(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));3837if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,3838NULL)) == NULL)3839return (zfs_error(hdl, EZFS_NODEVICE, errbuf));38403841if (avail_spare)3842return (zfs_error(hdl, EZFS_ISSPARE, errbuf));38433844if (l2cache)3845return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));38463847zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);38483849if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)3850return (0);38513852switch (errno) {38533854case ENOTSUP:3855/*3856* Can't detach from this type of vdev.3857*/3858zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "3859"applicable to mirror and replacing vdevs"));3860(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);3861break;38623863case EBUSY:3864/*3865* There are no other replicas of this device.3866*/3867(void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf);3868break;38693870default:3871(void) zpool_standard_error(hdl, errno, errbuf);3872}38733874return (-1);3875}38763877/*3878* Find a mirror vdev in the source nvlist.3879*3880* The mchild array contains a list of disks in one of the top-level mirrors3881* of the source pool. The schild array contains a list of disks that the3882* user specified on the command line. We loop over the mchild array to3883* see if any entry in the schild array matches.3884*3885* If a disk in the mchild array is found in the schild array, we return3886* the index of that entry. Otherwise we return -1.3887*/3888static int3889find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,3890nvlist_t **schild, uint_t schildren)3891{3892uint_t mc;38933894for (mc = 0; mc < mchildren; mc++) {3895uint_t sc;3896char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,3897mchild[mc], 0);38983899for (sc = 0; sc < schildren; sc++) {3900char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,3901schild[sc], 0);3902boolean_t result = (strcmp(mpath, spath) == 0);39033904free(spath);3905if (result) {3906free(mpath);3907return (mc);3908}3909}39103911free(mpath);3912}39133914return (-1);3915}39163917/*3918* Split a mirror pool. 
If newroot points to null, then a new nvlist3919* is generated and it is the responsibility of the caller to free it.3920*/3921int3922zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,3923nvlist_t *props, splitflags_t flags)3924{3925zfs_cmd_t zc = {"\0"};3926char errbuf[ERRBUFLEN];3927const char *bias;3928nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;3929nvlist_t **varray = NULL, *zc_props = NULL;3930uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;3931libzfs_handle_t *hdl = zhp->zpool_hdl;3932uint64_t vers, readonly = B_FALSE;3933boolean_t freelist = B_FALSE, memory_err = B_TRUE;3934int retval = 0;39353936(void) snprintf(errbuf, sizeof (errbuf),3937dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);39383939if (!zpool_name_valid(hdl, B_FALSE, newname))3940return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));39413942if ((config = zpool_get_config(zhp, NULL)) == NULL) {3943(void) fprintf(stderr, gettext("Internal error: unable to "3944"retrieve pool configuration\n"));3945return (-1);3946}39473948tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);3949vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);39503951if (props) {3952prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };3953if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,3954props, vers, flags, errbuf)) == NULL)3955return (-1);3956(void) nvlist_lookup_uint64(zc_props,3957zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);3958if (readonly) {3959zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3960"property %s can only be set at import time"),3961zpool_prop_to_name(ZPOOL_PROP_READONLY));3962return (-1);3963}3964}39653966if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,3967&children) != 0) {3968zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3969"Source pool is missing vdev tree"));3970nvlist_free(zc_props);3971return (-1);3972}39733974varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));3975vcount = 0;39763977if (*newroot == NULL ||3978nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,3979&newchild, &newchildren) != 0)3980newchildren = 0;39813982for (c = 0; c < children; c++) {3983uint64_t is_log = B_FALSE, is_hole = B_FALSE;3984boolean_t is_special = B_FALSE, is_dedup = B_FALSE;3985const char *type;3986nvlist_t **mchild, *vdev;3987uint_t mchildren;3988int entry;39893990/*3991* Unlike cache & spares, slogs are stored in the3992* ZPOOL_CONFIG_CHILDREN array. 
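(As a stand-alone sketch, the placeholder built below is equivalent
		 * to the following, using the fnvlist_* helpers seen
		 * elsewhere in this file:
		 *
		 *	nvlist_t *hole = fnvlist_alloc();
		 *	fnvlist_add_string(hole, ZPOOL_CONFIG_TYPE,
		 *	    VDEV_TYPE_HOLE);
		 *	fnvlist_add_uint64(hole, ZPOOL_CONFIG_IS_HOLE, 1);
		 *
		 * The checked nvlist_* calls are used below instead so that
		 * an allocation failure can unwind through the out: label.)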
We filter them out here.3993*/3994(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,3995&is_log);3996(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,3997&is_hole);3998if (is_log || is_hole) {3999/*4000* Create a hole vdev and put it in the config.4001*/4002if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)4003goto out;4004if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,4005VDEV_TYPE_HOLE) != 0)4006goto out;4007if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,40081) != 0)4009goto out;4010if (lastlog == 0)4011lastlog = vcount;4012varray[vcount++] = vdev;4013continue;4014}4015lastlog = 0;4016type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE);40174018if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {4019vdev = child[c];4020if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)4021goto out;4022continue;4023} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {4024zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4025"Source pool must be composed only of mirrors\n"));4026retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);4027goto out;4028}40294030if (nvlist_lookup_string(child[c],4031ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {4032if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)4033is_special = B_TRUE;4034else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)4035is_dedup = B_TRUE;4036}4037verify(nvlist_lookup_nvlist_array(child[c],4038ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);40394040/* find or add an entry for this top-level vdev */4041if (newchildren > 0 &&4042(entry = find_vdev_entry(zhp, mchild, mchildren,4043newchild, newchildren)) >= 0) {4044/* We found a disk that the user specified. */4045vdev = mchild[entry];4046++found;4047} else {4048/* User didn't specify a disk for this vdev. */4049vdev = mchild[mchildren - 1];4050}40514052if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)4053goto out;40544055if (flags.dryrun != 0) {4056if (is_dedup == B_TRUE) {4057if (nvlist_add_string(varray[vcount - 1],4058ZPOOL_CONFIG_ALLOCATION_BIAS,4059VDEV_ALLOC_BIAS_DEDUP) != 0)4060goto out;4061} else if (is_special == B_TRUE) {4062if (nvlist_add_string(varray[vcount - 1],4063ZPOOL_CONFIG_ALLOCATION_BIAS,4064VDEV_ALLOC_BIAS_SPECIAL) != 0)4065goto out;4066}4067}4068}40694070/* did we find every disk the user specified? */4071if (found != newchildren) {4072zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "4073"include at most one disk from each mirror"));4074retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);4075goto out;4076}40774078/* Prepare the nvlist for populating. */4079if (*newroot == NULL) {4080if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)4081goto out;4082freelist = B_TRUE;4083if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,4084VDEV_TYPE_ROOT) != 0)4085goto out;4086} else {4087verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);4088}40894090/* Add all the children we found */4091if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,4092(const nvlist_t **)varray, lastlog == 0 ? 
vcount : lastlog) != 0)4093goto out;40944095/*4096* If we're just doing a dry run, exit now with success.4097*/4098if (flags.dryrun) {4099memory_err = B_FALSE;4100freelist = B_FALSE;4101goto out;4102}41034104/* now build up the config list & call the ioctl */4105if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)4106goto out;41074108if (nvlist_add_nvlist(newconfig,4109ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||4110nvlist_add_string(newconfig,4111ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||4112nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)4113goto out;41144115/*4116* The new pool is automatically part of the namespace unless we4117* explicitly export it.4118*/4119if (!flags.import)4120zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;4121(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));4122(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));4123zcmd_write_conf_nvlist(hdl, &zc, newconfig);4124if (zc_props != NULL)4125zcmd_write_src_nvlist(hdl, &zc, zc_props);41264127if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {4128retval = zpool_standard_error(hdl, errno, errbuf);4129goto out;4130}41314132freelist = B_FALSE;4133memory_err = B_FALSE;41344135out:4136if (varray != NULL) {4137int v;41384139for (v = 0; v < vcount; v++)4140nvlist_free(varray[v]);4141free(varray);4142}4143zcmd_free_nvlists(&zc);4144nvlist_free(zc_props);4145nvlist_free(newconfig);4146if (freelist) {4147nvlist_free(*newroot);4148*newroot = NULL;4149}41504151if (retval != 0)4152return (retval);41534154if (memory_err)4155return (no_memory(hdl));41564157return (0);4158}41594160/*4161* Remove the given device.4162*/4163int4164zpool_vdev_remove(zpool_handle_t *zhp, const char *path)4165{4166zfs_cmd_t zc = {"\0"};4167char errbuf[ERRBUFLEN];4168nvlist_t *tgt;4169boolean_t avail_spare, l2cache, islog;4170libzfs_handle_t *hdl = zhp->zpool_hdl;4171uint64_t version;41724173(void) snprintf(errbuf, sizeof (errbuf),4174dgettext(TEXT_DOMAIN, "cannot remove %s"), path);41754176if (zpool_is_draid_spare(path)) {4177zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4178"dRAID spares cannot be removed"));4179return (zfs_error(hdl, EZFS_NODEVICE, errbuf));4180}41814182(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));4183if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,4184&islog)) == NULL)4185return (zfs_error(hdl, EZFS_NODEVICE, errbuf));41864187version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);4188if (islog && version < SPA_VERSION_HOLES) {4189zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4190"pool must be upgraded to support log removal"));4191return (zfs_error(hdl, EZFS_BADVERSION, errbuf));4192}41934194zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);41954196if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)4197return (0);41984199switch (errno) {42004201case EALREADY:4202zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4203"removal for this vdev is already in progress."));4204(void) zfs_error(hdl, EZFS_BUSY, errbuf);4205break;42064207case EINVAL:4208zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4209"invalid config; all top-level vdevs must "4210"have the same sector size and not be raidz."));4211(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);4212break;42134214case EBUSY:4215if (islog) {4216zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4217"Mount encrypted datasets to replay logs."));4218} else {4219zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4220"Pool busy; removal may already be in progress"));4221}4222(void) zfs_error(hdl, EZFS_BUSY, errbuf);4223break;42244225case EACCES:4226if (islog) 
{4227zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4228"Mount encrypted datasets to replay logs."));4229(void) zfs_error(hdl, EZFS_BUSY, errbuf);4230} else {4231(void) zpool_standard_error(hdl, errno, errbuf);4232}4233break;42344235default:4236(void) zpool_standard_error(hdl, errno, errbuf);4237}4238return (-1);4239}42404241int4242zpool_vdev_remove_cancel(zpool_handle_t *zhp)4243{4244zfs_cmd_t zc = {{0}};4245char errbuf[ERRBUFLEN];4246libzfs_handle_t *hdl = zhp->zpool_hdl;42474248(void) snprintf(errbuf, sizeof (errbuf),4249dgettext(TEXT_DOMAIN, "cannot cancel removal"));42504251(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));4252zc.zc_cookie = 1;42534254if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)4255return (0);42564257return (zpool_standard_error(hdl, errno, errbuf));4258}42594260int4261zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,4262uint64_t *sizep)4263{4264char errbuf[ERRBUFLEN];4265nvlist_t *tgt;4266boolean_t avail_spare, l2cache, islog;4267libzfs_handle_t *hdl = zhp->zpool_hdl;42684269(void) snprintf(errbuf, sizeof (errbuf),4270dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),4271path);42724273if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,4274&islog)) == NULL)4275return (zfs_error(hdl, EZFS_NODEVICE, errbuf));42764277if (avail_spare || l2cache || islog) {4278*sizep = 0;4279return (0);4280}42814282if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {4283zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4284"indirect size not available"));4285return (zfs_error(hdl, EINVAL, errbuf));4286}4287return (0);4288}42894290/*4291* Clear the errors for the pool, or the particular device if specified.4292*/4293int4294zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)4295{4296zfs_cmd_t zc = {"\0"};4297char errbuf[ERRBUFLEN];4298nvlist_t *tgt;4299zpool_load_policy_t policy;4300boolean_t avail_spare, l2cache;4301libzfs_handle_t *hdl = zhp->zpool_hdl;4302nvlist_t *nvi = NULL;4303int error;43044305if (path)4306(void) snprintf(errbuf, sizeof (errbuf),4307dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),4308path);4309else4310(void) snprintf(errbuf, sizeof (errbuf),4311dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),4312zhp->zpool_name);43134314(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));4315if (path) {4316if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,4317&l2cache, NULL)) == NULL)4318return (zfs_error(hdl, EZFS_NODEVICE, errbuf));43194320/*4321* Don't allow error clearing for hot spares. 
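 * (Caller-side sketch with hypothetical names: a NULL path clears the
 * whole pool, and the rewind policy travels in 'rewindnvl'; the
 * ZPOOL_LOAD_REWIND_POLICY key is an assumption, based on the
 * load-policy nvlists used elsewhere in libzfs:
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *	fnvlist_add_uint64(policy, ZPOOL_LOAD_REWIND_POLICY,
 *	    ZPOOL_NO_REWIND);
 *	int err = zpool_clear(zhp, NULL, policy);
 *	fnvlist_free(policy);
 *
 * The sketch elides error handling.)
 *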

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, errbuf));
}

/*
 * Change the GUID for a pool.
 *
 * Similar to zpool_reguid(), but may take a GUID.
 *
 * If the guid argument is NULL, then no GUID is passed in the nvlist to the
 * ioctl().
 */
int
zpool_set_guid(zpool_handle_t *zhp, const uint64_t *guid)
{
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvl = NULL;
	zfs_cmd_t zc = {"\0"};
	int error;

	if (guid != NULL) {
		if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
			return (no_memory(hdl));

		if (nvlist_add_uint64(nvl, ZPOOL_REGUID_GUID, *guid) != 0) {
			nvlist_free(nvl);
			return (no_memory(hdl));
		}

		zcmd_write_src_nvlist(hdl, &zc, nvl);
	}

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	error = zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc);
	/* free the request nvlists on both the success and error paths */
	if (guid != NULL) {
		zcmd_free_nvlists(&zc);
		nvlist_free(nvl);
	}
	if (error)
		return (zpool_standard_error(hdl, errno, errbuf));
	return (0);
}

/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	return (zpool_set_guid(zhp, NULL));
}

/*
 * Reopen the pool.
 */
int
zpool_reopen_one(zpool_handle_t *zhp, void *data)
{
	libzfs_handle_t *hdl = zpool_get_handle(zhp);
	const char *pool_name = zpool_get_name(zhp);
	boolean_t *scrub_restart = data;
	int error;

	error = lzc_reopen(pool_name, *scrub_restart);
	if (error) {
		return (zpool_standard_error_fmt(hdl, error,
		    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
	}

	return (0);
}
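
/*
 * Usage sketch (illustrative only): zpool_reopen_one() is shaped as a
 * zpool_iter() callback, so a hypothetical caller can reopen every imported
 * pool in one pass ("g_zfs" is a libzfs_handle_t):
 *
 *	boolean_t scrub_restart = B_FALSE;
 *	(void) zpool_iter(g_zfs, zpool_reopen_one, &scrub_restart);
 */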

/* call into libzfs_core to execute the sync IOCTL per pool */
int
zpool_sync_one(zpool_handle_t *zhp, void *data)
{
	int ret;
	libzfs_handle_t *hdl = zpool_get_handle(zhp);
	const char *pool_name = zpool_get_name(zhp);
	boolean_t *force = data;
	nvlist_t *innvl = fnvlist_alloc();

	fnvlist_add_boolean_value(innvl, "force", *force);
	if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
		nvlist_free(innvl);
		return (zpool_standard_error_fmt(hdl, ret,
		    dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
	}
	nvlist_free(innvl);

	return (0);
}

#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat. If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location. The kernel will have opened the device by
 * devid, but the path will still refer to the old location. To catch this, we
 * first do a path -> devid translation (which is fast for the common case). If
 * the devid matches, we're done. If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the
 * vdev. If 'zhp' is NULL, then this is an exported pool, and we don't need to
 * do any of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int name_flags)
{
	const char *type, *tpath;
	const char *path;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN * 2];

	/*
	 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
	 * zpool name that will be displayed to the user.
	 */
	type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
	if (zhp != NULL && strcmp(type, "root") == 0)
		return (zfs_strdup(hdl, zpool_get_name(zhp)));

	if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_PATH"))
		name_flags |= VDEV_NAME_PATH;
	if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_GUID"))
		name_flags |= VDEV_NAME_GUID;
	if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_FOLLOW_LINKS"))
		name_flags |= VDEV_NAME_FOLLOW_LINKS;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    name_flags & VDEV_NAME_GUID) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
		(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tpath) == 0) {
		path = tpath;

		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
			char *rp = realpath(path, NULL);
			if (rp) {
				strlcpy(buf, rp, sizeof (buf));
				path = buf;
				free(rp);
			}
		}

		/*
		 * For a block device only use the name.
		 */
		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
		    !(name_flags & VDEV_NAME_PATH)) {
			path = zfs_strip_path(path);
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
		    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
			return (zfs_strip_partition(path));
		}
	} else {
		path = type;

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			value = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * If it's a dRAID device, we add parity, groups, and spares.
		 */
		if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
			uint64_t ndata, nparity, nspares;
			nvlist_t **child;
			uint_t children;

			verify(nvlist_lookup_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
			nparity = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_NPARITY);
			ndata = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_DRAID_NDATA);
			nspares = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_DRAID_NSPARES);

			path = zpool_draid_name(buf, sizeof (buf), ndata,
			    nparity, nspares, children);
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (name_flags & VDEV_NAME_TYPE_ID) {
			uint64_t id = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_ID);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}
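
/*
 * Usage sketch (illustrative only): the returned string is allocated with
 * zfs_strdup(), so callers own it and free it with free(); "nv_child" here
 * is a hypothetical child nvlist taken from a config's ZPOOL_CONFIG_VDEV_TREE:
 *
 *	char *name = zpool_vdev_name(g_zfs, zhp, nv_child,
 *	    VDEV_NAME_TYPE_ID);
 *	...
 *	free(name);
 */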

static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}

void
zpool_add_propname(zpool_handle_t *zhp, const char *propname)
{
	assert(zhp->zpool_n_propnames < ZHP_MAX_PROPNAMES);
	zhp->zpool_propnames[zhp->zpool_n_propnames] = propname;
	zhp->zpool_n_propnames++;
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zbookmark_phys_t *buf;
	uint64_t buflen = 10000; /* approx. 1MB of RAM */

	if (fnvlist_lookup_uint64(zhp->zpool_config,
	    ZPOOL_CONFIG_ERRCOUNT) == 0)
		return (0);

	/*
	 * Retrieve the raw error list from the kernel. If it doesn't fit,
	 * allocate a larger buffer and retry.
	 */
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		buf = zfs_alloc(zhp->zpool_hdl,
		    buflen * sizeof (zbookmark_phys_t));
		zc.zc_nvlist_dst = (uintptr_t)buf;
		zc.zc_nvlist_dst_size = buflen;
		if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free(buf);
			if (errno == ENOMEM) {
				buflen *= 2;
			} else {
				return (zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "errors: List of "
				    "errors unavailable")));
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks. This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process. So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zbookmark_phys_t *zb = buf + zc.zc_nvlist_dst_size;
	uint64_t zblen = buflen - zc.zc_nvlist_dst_size;

	qsort(zb, zblen, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (uint64_t i = 0; i < zblen; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free(buf);
	return (0);

nomem:
	free(buf);
	return (no_memory(zhp->zpool_hdl));
}
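
/*
 * Usage sketch (illustrative only): each entry in the returned list carries
 * ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT, which can be rendered with
 * zpool_obj_to_path() (defined later in this file):
 *
 *	nvlist_t *nverrlist = NULL;
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		nvpair_t *elem = NULL;
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv = fnvpair_value_nvlist(elem);
 *			... fnvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET) ...
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */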

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int i;

	(void) strlcpy(string, zfs_basename(argv[0]), len);
	for (i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}

int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *args;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "message", message);
	zcmd_write_src_nvlist(hdl, &zc, args);
	int err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
	nvlist_free(args);
	zcmd_free_nvlists(&zc);
	return (err);
}
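
/*
 * Usage sketch (illustrative only): the zfs/zpool commands record their own
 * invocation roughly like this ("g_zfs" is a libzfs_handle_t):
 *
 *	char history_str[HIS_MAX_RECORD_LEN];
 *	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
 *	(void) zpool_log_history(g_zfs, history_str);
 */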

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual number of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
    boolean_t *eof)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char *buf;
	int buflen = 128 * 1024;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err = 0, i;
	uint64_t start = *off;

	buf = zfs_alloc(hdl, buflen);

	/* process about 1MiB a time */
	while (*off - start < 1024 * 1024) {
		uint64_t bytes_read = buflen;
		uint64_t leftover;

		if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read) {
			*eof = B_TRUE;
			break;
		}

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0) {
			zpool_standard_error_fmt(hdl, err,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name);
			break;
		}
		*off -= leftover;
		if (leftover == bytes_read) {
			/*
			 * no progress made, because buffer is not big enough
			 * to hold this record; resize and retry.
			 */
			buflen *= 2;
			free(buf);
			buf = zfs_alloc(hdl, buflen);
		}
	}

	free(buf);

	if (!err) {
		*nvhisp = fnvlist_alloc();
		fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    (const nvlist_t **)records, numrecords);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
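
/*
 * Usage sketch (illustrative only): history is consumed incrementally, so a
 * hypothetical caller loops until 'eof' is set:
 *
 *	nvlist_t *nvhis;
 *	uint64_t off = 0;
 *	boolean_t eof = B_FALSE;
 *
 *	while (!eof) {
 *		if (zpool_get_history(zhp, &nvhis, &off, &eof) != 0)
 *			break;
 *		... walk the ZPOOL_HIST_RECORD array ...
 *		nvlist_free(nvhis);
 *	}
 */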

/*
 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
 * If there is a new event available 'nvp' will contain a newly allocated
 * nvlist and 'dropped' will be set to the number of missed events since
 * the last call to this function. When 'nvp' is set to NULL it indicates
 * no new events are available. In either case the function returns 0 and
 * it is up to the caller to free 'nvp'. In the case of a fatal error the
 * function will return a non-zero value. When the function is called in
 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
 * it will not return until a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, unsigned flags, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = zevent_fd;

	if (flags & ZEVENT_NONBLOCK)
		zc.zc_guid = ZEVENT_NONBLOCK;

	zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (!(flags & ZEVENT_NONBLOCK))
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			zcmd_expand_dst_nvlist(hdl, &zc);
			goto retry;
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}

/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = {"\0"};

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot clear events")));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}
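
/*
 * Usage sketch (illustrative only): a hypothetical consumer drains pending
 * events without blocking by opening ZFS_DEV itself and passing
 * ZEVENT_NONBLOCK:
 *
 *	int zevent_fd = open(ZFS_DEV, O_RDWR | O_CLOEXEC);
 *	nvlist_t *event;
 *	int dropped;
 *
 *	while (zpool_events_next(g_zfs, &event, &dropped,
 *	    ZEVENT_NONBLOCK, zevent_fd) == 0 && event != NULL) {
 *		... inspect event ...
 *		nvlist_free(event);
 *	}
 *	(void) close(zevent_fd);
 */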

/*
 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
 * the passed zevent_fd file handle. On success zero is returned,
 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
 */
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	zc.zc_guid = eid;
	zc.zc_cleanup_fd = zevent_fd;

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
		switch (errno) {
		case ENOENT:
			error = zfs_error_fmt(hdl, EZFS_NOENT,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		case ENOMEM:
			error = zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;
		}
	}

	return (error);
}

static void
zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len, boolean_t always_unmounted)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (zfs_ioctl(zhp->zpool_hdl,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
	    &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}

void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
}

void
zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
}

/*
 * Wait while the specified activity is in progress in the pool.
 */
int
zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
{
	boolean_t missing;

	int error = zpool_wait_status(zhp, activity, &missing, NULL);

	if (missing) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
		    zhp->zpool_name);
		return (ENOENT);
	} else {
		return (error);
	}
}
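
/*
 * Usage sketch (illustrative only): block until any in-progress scrub on the
 * pool finishes:
 *
 *	if (zpool_wait(zhp, ZPOOL_WAIT_SCRUB) != 0)
 *		... report the error ...
 */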

/*
 * Wait for the given activity and return the status of the wait (whether or
 * not any waiting was done) in the 'waited' parameter. Non-existent pools are
 * reported via the 'missing' parameter, rather than by printing an error
 * message. This is convenient when this function is called in a loop over a
 * long period of time (as it is, for example, by zpool's wait cmd). In that
 * scenario, a pool being exported or destroyed should be considered a normal
 * event, so we don't want to print an error when we find that the pool doesn't
 * exist.
 */
int
zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
    boolean_t *missing, boolean_t *waited)
{
	int error = lzc_wait(zhp->zpool_name, activity, waited);
	*missing = (error == ENOENT);
	if (*missing)
		return (0);

	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
		    zhp->zpool_name);
	}

	return (error);
}

int
zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
{
	int error = lzc_set_bootenv(zhp->zpool_name, envmap);
	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN,
		    "error setting bootenv in pool '%s'"), zhp->zpool_name);
	}

	return (error);
}

int
zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
{
	nvlist_t *nvl;
	int error;

	nvl = NULL;
	error = lzc_get_bootenv(zhp->zpool_name, &nvl);
	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN,
		    "error getting bootenv in pool '%s'"), zhp->zpool_name);
	} else {
		*nvlp = nvl;
	}

	return (error);
}
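
/*
 * Usage sketch (illustrative only): the nvlist returned by
 * zpool_get_bootenv() is owned by the caller and must be freed:
 *
 *	nvlist_t *benv = NULL;
 *	if (zpool_get_bootenv(zhp, &benv) == 0) {
 *		... inspect benv ...
 *		nvlist_free(benv);
 *	}
 */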

/*
 * Attempt to read and parse feature file(s) (from "compatibility" property).
 * Files contain zpool feature names, comma or whitespace-separated.
 * Comments (# character to next newline) are discarded.
 *
 * Arguments:
 *  compatibility : string containing feature filenames
 *  features : either NULL or pointer to array of boolean
 *  report : either NULL or pointer to string buffer
 *  rlen : length of "report" buffer
 *
 * compatibility is NULL (unset), "", "off", "legacy", or a list of
 * comma-separated filenames. Filenames should either be absolute,
 * or relative to:
 *   1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or
 *   2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).
 * (Unset), "" or "off" => enable all features
 * "legacy" => disable all features
 *
 * Any feature names read from files which match unames in spa_feature_table
 * will have the corresponding boolean set in the features array (if non-NULL).
 * If more than one feature set is specified, only features present in *all* of
 * them will be set.
 *
 * "report" if not NULL will be populated with a suitable status message.
 *
 * Return values:
 *   ZPOOL_COMPATIBILITY_OK : files read and parsed ok
 *   ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file
 *   ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name
 *   ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name
 *   ZPOOL_COMPATIBILITY_NOFILES : no feature files found
 */
zpool_compat_status_t
zpool_load_compat(const char *compat, boolean_t *features, char *report,
    size_t rlen)
{
	int sdirfd, ddirfd, featfd;
	struct stat fs;
	char *fc;
	char *ps, *ls, *ws;
	char *file, *line, *word;

	char l_compat[ZFS_MAXPROPLEN];

	boolean_t ret_nofiles = B_TRUE;
	boolean_t ret_badfile = B_FALSE;
	boolean_t ret_badtoken = B_FALSE;
	boolean_t ret_warntoken = B_FALSE;

	/* special cases (unset), "" and "off" => enable all features */
	if (compat == NULL || compat[0] == '\0' ||
	    strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {
		if (features != NULL) {
			for (uint_t i = 0; i < SPA_FEATURES; i++)
				features[i] = B_TRUE;
		}
		if (report != NULL)
			strlcpy(report, gettext("all features enabled"), rlen);
		return (ZPOOL_COMPATIBILITY_OK);
	}

	/* Final special case "legacy" => disable all features */
	if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
		if (features != NULL)
			for (uint_t i = 0; i < SPA_FEATURES; i++)
				features[i] = B_FALSE;
		if (report != NULL)
			strlcpy(report, gettext("all features disabled"), rlen);
		return (ZPOOL_COMPATIBILITY_OK);
	}

	/*
	 * Start with all true; will be ANDed with results from each file
	 */
	if (features != NULL)
		for (uint_t i = 0; i < SPA_FEATURES; i++)
			features[i] = B_TRUE;

	char err_badfile[ZFS_MAXPROPLEN] = "";
	char err_badtoken[ZFS_MAXPROPLEN] = "";

	/*
	 * We ignore errors from the directory open()
	 * as they're only needed if the filename is relative
	 * which will be checked during the openat().
	 */

/* O_PATH safer than O_RDONLY if system allows it */
#if defined(O_PATH)
#define	ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)
#else
#define	ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)
#endif

	sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);
	ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);

	(void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);

	for (file = strtok_r(l_compat, ",", &ps);
	    file != NULL;
	    file = strtok_r(NULL, ",", &ps)) {

		boolean_t l_features[SPA_FEATURES];

		enum { Z_SYSCONF, Z_DATA } source;

		/* try sysconfdir first, then datadir */
		source = Z_SYSCONF;
		if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {
			featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);
			source = Z_DATA;
		}

		/* File readable and correct size? */
		if (featfd < 0 ||
		    fstat(featfd, &fs) < 0 ||
		    fs.st_size < 1 ||
		    fs.st_size > ZPOOL_COMPAT_MAXSIZE) {
			(void) close(featfd);
			strlcat(err_badfile, file, ZFS_MAXPROPLEN);
			strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
			ret_badfile = B_TRUE;
			continue;
		}

/* Prefault the file if system allows */
#if defined(MAP_POPULATE)
#define	ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)
#elif defined(MAP_PREFAULT_READ)
#define	ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)
#else
#define	ZC_MMAP_FLAGS (MAP_PRIVATE)
#endif

		/* private mmap() so we can strtok safely */
		fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,
		    ZC_MMAP_FLAGS, featfd, 0);
		(void) close(featfd);

		/* map ok, and last character == newline? */
		if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {
			(void) munmap((void *)fc, fs.st_size);
			strlcat(err_badfile, file, ZFS_MAXPROPLEN);
			strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
			ret_badfile = B_TRUE;
			continue;
		}

		ret_nofiles = B_FALSE;

		for (uint_t i = 0; i < SPA_FEATURES; i++)
			l_features[i] = B_FALSE;

		/* replace final newline with NULL to ensure string ends */
		fc[fs.st_size - 1] = '\0';

		for (line = strtok_r(fc, "\n", &ls);
		    line != NULL;
		    line = strtok_r(NULL, "\n", &ls)) {
			/* discard comments */
			char *r = strchr(line, '#');
			if (r != NULL)
				*r = '\0';

			for (word = strtok_r(line, ", \t", &ws);
			    word != NULL;
			    word = strtok_r(NULL, ", \t", &ws)) {
				/* Find matching feature name */
				uint_t f;
				for (f = 0; f < SPA_FEATURES; f++) {
					zfeature_info_t *fi =
					    &spa_feature_table[f];
					if (strcmp(word, fi->fi_uname) == 0) {
						l_features[f] = B_TRUE;
						break;
					}
				}
				if (f < SPA_FEATURES)
					continue;

				/* found an unrecognized word */
				/* lightly sanitize it */
				if (strlen(word) > 32)
					word[32] = '\0';
				for (char *c = word; *c != '\0'; c++)
					if (!isprint(*c))
						*c = '?';

				strlcat(err_badtoken, word, ZFS_MAXPROPLEN);
				strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);
				if (source == Z_SYSCONF)
					ret_badtoken = B_TRUE;
				else
					ret_warntoken = B_TRUE;
			}
		}
		(void) munmap((void *)fc, fs.st_size);

		if (features != NULL)
			for (uint_t i = 0; i < SPA_FEATURES; i++)
				features[i] &= l_features[i];
	}
	(void) close(sdirfd);
	(void) close(ddirfd);

	/* Return the most serious error */
	if (ret_badfile) {
		if (report != NULL)
			snprintf(report, rlen, gettext("could not read/"
			    "parse feature file(s): %s"), err_badfile);
		return (ZPOOL_COMPATIBILITY_BADFILE);
	}
	if (ret_nofiles) {
		if (report != NULL)
			strlcpy(report,
			    gettext("no valid compatibility files specified"),
			    rlen);
		return (ZPOOL_COMPATIBILITY_NOFILES);
	}
	if (ret_badtoken) {
		if (report != NULL)
			snprintf(report, rlen, gettext("invalid feature "
			    "name(s) in local compatibility files: %s"),
			    err_badtoken);
		return (ZPOOL_COMPATIBILITY_BADTOKEN);
	}
	if (ret_warntoken) {
		if (report != NULL)
			snprintf(report, rlen, gettext("unrecognized feature "
			    "name(s) in distribution compatibility files: %s"),
			    err_badtoken);
		return (ZPOOL_COMPATIBILITY_WARNTOKEN);
	}
	if (report != NULL)
		strlcpy(report, gettext("compatibility set ok"), rlen);
	return (ZPOOL_COMPATIBILITY_OK);
}
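
/*
 * Usage sketch (illustrative only): check a "compatibility" property value
 * and report which features it permits; "openzfs-2.1-linux" is assumed to be
 * an installed compatibility.d file:
 *
 *	boolean_t features[SPA_FEATURES];
 *	char report[1024];
 *
 *	if (zpool_load_compat("openzfs-2.1-linux", features, report,
 *	    sizeof (report)) == ZPOOL_COMPATIBILITY_OK)
 *		... features[i] is B_TRUE for each permitted feature ...
 */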

static int
zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid)
{
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;

	verify(zhp != NULL);
	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		char errbuf[ERRBUFLEN];
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "pool is in an unavailable state"));
		return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf));
	}

	if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache,
	    NULL)) == NULL) {
		char errbuf[ERRBUFLEN];
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "can not find %s in %s"),
		    vdevname, zhp->zpool_name);
		return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf));
	}

	*vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
	return (0);
}

/*
 * Get a vdev property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
    char *buf, size_t len, zprop_source_t *srctype, boolean_t literal)
{
	nvlist_t *nv;
	const char *strval;
	uint64_t intval;
	zprop_source_t src = ZPROP_SRC_NONE;

	if (prop == VDEV_PROP_USERPROP) {
		/* user property, prop_name must contain the property name */
		assert(prop_name != NULL);
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
		} else {
			/* user prop not found */
			src = ZPROP_SRC_DEFAULT;
			strval = "-";
		}
		(void) strlcpy(buf, strval, len);
		if (srctype)
			*srctype = src;
		return (0);
	}

	if (prop_name == NULL)
		prop_name = (char *)vdev_prop_to_name(prop);

	switch (vdev_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
		} else {
			src = ZPROP_SRC_DEFAULT;
			if ((strval = vdev_prop_default_string(prop)) == NULL)
				strval = "-";
		}
		(void) strlcpy(buf, strval, len);
		break;

	case PROP_TYPE_NUMBER:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
		} else {
			src = ZPROP_SRC_DEFAULT;
			intval = vdev_prop_default_numeric(prop);
		}

		switch (prop) {
		case VDEV_PROP_ASIZE:
		case VDEV_PROP_PSIZE:
		case VDEV_PROP_SIZE:
		case VDEV_PROP_BOOTSIZE:
		case VDEV_PROP_ALLOCATED:
		case VDEV_PROP_FREE:
		case VDEV_PROP_READ_ERRORS:
		case VDEV_PROP_WRITE_ERRORS:
		case VDEV_PROP_CHECKSUM_ERRORS:
		case VDEV_PROP_INITIALIZE_ERRORS:
		case VDEV_PROP_TRIM_ERRORS:
		case VDEV_PROP_SLOW_IOS:
		case VDEV_PROP_OPS_NULL:
		case VDEV_PROP_OPS_READ:
		case VDEV_PROP_OPS_WRITE:
		case VDEV_PROP_OPS_FREE:
		case VDEV_PROP_OPS_CLAIM:
		case VDEV_PROP_OPS_TRIM:
		case VDEV_PROP_BYTES_NULL:
		case VDEV_PROP_BYTES_READ:
		case VDEV_PROP_BYTES_WRITE:
		case VDEV_PROP_BYTES_FREE:
		case VDEV_PROP_BYTES_CLAIM:
		case VDEV_PROP_BYTES_TRIM:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case VDEV_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case VDEV_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
"%llu%%",5497(u_longlong_t)intval);5498}5499break;5500case VDEV_PROP_CHECKSUM_N:5501case VDEV_PROP_CHECKSUM_T:5502case VDEV_PROP_IO_N:5503case VDEV_PROP_IO_T:5504case VDEV_PROP_SLOW_IO_N:5505case VDEV_PROP_SLOW_IO_T:5506if (intval == UINT64_MAX) {5507(void) strlcpy(buf, "-", len);5508} else {5509(void) snprintf(buf, len, "%llu",5510(u_longlong_t)intval);5511}5512break;5513case VDEV_PROP_FRAGMENTATION:5514if (intval == UINT64_MAX) {5515(void) strlcpy(buf, "-", len);5516} else {5517(void) snprintf(buf, len, "%llu%%",5518(u_longlong_t)intval);5519}5520break;5521case VDEV_PROP_STATE:5522if (literal) {5523(void) snprintf(buf, len, "%llu",5524(u_longlong_t)intval);5525} else {5526(void) strlcpy(buf, zpool_state_to_name(intval,5527VDEV_AUX_NONE), len);5528}5529break;5530default:5531(void) snprintf(buf, len, "%llu",5532(u_longlong_t)intval);5533}5534break;55355536case PROP_TYPE_INDEX:5537if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {5538src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);5539intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);5540} else {5541/* 'trim_support' only valid for leaf vdevs */5542if (prop == VDEV_PROP_TRIM_SUPPORT) {5543(void) strlcpy(buf, "-", len);5544break;5545}5546src = ZPROP_SRC_DEFAULT;5547intval = vdev_prop_default_numeric(prop);5548/* Only use if provided by the RAIDZ VDEV above */5549if (prop == VDEV_PROP_RAIDZ_EXPANDING)5550return (ENOENT);5551if (prop == VDEV_PROP_SIT_OUT)5552return (ENOENT);5553}5554if (vdev_prop_index_to_string(prop, intval,5555(const char **)&strval) != 0)5556return (-1);5557(void) strlcpy(buf, strval, len);5558break;55595560default:5561abort();5562}55635564if (srctype)5565*srctype = src;55665567return (0);5568}55695570/*5571* Get a vdev property value for 'prop_name' and return the value in5572* a pre-allocated buffer.5573*/5574int5575zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop,5576char *prop_name, char *buf, size_t len, zprop_source_t *srctype,5577boolean_t literal)5578{5579nvlist_t *reqnvl, *reqprops;5580nvlist_t *retprops = NULL;5581uint64_t vdev_guid = 0;5582int ret;55835584if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)5585return (ret);55865587if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0)5588return (no_memory(zhp->zpool_hdl));5589if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0)5590return (no_memory(zhp->zpool_hdl));55915592fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);55935594if (prop != VDEV_PROP_USERPROP) {5595/* prop_name overrides prop value */5596if (prop_name != NULL)5597prop = vdev_name_to_prop(prop_name);5598else5599prop_name = (char *)vdev_prop_to_name(prop);5600assert(prop < VDEV_NUM_PROPS);5601}56025603assert(prop_name != NULL);5604if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) {5605nvlist_free(reqnvl);5606nvlist_free(reqprops);5607return (no_memory(zhp->zpool_hdl));5608}56095610fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops);56115612ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops);56135614if (ret == 0) {5615ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf,5616len, srctype, literal);5617} else {5618char errbuf[ERRBUFLEN];5619(void) snprintf(errbuf, sizeof (errbuf),5620dgettext(TEXT_DOMAIN, "cannot get vdev property %s from"5621" %s in %s"), prop_name, vdevname, zhp->zpool_name);5622(void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);5623}56245625nvlist_free(reqnvl);5626nvlist_free(reqprops);5627nvlist_free(retprops);56285629return (ret);5630}56315632/*5633* Get all vdev 

/*
 * Get all vdev properties
 */
int
zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname,
    nvlist_t **outnvl)
{
	nvlist_t *nvl = NULL;
	uint64_t vdev_guid = 0;
	int ret;

	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
		return (ret);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);

	ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl);

	nvlist_free(nvl);

	if (ret) {
		char errbuf[ERRBUFLEN];
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot get vdev properties for"
		    " %s in %s"), vdevname, zhp->zpool_name);
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	}

	return (ret);
}

/*
 * Set vdev property
 */
int
zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
    const char *propname, const char *propval)
{
	int ret;
	nvlist_t *nvl = NULL;
	nvlist_t *outnvl = NULL;
	nvlist_t *props;
	nvlist_t *realprops;
	prop_flags_t flags = { 0 };
	uint64_t version;
	uint64_t vdev_guid;

	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
		return (ret);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));
	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);

	if (nvlist_add_string(props, propname, propval) != 0) {
		nvlist_free(props);
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	char errbuf[ERRBUFLEN];
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"),
	    propname, vdevname, zhp->zpool_name);

	flags.vdevprop = 1;
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, props, version, flags, errbuf)) == NULL) {
		nvlist_free(props);
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(props);
	props = realprops;

	fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);

	ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl);

	nvlist_free(props);
	nvlist_free(nvl);
	nvlist_free(outnvl);

	if (ret) {
		if (errno == ENOTSUP) {
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "property not supported for this vdev"));
			(void) zfs_error(zhp->zpool_hdl, EZFS_PROPTYPE, errbuf);
		} else {
			(void) zpool_standard_error(zhp->zpool_hdl, errno,
			    errbuf);
		}
	}

	return (ret);
}

/*
 * Prune older entries from the DDT to reclaim space under the quota
 */
int
zpool_ddt_prune(zpool_handle_t *zhp, zpool_ddt_prune_unit_t unit,
    uint64_t amount)
{
	int error = lzc_ddt_prune(zhp->zpool_name, unit, amount);
	if (error != 0) {
		libzfs_handle_t *hdl = zhp->zpool_hdl;
		char errbuf[ERRBUFLEN];

		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot prune dedup table on '%s'"), zhp->zpool_name);

		if (error == EALREADY) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "a prune operation is already in progress"));
			(void) zfs_error(hdl, EZFS_BUSY, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}
		return (-1);
	}

	return (0);
}
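
/*
 * Usage sketch (illustrative only): vdev properties are passed as strings
 * and validated by zpool_valid_proplist() before the ioctl is issued;
 * "checksum_n" is assumed to be a supported vdev property on this build:
 *
 *	if (zpool_set_vdev_prop(zhp, "sdb", "checksum_n", "10") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 */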