Path: blob/main/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
106883 views
// SPDX-License-Identifier: CDDL-1.01/*2* CDDL HEADER START3*4* The contents of this file are subject to the terms of the5* Common Development and Distribution License (the "License").6* You may not use this file except in compliance with the License.7*8* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE9* or https://opensource.org/licenses/CDDL-1.0.10* See the License for the specific language governing permissions11* and limitations under the License.12*13* When distributing Covered Code, include this CDDL HEADER in each14* file and include the License file at usr/src/OPENSOLARIS.LICENSE.15* If applicable, add the following below this CDDL HEADER, with the16* fields enclosed by brackets "[]" replaced with your own identifying17* information: Portions Copyright [yyyy] [name of copyright owner]18*19* CDDL HEADER END20*/2122/*23* Copyright 2015 Nexenta Systems, Inc. All rights reserved.24* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.25* Copyright (c) 2011, 2024 by Delphix. All rights reserved.26* Copyright 2016 Igor Kozhukhov <[email protected]>27* Copyright (c) 2018 Datto Inc.28* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.29* Copyright (c) 2017, Intel Corporation.30* Copyright (c) 2018, loli10K <[email protected]>31* Copyright (c) 2021, Colm Buckley <[email protected]>32* Copyright (c) 2021, 2023, Klara Inc.33* Copyright (c) 2025 Hewlett Packard Enterprise Development LP.34*/3536#include <errno.h>37#include <libintl.h>38#include <stdio.h>39#include <stdlib.h>40#include <strings.h>41#include <unistd.h>42#include <libgen.h>43#include <zone.h>44#include <sys/stat.h>45#include <sys/efi_partition.h>46#include <sys/systeminfo.h>47#include <sys/zfs_ioctl.h>48#include <sys/zfs_sysfs.h>49#include <sys/vdev_disk.h>50#include <sys/types.h>51#include <dlfcn.h>52#include <libzutil.h>53#include <fcntl.h>5455#include "zfs_namecheck.h"56#include "zfs_prop.h"57#include "libzfs_impl.h"58#include "zfs_comutil.h"59#include "zfeature_common.h"6061static boolean_t zpool_vdev_is_interior(const char *name);6263typedef struct prop_flags {64unsigned int create:1; /* Validate property on creation */65unsigned int import:1; /* Validate property on import */66unsigned int vdevprop:1; /* Validate property as a VDEV property */67} prop_flags_t;6869/*70* ====================================================================71* zpool property functions72* ====================================================================73*/7475static int76zpool_get_all_props(zpool_handle_t *zhp)77{78zfs_cmd_t zc = {"\0"};79libzfs_handle_t *hdl = zhp->zpool_hdl;8081(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));8283if (zhp->zpool_n_propnames > 0) {84nvlist_t *innvl = fnvlist_alloc();85fnvlist_add_string_array(innvl, ZPOOL_GET_PROPS_NAMES,86zhp->zpool_propnames, zhp->zpool_n_propnames);87zcmd_write_src_nvlist(hdl, &zc, innvl);88fnvlist_free(innvl);89}9091zcmd_alloc_dst_nvlist(hdl, &zc, 0);9293while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {94if (errno == ENOMEM)95zcmd_expand_dst_nvlist(hdl, &zc);96else {97zcmd_free_nvlists(&zc);98return (-1);99}100}101102if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {103zcmd_free_nvlists(&zc);104return (-1);105}106107zcmd_free_nvlists(&zc);108109return (0);110}111112int113zpool_props_refresh(zpool_handle_t *zhp)114{115nvlist_t *old_props;116117old_props = zhp->zpool_props;118119if (zpool_get_all_props(zhp) != 0)120return (-1);121122nvlist_free(old_props);123return (0);124}125126static const char *127zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,128zprop_source_t *src)129{130nvlist_t *nv, *nvl;131const char *value;132zprop_source_t source;133134nvl = zhp->zpool_props;135if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {136source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);137value = fnvlist_lookup_string(nv, ZPROP_VALUE);138} else {139source = ZPROP_SRC_DEFAULT;140if ((value = zpool_prop_default_string(prop)) == NULL)141value = "-";142}143144if (src)145*src = source;146147return (value);148}149150uint64_t151zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)152{153nvlist_t *nv, *nvl;154uint64_t value;155zprop_source_t source;156157if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {158/*159* zpool_get_all_props() has most likely failed because160* the pool is faulted, but if all we need is the top level161* vdev's guid then get it from the zhp config nvlist.162*/163if ((prop == ZPOOL_PROP_GUID) &&164(nvlist_lookup_nvlist(zhp->zpool_config,165ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&166(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)167== 0)) {168return (value);169}170return (zpool_prop_default_numeric(prop));171}172173nvl = zhp->zpool_props;174if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {175source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);176value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);177} else {178source = ZPROP_SRC_DEFAULT;179value = zpool_prop_default_numeric(prop);180}181182if (src)183*src = source;184185return (value);186}187188/*189* Map VDEV STATE to printed strings.190*/191const char *192zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)193{194switch (state) {195case VDEV_STATE_CLOSED:196case VDEV_STATE_OFFLINE:197return (gettext("OFFLINE"));198case VDEV_STATE_REMOVED:199return (gettext("REMOVED"));200case VDEV_STATE_CANT_OPEN:201if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)202return (gettext("FAULTED"));203else if (aux == VDEV_AUX_SPLIT_POOL)204return (gettext("SPLIT"));205else206return (gettext("UNAVAIL"));207case VDEV_STATE_FAULTED:208return (gettext("FAULTED"));209case VDEV_STATE_DEGRADED:210return (gettext("DEGRADED"));211case VDEV_STATE_HEALTHY:212return (gettext("ONLINE"));213214default:215break;216}217218return (gettext("UNKNOWN"));219}220221/*222* Map POOL STATE to printed strings.223*/224const char *225zpool_pool_state_to_name(pool_state_t state)226{227switch (state) {228default:229break;230case POOL_STATE_ACTIVE:231return (gettext("ACTIVE"));232case POOL_STATE_EXPORTED:233return (gettext("EXPORTED"));234case POOL_STATE_DESTROYED:235return (gettext("DESTROYED"));236case POOL_STATE_SPARE:237return (gettext("SPARE"));238case POOL_STATE_L2CACHE:239return (gettext("L2CACHE"));240case POOL_STATE_UNINITIALIZED:241return (gettext("UNINITIALIZED"));242case POOL_STATE_UNAVAIL:243return (gettext("UNAVAIL"));244case POOL_STATE_POTENTIALLY_ACTIVE:245return (gettext("POTENTIALLY_ACTIVE"));246}247248return (gettext("UNKNOWN"));249}250251/*252* Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",253* "SUSPENDED", etc).254*/255const char *256zpool_get_state_str(zpool_handle_t *zhp)257{258zpool_errata_t errata;259zpool_status_t status;260const char *str;261262status = zpool_get_status(zhp, NULL, &errata);263264if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {265str = gettext("FAULTED");266} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||267status == ZPOOL_STATUS_IO_FAILURE_CONTINUE ||268status == ZPOOL_STATUS_IO_FAILURE_MMP) {269str = gettext("SUSPENDED");270} else {271nvlist_t *nvroot = fnvlist_lookup_nvlist(272zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);273uint_t vsc;274vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(275nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);276str = zpool_state_to_name(vs->vs_state, vs->vs_aux);277}278return (str);279}280281/*282* Get a zpool property value for 'prop' and return the value in283* a pre-allocated buffer.284*/285int286zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,287size_t len, zprop_source_t *srctype, boolean_t literal)288{289uint64_t intval;290const char *strval;291zprop_source_t src = ZPROP_SRC_NONE;292293if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {294switch (prop) {295case ZPOOL_PROP_NAME:296(void) strlcpy(buf, zpool_get_name(zhp), len);297break;298299case ZPOOL_PROP_HEALTH:300(void) strlcpy(buf, zpool_get_state_str(zhp), len);301break;302303case ZPOOL_PROP_GUID:304intval = zpool_get_prop_int(zhp, prop, &src);305(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);306break;307308case ZPOOL_PROP_ALTROOT:309case ZPOOL_PROP_CACHEFILE:310case ZPOOL_PROP_COMMENT:311case ZPOOL_PROP_COMPATIBILITY:312if (zhp->zpool_props != NULL ||313zpool_get_all_props(zhp) == 0) {314(void) strlcpy(buf,315zpool_get_prop_string(zhp, prop, &src),316len);317break;318}319zfs_fallthrough;320default:321(void) strlcpy(buf, "-", len);322break;323}324325if (srctype != NULL)326*srctype = src;327return (0);328}329330/*331* ZPOOL_PROP_DEDUPCACHED can be fetched by name only using332* the ZPOOL_GET_PROPS_NAMES mechanism333*/334if (prop == ZPOOL_PROP_DEDUPCACHED) {335zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);336(void) zpool_props_refresh(zhp);337}338339if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&340prop != ZPOOL_PROP_NAME)341return (-1);342343switch (zpool_prop_get_type(prop)) {344case PROP_TYPE_STRING:345(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),346len);347break;348349case PROP_TYPE_NUMBER:350intval = zpool_get_prop_int(zhp, prop, &src);351352switch (prop) {353case ZPOOL_PROP_DEDUP_TABLE_QUOTA:354/*355* If dedup quota is 0, we translate this into 'none'356* (unless literal is set). And if it is UINT64_MAX357* we translate that as 'automatic' (limit to size of358* the dedicated dedup VDEV. Otherwise, fall throught359* into the regular number formating.360*/361if (intval == 0) {362(void) strlcpy(buf, literal ? "0" : "none",363len);364break;365} else if (intval == UINT64_MAX) {366(void) strlcpy(buf, "auto", len);367break;368}369zfs_fallthrough;370371case ZPOOL_PROP_SIZE:372case ZPOOL_PROP_ALLOCATED:373case ZPOOL_PROP_FREE:374case ZPOOL_PROP_FREEING:375case ZPOOL_PROP_LEAKED:376case ZPOOL_PROP_ASHIFT:377case ZPOOL_PROP_MAXBLOCKSIZE:378case ZPOOL_PROP_MAXDNODESIZE:379case ZPOOL_PROP_BCLONESAVED:380case ZPOOL_PROP_BCLONEUSED:381case ZPOOL_PROP_DEDUP_TABLE_SIZE:382case ZPOOL_PROP_DEDUPCACHED:383if (literal)384(void) snprintf(buf, len, "%llu",385(u_longlong_t)intval);386else387(void) zfs_nicenum(intval, buf, len);388break;389390case ZPOOL_PROP_EXPANDSZ:391case ZPOOL_PROP_CHECKPOINT:392if (intval == 0) {393(void) strlcpy(buf, "-", len);394} else if (literal) {395(void) snprintf(buf, len, "%llu",396(u_longlong_t)intval);397} else {398(void) zfs_nicebytes(intval, buf, len);399}400break;401402case ZPOOL_PROP_CAPACITY:403if (literal) {404(void) snprintf(buf, len, "%llu",405(u_longlong_t)intval);406} else {407(void) snprintf(buf, len, "%llu%%",408(u_longlong_t)intval);409}410break;411412case ZPOOL_PROP_FRAGMENTATION:413if (intval == UINT64_MAX) {414(void) strlcpy(buf, "-", len);415} else if (literal) {416(void) snprintf(buf, len, "%llu",417(u_longlong_t)intval);418} else {419(void) snprintf(buf, len, "%llu%%",420(u_longlong_t)intval);421}422break;423424case ZPOOL_PROP_BCLONERATIO:425case ZPOOL_PROP_DEDUPRATIO:426if (literal)427(void) snprintf(buf, len, "%llu.%02llu",428(u_longlong_t)(intval / 100),429(u_longlong_t)(intval % 100));430else431(void) snprintf(buf, len, "%llu.%02llux",432(u_longlong_t)(intval / 100),433(u_longlong_t)(intval % 100));434break;435436case ZPOOL_PROP_HEALTH:437(void) strlcpy(buf, zpool_get_state_str(zhp), len);438break;439case ZPOOL_PROP_VERSION:440if (intval >= SPA_VERSION_FEATURES) {441(void) snprintf(buf, len, "-");442break;443}444zfs_fallthrough;445default:446(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);447}448break;449450case PROP_TYPE_INDEX:451intval = zpool_get_prop_int(zhp, prop, &src);452if (zpool_prop_index_to_string(prop, intval, &strval)453!= 0)454return (-1);455(void) strlcpy(buf, strval, len);456break;457458default:459abort();460}461462if (srctype)463*srctype = src;464465return (0);466}467468/*469* Get a zpool property value for 'propname' and return the value in470* a pre-allocated buffer.471*/472int473zpool_get_userprop(zpool_handle_t *zhp, const char *propname, char *buf,474size_t len, zprop_source_t *srctype)475{476nvlist_t *nv;477uint64_t ival;478const char *value;479zprop_source_t source = ZPROP_SRC_LOCAL;480481if (zhp->zpool_props == NULL)482zpool_get_all_props(zhp);483484if (nvlist_lookup_nvlist(zhp->zpool_props, propname, &nv) == 0) {485if (nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0)486source = ival;487verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);488} else {489source = ZPROP_SRC_DEFAULT;490value = "-";491}492493if (srctype)494*srctype = source;495496(void) strlcpy(buf, value, len);497498return (0);499}500501/*502* Check if the bootfs name has the same pool name as it is set to.503* Assuming bootfs is a valid dataset name.504*/505static boolean_t506bootfs_name_valid(const char *pool, const char *bootfs)507{508int len = strlen(pool);509if (bootfs[0] == '\0')510return (B_TRUE);511512if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))513return (B_FALSE);514515if (strncmp(pool, bootfs, len) == 0 &&516(bootfs[len] == '/' || bootfs[len] == '\0'))517return (B_TRUE);518519return (B_FALSE);520}521522/*523* Given an nvlist of zpool properties to be set, validate that they are524* correct, and parse any numeric properties (index, boolean, etc) if they are525* specified as strings.526*/527static nvlist_t *528zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,529nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)530{531nvpair_t *elem;532nvlist_t *retprops;533zpool_prop_t prop;534const char *strval;535uint64_t intval;536const char *check;537struct stat64 statbuf;538zpool_handle_t *zhp;539char *parent, *slash;540char report[1024];541542if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {543(void) no_memory(hdl);544return (NULL);545}546547elem = NULL;548while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {549const char *propname = nvpair_name(elem);550551if (flags.vdevprop && zpool_prop_vdev(propname)) {552vdev_prop_t vprop = vdev_name_to_prop(propname);553554if (vdev_prop_readonly(vprop)) {555zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "556"is readonly"), propname);557(void) zfs_error(hdl, EZFS_PROPREADONLY,558errbuf);559goto error;560}561562if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,563retprops, &strval, &intval, errbuf) != 0)564goto error;565566continue;567} else if (flags.vdevprop && vdev_prop_user(propname)) {568if (nvlist_add_nvpair(retprops, elem) != 0) {569(void) no_memory(hdl);570goto error;571}572continue;573} else if (flags.vdevprop) {574zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,575"invalid property: '%s'"), propname);576(void) zfs_error(hdl, EZFS_BADPROP, errbuf);577goto error;578}579580prop = zpool_name_to_prop(propname);581if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {582int err;583char *fname = strchr(propname, '@') + 1;584585err = zfeature_lookup_name(fname, NULL);586if (err != 0) {587ASSERT3U(err, ==, ENOENT);588zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,589"feature '%s' unsupported by kernel"),590fname);591(void) zfs_error(hdl, EZFS_BADPROP, errbuf);592goto error;593}594595if (nvpair_type(elem) != DATA_TYPE_STRING) {596zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,597"'%s' must be a string"), propname);598(void) zfs_error(hdl, EZFS_BADPROP, errbuf);599goto error;600}601602(void) nvpair_value_string(elem, &strval);603if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&604strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {605zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,606"property '%s' can only be set to "607"'enabled' or 'disabled'"), propname);608(void) zfs_error(hdl, EZFS_BADPROP, errbuf);609goto error;610}611612if (!flags.create &&613strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {614zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,615"property '%s' can only be set to "616"'disabled' at creation time"), propname);617(void) zfs_error(hdl, EZFS_BADPROP, errbuf);618goto error;619}620621if (nvlist_add_uint64(retprops, propname, 0) != 0) {622(void) no_memory(hdl);623goto error;624}625continue;626} else if (prop == ZPOOL_PROP_INVAL &&627zfs_prop_user(propname)) {628/*629* This is a user property: make sure it's a630* string, and that it's less than ZAP_MAXNAMELEN.631*/632if (nvpair_type(elem) != DATA_TYPE_STRING) {633zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,634"'%s' must be a string"), propname);635(void) zfs_error(hdl, EZFS_BADPROP, errbuf);636goto error;637}638639if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {640zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,641"property name '%s' is too long"),642propname);643(void) zfs_error(hdl, EZFS_BADPROP, errbuf);644goto error;645}646647(void) nvpair_value_string(elem, &strval);648649if (strlen(strval) >= ZFS_MAXPROPLEN) {650zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,651"property value '%s' is too long"),652strval);653(void) zfs_error(hdl, EZFS_BADPROP, errbuf);654goto error;655}656657if (nvlist_add_string(retprops, propname,658strval) != 0) {659(void) no_memory(hdl);660goto error;661}662663continue;664}665666/*667* Make sure this property is valid and applies to this type.668*/669if (prop == ZPOOL_PROP_INVAL) {670zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,671"invalid property '%s'"), propname);672(void) zfs_error(hdl, EZFS_BADPROP, errbuf);673goto error;674}675676if (zpool_prop_readonly(prop)) {677zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "678"is readonly"), propname);679(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);680goto error;681}682683if (!flags.create && zpool_prop_setonce(prop)) {684zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,685"property '%s' can only be set at "686"creation time"), propname);687(void) zfs_error(hdl, EZFS_BADPROP, errbuf);688goto error;689}690691if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,692&strval, &intval, errbuf) != 0)693goto error;694695/*696* Perform additional checking for specific properties.697*/698switch (prop) {699case ZPOOL_PROP_VERSION:700if (intval < version ||701!SPA_VERSION_IS_SUPPORTED(intval)) {702zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,703"property '%s' number %llu is invalid."),704propname, (unsigned long long)intval);705(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);706goto error;707}708break;709710case ZPOOL_PROP_ASHIFT:711if (intval != 0 &&712(intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {713zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,714"property '%s' number %llu is invalid, "715"only values between %" PRId32 " and %"716PRId32 " are allowed."),717propname, (unsigned long long)intval,718ASHIFT_MIN, ASHIFT_MAX);719(void) zfs_error(hdl, EZFS_BADPROP, errbuf);720goto error;721}722break;723724case ZPOOL_PROP_BOOTFS:725if (flags.create || flags.import) {726zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,727"property '%s' cannot be set at creation "728"or import time"), propname);729(void) zfs_error(hdl, EZFS_BADPROP, errbuf);730goto error;731}732733if (version < SPA_VERSION_BOOTFS) {734zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,735"pool must be upgraded to support "736"'%s' property"), propname);737(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);738goto error;739}740741/*742* bootfs property value has to be a dataset name and743* the dataset has to be in the same pool as it sets to.744*/745if (!bootfs_name_valid(poolname, strval)) {746zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "747"is an invalid name"), strval);748(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);749goto error;750}751752if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {753zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,754"could not open pool '%s'"), poolname);755(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);756goto error;757}758zpool_close(zhp);759break;760761case ZPOOL_PROP_ALTROOT:762if (!flags.create && !flags.import) {763zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,764"property '%s' can only be set during pool "765"creation or import"), propname);766(void) zfs_error(hdl, EZFS_BADPROP, errbuf);767goto error;768}769770if (strval[0] != '/') {771zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,772"bad alternate root '%s'"), strval);773(void) zfs_error(hdl, EZFS_BADPATH, errbuf);774goto error;775}776break;777778case ZPOOL_PROP_CACHEFILE:779if (strval[0] == '\0')780break;781782if (strcmp(strval, "none") == 0)783break;784785if (strval[0] != '/') {786zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,787"property '%s' must be empty, an "788"absolute path, or 'none'"), propname);789(void) zfs_error(hdl, EZFS_BADPATH, errbuf);790goto error;791}792793parent = strdup(strval);794if (parent == NULL) {795(void) zfs_error(hdl, EZFS_NOMEM, errbuf);796goto error;797}798slash = strrchr(parent, '/');799800if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||801strcmp(slash, "/..") == 0) {802zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,803"'%s' is not a valid file"), parent);804(void) zfs_error(hdl, EZFS_BADPATH, errbuf);805free(parent);806goto error;807}808809*slash = '\0';810811if (parent[0] != '\0' &&812(stat64(parent, &statbuf) != 0 ||813!S_ISDIR(statbuf.st_mode))) {814zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,815"'%s' is not a valid directory"),816parent);817(void) zfs_error(hdl, EZFS_BADPATH, errbuf);818free(parent);819goto error;820}821free(parent);822823break;824825case ZPOOL_PROP_COMPATIBILITY:826switch (zpool_load_compat(strval, NULL, report, 1024)) {827case ZPOOL_COMPATIBILITY_OK:828case ZPOOL_COMPATIBILITY_WARNTOKEN:829break;830case ZPOOL_COMPATIBILITY_BADFILE:831case ZPOOL_COMPATIBILITY_BADTOKEN:832case ZPOOL_COMPATIBILITY_NOFILES:833zfs_error_aux(hdl, "%s", report);834(void) zfs_error(hdl, EZFS_BADPROP, errbuf);835goto error;836}837break;838839case ZPOOL_PROP_COMMENT:840for (check = strval; *check != '\0'; check++) {841if (!isprint(*check)) {842zfs_error_aux(hdl,843dgettext(TEXT_DOMAIN,844"comment may only have printable "845"characters"));846(void) zfs_error(hdl, EZFS_BADPROP,847errbuf);848goto error;849}850}851if (strlen(strval) > ZPROP_MAX_COMMENT) {852zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,853"comment must not exceed %d characters"),854ZPROP_MAX_COMMENT);855(void) zfs_error(hdl, EZFS_BADPROP, errbuf);856goto error;857}858break;859case ZPOOL_PROP_READONLY:860if (!flags.import) {861zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,862"property '%s' can only be set at "863"import time"), propname);864(void) zfs_error(hdl, EZFS_BADPROP, errbuf);865goto error;866}867break;868case ZPOOL_PROP_MULTIHOST:869if (get_system_hostid() == 0) {870zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,871"requires a non-zero system hostid"));872(void) zfs_error(hdl, EZFS_BADPROP, errbuf);873goto error;874}875break;876case ZPOOL_PROP_DEDUPDITTO:877printf("Note: property '%s' no longer has "878"any effect\n", propname);879break;880881default:882break;883}884}885886return (retprops);887error:888nvlist_free(retprops);889return (NULL);890}891892/*893* Set zpool property : propname=propval.894*/895int896zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)897{898zfs_cmd_t zc = {"\0"};899int ret;900char errbuf[ERRBUFLEN];901nvlist_t *nvl = NULL;902nvlist_t *realprops;903uint64_t version;904prop_flags_t flags = { 0 };905906(void) snprintf(errbuf, sizeof (errbuf),907dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),908zhp->zpool_name);909910if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)911return (no_memory(zhp->zpool_hdl));912913if (nvlist_add_string(nvl, propname, propval) != 0) {914nvlist_free(nvl);915return (no_memory(zhp->zpool_hdl));916}917918version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);919if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,920zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {921nvlist_free(nvl);922return (-1);923}924925nvlist_free(nvl);926nvl = realprops;927928/*929* Execute the corresponding ioctl() to set this property.930*/931(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));932933zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl);934935ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);936937zcmd_free_nvlists(&zc);938nvlist_free(nvl);939940if (ret)941(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);942else943(void) zpool_props_refresh(zhp);944945return (ret);946}947948int949zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,950zfs_type_t type, boolean_t literal)951{952libzfs_handle_t *hdl = zhp->zpool_hdl;953zprop_list_t *entry;954char buf[ZFS_MAXPROPLEN];955nvlist_t *features = NULL;956nvpair_t *nvp;957zprop_list_t **last;958boolean_t firstexpand = (NULL == *plp);959int i;960961if (zprop_expand_list(hdl, plp, type) != 0)962return (-1);963964if (type == ZFS_TYPE_VDEV)965return (0);966967last = plp;968while (*last != NULL)969last = &(*last)->pl_next;970971if ((*plp)->pl_all)972features = zpool_get_features(zhp);973974if ((*plp)->pl_all && firstexpand) {975/* Handle userprops in the all properties case */976if (zhp->zpool_props == NULL && zpool_props_refresh(zhp))977return (-1);978979nvp = NULL;980while ((nvp = nvlist_next_nvpair(zhp->zpool_props, nvp)) !=981NULL) {982const char *propname = nvpair_name(nvp);983984if (!zfs_prop_user(propname))985continue;986987entry = zfs_alloc(hdl, sizeof (zprop_list_t));988entry->pl_prop = ZPROP_USERPROP;989entry->pl_user_prop = zfs_strdup(hdl, propname);990entry->pl_width = strlen(entry->pl_user_prop);991entry->pl_all = B_TRUE;992993*last = entry;994last = &entry->pl_next;995}996997for (i = 0; i < SPA_FEATURES; i++) {998entry = zfs_alloc(hdl, sizeof (zprop_list_t));999entry->pl_prop = ZPROP_USERPROP;1000entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",1001spa_feature_table[i].fi_uname);1002entry->pl_width = strlen(entry->pl_user_prop);1003entry->pl_all = B_TRUE;10041005*last = entry;1006last = &entry->pl_next;1007}1008}10091010/* add any unsupported features */1011for (nvp = nvlist_next_nvpair(features, NULL);1012nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {1013char *propname;1014boolean_t found;10151016if (zfeature_is_supported(nvpair_name(nvp)))1017continue;10181019propname = zfs_asprintf(hdl, "unsupported@%s",1020nvpair_name(nvp));10211022/*1023* Before adding the property to the list make sure that no1024* other pool already added the same property.1025*/1026found = B_FALSE;1027entry = *plp;1028while (entry != NULL) {1029if (entry->pl_user_prop != NULL &&1030strcmp(propname, entry->pl_user_prop) == 0) {1031found = B_TRUE;1032break;1033}1034entry = entry->pl_next;1035}1036if (found) {1037free(propname);1038continue;1039}10401041entry = zfs_alloc(hdl, sizeof (zprop_list_t));1042entry->pl_prop = ZPROP_USERPROP;1043entry->pl_user_prop = propname;1044entry->pl_width = strlen(entry->pl_user_prop);1045entry->pl_all = B_TRUE;10461047*last = entry;1048last = &entry->pl_next;1049}10501051for (entry = *plp; entry != NULL; entry = entry->pl_next) {1052if (entry->pl_fixed && !literal)1053continue;10541055if (entry->pl_prop != ZPROP_USERPROP &&1056zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),1057NULL, literal) == 0) {1058if (strlen(buf) > entry->pl_width)1059entry->pl_width = strlen(buf);1060} else if (entry->pl_prop == ZPROP_INVAL &&1061zfs_prop_user(entry->pl_user_prop) &&1062zpool_get_userprop(zhp, entry->pl_user_prop, buf,1063sizeof (buf), NULL) == 0) {1064if (strlen(buf) > entry->pl_width)1065entry->pl_width = strlen(buf);1066}1067}10681069return (0);1070}10711072int1073vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,1074zprop_list_t **plp)1075{1076zprop_list_t *entry;1077char buf[ZFS_MAXPROPLEN];1078const char *strval = NULL;1079int err = 0;1080nvpair_t *elem = NULL;1081nvlist_t *vprops = NULL;1082nvlist_t *propval = NULL;1083const char *propname;1084vdev_prop_t prop;1085zprop_list_t **last;10861087for (entry = *plp; entry != NULL; entry = entry->pl_next) {1088if (entry->pl_fixed)1089continue;10901091if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,1092entry->pl_user_prop, buf, sizeof (buf), NULL,1093B_FALSE) == 0) {1094if (strlen(buf) > entry->pl_width)1095entry->pl_width = strlen(buf);1096}1097if (entry->pl_prop == VDEV_PROP_NAME &&1098strlen(vdevname) > entry->pl_width)1099entry->pl_width = strlen(vdevname);1100}11011102/* Handle the all properties case */1103last = plp;1104if (*last != NULL && (*last)->pl_all == B_TRUE) {1105while (*last != NULL)1106last = &(*last)->pl_next;11071108err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);1109if (err != 0)1110return (err);11111112while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {1113propname = nvpair_name(elem);11141115/* Skip properties that are not user defined */1116if ((prop = vdev_name_to_prop(propname)) !=1117VDEV_PROP_USERPROP)1118continue;11191120if (nvpair_value_nvlist(elem, &propval) != 0)1121continue;11221123strval = fnvlist_lookup_string(propval, ZPROP_VALUE);11241125entry = zfs_alloc(zhp->zpool_hdl,1126sizeof (zprop_list_t));1127entry->pl_prop = prop;1128entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,1129propname);1130entry->pl_width = strlen(strval);1131entry->pl_all = B_TRUE;1132*last = entry;1133last = &entry->pl_next;1134}1135}11361137return (0);1138}11391140/*1141* Get the state for the given feature on the given ZFS pool.1142*/1143int1144zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,1145size_t len)1146{1147uint64_t refcount;1148boolean_t found = B_FALSE;1149nvlist_t *features = zpool_get_features(zhp);1150boolean_t supported;1151const char *feature = strchr(propname, '@') + 1;11521153supported = zpool_prop_feature(propname);1154ASSERT(supported || zpool_prop_unsupported(propname));11551156/*1157* Convert from feature name to feature guid. This conversion is1158* unnecessary for unsupported@... properties because they already1159* use guids.1160*/1161if (supported) {1162int ret;1163spa_feature_t fid;11641165ret = zfeature_lookup_name(feature, &fid);1166if (ret != 0) {1167(void) strlcpy(buf, "-", len);1168return (ENOTSUP);1169}1170feature = spa_feature_table[fid].fi_guid;1171}11721173if (nvlist_lookup_uint64(features, feature, &refcount) == 0)1174found = B_TRUE;11751176if (supported) {1177if (!found) {1178(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);1179} else {1180if (refcount == 0)1181(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);1182else1183(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);1184}1185} else {1186if (found) {1187if (refcount == 0) {1188(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);1189} else {1190(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);1191}1192} else {1193(void) strlcpy(buf, "-", len);1194return (ENOTSUP);1195}1196}11971198return (0);1199}12001201/*1202* Validate the given pool name, optionally putting an extended error message in1203* 'buf'.1204*/1205boolean_t1206zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)1207{1208namecheck_err_t why;1209char what;1210int ret;12111212ret = pool_namecheck(pool, &why, &what);12131214/*1215* The rules for reserved pool names were extended at a later point.1216* But we need to support users with existing pools that may now be1217* invalid. So we only check for this expanded set of names during a1218* create (or import), and only in userland.1219*/1220if (ret == 0 && !isopen &&1221(strncmp(pool, "mirror", 6) == 0 ||1222strncmp(pool, "raidz", 5) == 0 ||1223strncmp(pool, "draid", 5) == 0 ||1224strncmp(pool, "spare", 5) == 0 ||1225strcmp(pool, "log") == 0)) {1226if (hdl != NULL)1227zfs_error_aux(hdl,1228dgettext(TEXT_DOMAIN, "name is reserved"));1229return (B_FALSE);1230}123112321233if (ret != 0) {1234if (hdl != NULL) {1235switch (why) {1236case NAME_ERR_TOOLONG:1237zfs_error_aux(hdl,1238dgettext(TEXT_DOMAIN, "name is too long"));1239break;12401241case NAME_ERR_INVALCHAR:1242zfs_error_aux(hdl,1243dgettext(TEXT_DOMAIN, "invalid character "1244"'%c' in pool name"), what);1245break;12461247case NAME_ERR_NOLETTER:1248zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1249"name must begin with a letter"));1250break;12511252case NAME_ERR_RESERVED:1253zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1254"name is reserved"));1255break;12561257case NAME_ERR_DISKLIKE:1258zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1259"pool name is reserved"));1260break;12611262case NAME_ERR_LEADING_SLASH:1263zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1264"leading slash in name"));1265break;12661267case NAME_ERR_EMPTY_COMPONENT:1268zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1269"empty component in name"));1270break;12711272case NAME_ERR_TRAILING_SLASH:1273zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1274"trailing slash in name"));1275break;12761277case NAME_ERR_MULTIPLE_DELIMITERS:1278zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1279"multiple '@' and/or '#' delimiters in "1280"name"));1281break;12821283case NAME_ERR_NO_AT:1284zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1285"permission set is missing '@'"));1286break;12871288default:1289zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1290"(%d) not defined"), why);1291break;1292}1293}1294return (B_FALSE);1295}12961297return (B_TRUE);1298}12991300/*1301* Open a handle to the given pool, even if the pool is currently in the FAULTED1302* state.1303*/1304zpool_handle_t *1305zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)1306{1307zpool_handle_t *zhp;1308boolean_t missing;13091310/*1311* Make sure the pool name is valid.1312*/1313if (!zpool_name_valid(hdl, B_TRUE, pool)) {1314(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,1315dgettext(TEXT_DOMAIN, "cannot open '%s'"),1316pool);1317return (NULL);1318}13191320zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));13211322zhp->zpool_hdl = hdl;1323(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));13241325if (zpool_refresh_stats(zhp, &missing) != 0) {1326zpool_close(zhp);1327return (NULL);1328}13291330if (missing) {1331zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));1332(void) zfs_error_fmt(hdl, EZFS_NOENT,1333dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);1334zpool_close(zhp);1335return (NULL);1336}13371338return (zhp);1339}13401341/*1342* Like the above, but silent on error. Used when iterating over pools (because1343* the configuration cache may be out of date).1344*/1345int1346zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)1347{1348zpool_handle_t *zhp;1349boolean_t missing;13501351zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));13521353zhp->zpool_hdl = hdl;1354(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));13551356if (zpool_refresh_stats(zhp, &missing) != 0) {1357zpool_close(zhp);1358return (-1);1359}13601361if (missing) {1362zpool_close(zhp);1363*ret = NULL;1364return (0);1365}13661367*ret = zhp;1368return (0);1369}13701371/*1372* Similar to zpool_open_canfail(), but refuses to open pools in the faulted1373* state.1374*/1375zpool_handle_t *1376zpool_open(libzfs_handle_t *hdl, const char *pool)1377{1378zpool_handle_t *zhp;13791380if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)1381return (NULL);13821383if (zhp->zpool_state == POOL_STATE_UNAVAIL) {1384(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,1385dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);1386zpool_close(zhp);1387return (NULL);1388}13891390return (zhp);1391}13921393/*1394* Close the handle. Simply frees the memory associated with the handle.1395*/1396void1397zpool_close(zpool_handle_t *zhp)1398{1399nvlist_free(zhp->zpool_config);1400nvlist_free(zhp->zpool_old_config);1401nvlist_free(zhp->zpool_props);1402free(zhp);1403}14041405/*1406* Return the name of the pool.1407*/1408const char *1409zpool_get_name(zpool_handle_t *zhp)1410{1411return (zhp->zpool_name);1412}141314141415/*1416* Return the state of the pool (ACTIVE or UNAVAILABLE)1417*/1418int1419zpool_get_state(zpool_handle_t *zhp)1420{1421return (zhp->zpool_state);1422}14231424/*1425* Check if vdev list contains a dRAID vdev1426*/1427static boolean_t1428zpool_has_draid_vdev(nvlist_t *nvroot)1429{1430nvlist_t **child;1431uint_t children;14321433if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,1434&child, &children) == 0) {1435for (uint_t c = 0; c < children; c++) {1436const char *type;14371438if (nvlist_lookup_string(child[c],1439ZPOOL_CONFIG_TYPE, &type) == 0 &&1440strcmp(type, VDEV_TYPE_DRAID) == 0) {1441return (B_TRUE);1442}1443}1444}1445return (B_FALSE);1446}14471448/*1449* Output a dRAID top-level vdev name in to the provided buffer.1450*/1451static char *1452zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,1453uint64_t spares, uint64_t children)1454{1455snprintf(name, len, "%s%llu:%llud:%lluc:%llus",1456VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,1457(u_longlong_t)children, (u_longlong_t)spares);14581459return (name);1460}14611462/*1463* Return B_TRUE if the provided name is a dRAID spare name.1464*/1465boolean_t1466zpool_is_draid_spare(const char *name)1467{1468uint64_t spare_id, parity, vdev_id;14691470if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",1471(u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,1472(u_longlong_t *)&spare_id) == 3) {1473return (B_TRUE);1474}14751476return (B_FALSE);1477}14781479/*1480* Create the named pool, using the provided vdev list. It is assumed1481* that the consumer has already validated the contents of the nvlist, so we1482* don't have to worry about error semantics.1483*/1484int1485zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,1486nvlist_t *props, nvlist_t *fsprops)1487{1488zfs_cmd_t zc = {"\0"};1489nvlist_t *zc_fsprops = NULL;1490nvlist_t *zc_props = NULL;1491nvlist_t *hidden_args = NULL;1492uint8_t *wkeydata = NULL;1493uint_t wkeylen = 0;1494char errbuf[ERRBUFLEN];1495int ret = -1;14961497(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,1498"cannot create '%s'"), pool);14991500if (!zpool_name_valid(hdl, B_FALSE, pool))1501return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));15021503zcmd_write_conf_nvlist(hdl, &zc, nvroot);15041505if (props) {1506prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };15071508if ((zc_props = zpool_valid_proplist(hdl, pool, props,1509SPA_VERSION_1, flags, errbuf)) == NULL) {1510goto create_failed;1511}1512}15131514if (fsprops) {1515uint64_t zoned;1516const char *zonestr;15171518zoned = ((nvlist_lookup_string(fsprops,1519zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&1520strcmp(zonestr, "on") == 0);15211522if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,1523fsprops, zoned, NULL, NULL, B_TRUE, errbuf)) == NULL) {1524goto create_failed;1525}15261527if (!zc_props &&1528(nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {1529goto create_failed;1530}1531if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,1532&wkeydata, &wkeylen) != 0) {1533zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);1534goto create_failed;1535}1536if (nvlist_add_nvlist(zc_props,1537ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {1538goto create_failed;1539}1540if (wkeydata != NULL) {1541if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)1542goto create_failed;15431544if (nvlist_add_uint8_array(hidden_args, "wkeydata",1545wkeydata, wkeylen) != 0)1546goto create_failed;15471548if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,1549hidden_args) != 0)1550goto create_failed;1551}1552}15531554if (zc_props)1555zcmd_write_src_nvlist(hdl, &zc, zc_props);15561557(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));15581559if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {15601561zcmd_free_nvlists(&zc);1562nvlist_free(zc_props);1563nvlist_free(zc_fsprops);1564nvlist_free(hidden_args);1565if (wkeydata != NULL)1566free(wkeydata);15671568switch (errno) {1569case EBUSY:1570/*1571* This can happen if the user has specified the same1572* device multiple times. We can't reliably detect this1573* until we try to add it and see we already have a1574* label. This can also happen under if the device is1575* part of an active md or lvm device.1576*/1577zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1578"one or more vdevs refer to the same device, or "1579"one of\nthe devices is part of an active md or "1580"lvm device"));1581return (zfs_error(hdl, EZFS_BADDEV, errbuf));15821583case ERANGE:1584/*1585* This happens if the record size is smaller or larger1586* than the allowed size range, or not a power of 2.1587*1588* NOTE: although zfs_valid_proplist is called earlier,1589* this case may have slipped through since the1590* pool does not exist yet and it is therefore1591* impossible to read properties e.g. max blocksize1592* from the pool.1593*/1594zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1595"record size invalid"));1596return (zfs_error(hdl, EZFS_BADPROP, errbuf));15971598case EOVERFLOW:1599/*1600* This occurs when one of the devices is below1601* SPA_MINDEVSIZE. Unfortunately, we can't detect which1602* device was the problem device since there's no1603* reliable way to determine device size from userland.1604*/1605{1606char buf[64];16071608zfs_nicebytes(SPA_MINDEVSIZE, buf,1609sizeof (buf));16101611zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1612"one or more devices is less than the "1613"minimum size (%s)"), buf);1614}1615return (zfs_error(hdl, EZFS_BADDEV, errbuf));16161617case ENOSPC:1618zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1619"one or more devices is out of space"));1620return (zfs_error(hdl, EZFS_BADDEV, errbuf));16211622case EINVAL:1623if (zpool_has_draid_vdev(nvroot) &&1624zfeature_lookup_name("draid", NULL) != 0) {1625zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1626"dRAID vdevs are unsupported by the "1627"kernel"));1628return (zfs_error(hdl, EZFS_BADDEV, errbuf));1629} else {1630return (zpool_standard_error(hdl, errno,1631errbuf));1632}16331634default:1635return (zpool_standard_error(hdl, errno, errbuf));1636}1637}16381639create_failed:1640zcmd_free_nvlists(&zc);1641nvlist_free(zc_props);1642nvlist_free(zc_fsprops);1643nvlist_free(hidden_args);1644if (wkeydata != NULL)1645free(wkeydata);1646return (ret);1647}16481649/*1650* Destroy the given pool. It is up to the caller to ensure that there are no1651* datasets left in the pool.1652*/1653int1654zpool_destroy(zpool_handle_t *zhp, const char *log_str)1655{1656zfs_cmd_t zc = {"\0"};1657zfs_handle_t *zfp = NULL;1658libzfs_handle_t *hdl = zhp->zpool_hdl;1659char errbuf[ERRBUFLEN];16601661if (zhp->zpool_state == POOL_STATE_ACTIVE &&1662(zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)1663return (-1);16641665(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));1666zc.zc_history = (uint64_t)(uintptr_t)log_str;16671668if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {1669(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,1670"cannot destroy '%s'"), zhp->zpool_name);16711672if (errno == EROFS) {1673zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1674"one or more devices is read only"));1675(void) zfs_error(hdl, EZFS_BADDEV, errbuf);1676} else {1677(void) zpool_standard_error(hdl, errno, errbuf);1678}16791680if (zfp)1681zfs_close(zfp);1682return (-1);1683}16841685if (zfp) {1686remove_mountpoint(zfp);1687zfs_close(zfp);1688}16891690return (0);1691}16921693/*1694* Create a checkpoint in the given pool.1695*/1696int1697zpool_checkpoint(zpool_handle_t *zhp)1698{1699libzfs_handle_t *hdl = zhp->zpool_hdl;1700char errbuf[ERRBUFLEN];1701int error;17021703error = lzc_pool_checkpoint(zhp->zpool_name);1704if (error != 0) {1705(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,1706"cannot checkpoint '%s'"), zhp->zpool_name);1707(void) zpool_standard_error(hdl, error, errbuf);1708return (-1);1709}17101711return (0);1712}17131714/*1715* Discard the checkpoint from the given pool.1716*/1717int1718zpool_discard_checkpoint(zpool_handle_t *zhp)1719{1720libzfs_handle_t *hdl = zhp->zpool_hdl;1721char errbuf[ERRBUFLEN];1722int error;17231724error = lzc_pool_checkpoint_discard(zhp->zpool_name);1725if (error != 0) {1726(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,1727"cannot discard checkpoint in '%s'"), zhp->zpool_name);1728(void) zpool_standard_error(hdl, error, errbuf);1729return (-1);1730}17311732return (0);1733}17341735/*1736* Load data type for the given pool.1737*/1738int1739zpool_prefetch(zpool_handle_t *zhp, zpool_prefetch_type_t type)1740{1741libzfs_handle_t *hdl = zhp->zpool_hdl;1742char msg[1024];1743int error;17441745error = lzc_pool_prefetch(zhp->zpool_name, type);1746if (error != 0) {1747const char *typename = "unknown";1748if (type == ZPOOL_PREFETCH_DDT)1749typename = "ddt";1750else if (type == ZPOOL_PREFETCH_BRT)1751typename = "brt";1752(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,1753"cannot prefetch %s in '%s'"), typename, zhp->zpool_name);1754(void) zpool_standard_error(hdl, error, msg);1755return (-1);1756}17571758return (0);1759}17601761/*1762* Add the given vdevs to the pool. The caller must have already performed the1763* necessary verification to ensure that the vdev specification is well-formed.1764*/1765int1766zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot, boolean_t check_ashift)1767{1768zfs_cmd_t zc = {"\0"};1769int ret;1770libzfs_handle_t *hdl = zhp->zpool_hdl;1771char errbuf[ERRBUFLEN];1772nvlist_t **spares, **l2cache;1773uint_t nspares, nl2cache;17741775(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,1776"cannot add to '%s'"), zhp->zpool_name);17771778if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <1779SPA_VERSION_SPARES &&1780nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,1781&spares, &nspares) == 0) {1782zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "1783"upgraded to add hot spares"));1784return (zfs_error(hdl, EZFS_BADVERSION, errbuf));1785}17861787if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <1788SPA_VERSION_L2CACHE &&1789nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,1790&l2cache, &nl2cache) == 0) {1791zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "1792"upgraded to add cache devices"));1793return (zfs_error(hdl, EZFS_BADVERSION, errbuf));1794}17951796zcmd_write_conf_nvlist(hdl, &zc, nvroot);1797(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));1798zc.zc_flags = check_ashift;17991800if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {1801switch (errno) {1802case EBUSY:1803/*1804* This can happen if the user has specified the same1805* device multiple times. We can't reliably detect this1806* until we try to add it and see we already have a1807* label.1808*/1809zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1810"one or more vdevs refer to the same device"));1811(void) zfs_error(hdl, EZFS_BADDEV, errbuf);1812break;18131814case EINVAL:18151816if (zpool_has_draid_vdev(nvroot) &&1817zfeature_lookup_name("draid", NULL) != 0) {1818zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1819"dRAID vdevs are unsupported by the "1820"kernel"));1821} else {1822zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1823"invalid config; a pool with removing/"1824"removed vdevs does not support adding "1825"raidz or dRAID vdevs"));1826}18271828(void) zfs_error(hdl, EZFS_BADDEV, errbuf);1829break;18301831case EOVERFLOW:1832/*1833* This occurs when one of the devices is below1834* SPA_MINDEVSIZE. Unfortunately, we can't detect which1835* device was the problem device since there's no1836* reliable way to determine device size from userland.1837*/1838{1839char buf[64];18401841zfs_nicebytes(SPA_MINDEVSIZE, buf,1842sizeof (buf));18431844zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1845"device is less than the minimum "1846"size (%s)"), buf);1847}1848(void) zfs_error(hdl, EZFS_BADDEV, errbuf);1849break;18501851case ENOTSUP:1852zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,1853"pool must be upgraded to add these vdevs"));1854(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);1855break;18561857default:1858(void) zpool_standard_error(hdl, errno, errbuf);1859}18601861ret = -1;1862} else {1863ret = 0;1864}18651866zcmd_free_nvlists(&zc);18671868return (ret);1869}18701871/*1872* Exports the pool from the system. The caller must ensure that there are no1873* mounted datasets in the pool.1874*/1875static int1876zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,1877const char *log_str)1878{1879zfs_cmd_t zc = {"\0"};18801881(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));1882zc.zc_cookie = force;1883zc.zc_guid = hardforce;1884zc.zc_history = (uint64_t)(uintptr_t)log_str;18851886if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {1887switch (errno) {1888case EXDEV:1889zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,1890"use '-f' to override the following errors:\n"1891"'%s' has an active shared spare which could be"1892" used by other pools once '%s' is exported."),1893zhp->zpool_name, zhp->zpool_name);1894return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,1895dgettext(TEXT_DOMAIN, "cannot export '%s'"),1896zhp->zpool_name));1897default:1898return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,1899dgettext(TEXT_DOMAIN, "cannot export '%s'"),1900zhp->zpool_name));1901}1902}19031904return (0);1905}19061907int1908zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)1909{1910return (zpool_export_common(zhp, force, B_FALSE, log_str));1911}19121913int1914zpool_export_force(zpool_handle_t *zhp, const char *log_str)1915{1916return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));1917}19181919static void1920zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,1921nvlist_t *config)1922{1923nvlist_t *nv = NULL;1924uint64_t rewindto;1925int64_t loss = -1;1926struct tm t;1927char timestr[128];19281929if (!hdl->libzfs_printerr || config == NULL)1930return;19311932if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||1933nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {1934return;1935}19361937if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)1938return;1939(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);19401941if (localtime_r((time_t *)&rewindto, &t) != NULL &&1942ctime_r((time_t *)&rewindto, timestr) != NULL) {1943timestr[24] = 0;1944if (dryrun) {1945(void) printf(dgettext(TEXT_DOMAIN,1946"Would be able to return %s "1947"to its state as of %s.\n"),1948name, timestr);1949} else {1950(void) printf(dgettext(TEXT_DOMAIN,1951"Pool %s returned to its state as of %s.\n"),1952name, timestr);1953}1954if (loss > 120) {1955(void) printf(dgettext(TEXT_DOMAIN,1956"%s approximately %lld "),1957dryrun ? "Would discard" : "Discarded",1958((longlong_t)loss + 30) / 60);1959(void) printf(dgettext(TEXT_DOMAIN,1960"minutes of transactions.\n"));1961} else if (loss > 0) {1962(void) printf(dgettext(TEXT_DOMAIN,1963"%s approximately %lld "),1964dryrun ? "Would discard" : "Discarded",1965(longlong_t)loss);1966(void) printf(dgettext(TEXT_DOMAIN,1967"seconds of transactions.\n"));1968}1969}1970}19711972void1973zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,1974nvlist_t *config, char *buf, size_t size)1975{1976nvlist_t *nv = NULL;1977int64_t loss = -1;1978uint64_t edata = UINT64_MAX;1979uint64_t rewindto;1980struct tm t;1981char timestr[128], temp[1024];19821983if (!hdl->libzfs_printerr)1984return;19851986/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */1987if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||1988nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||1989nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)1990goto no_info;19911992(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);1993(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,1994&edata);19951996(void) snprintf(buf, size, dgettext(TEXT_DOMAIN,1997"Recovery is possible, but will result in some data loss.\n"));19981999if (localtime_r((time_t *)&rewindto, &t) != NULL &&2000ctime_r((time_t *)&rewindto, timestr) != NULL) {2001timestr[24] = 0;2002(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,2003"\tReturning the pool to its state as of %s\n"2004"\tshould correct the problem. "), timestr);2005(void) strlcat(buf, temp, size);2006} else {2007(void) strlcat(buf, dgettext(TEXT_DOMAIN,2008"\tReverting the pool to an earlier state "2009"should correct the problem.\n\t"), size);2010}20112012if (loss > 120) {2013(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,2014"Approximately %lld minutes of data\n"2015"\tmust be discarded, irreversibly. "),2016((longlong_t)loss + 30) / 60);2017(void) strlcat(buf, temp, size);2018} else if (loss > 0) {2019(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,2020"Approximately %lld seconds of data\n"2021"\tmust be discarded, irreversibly. "),2022(longlong_t)loss);2023(void) strlcat(buf, temp, size);2024}2025if (edata != 0 && edata != UINT64_MAX) {2026if (edata == 1) {2027(void) strlcat(buf, dgettext(TEXT_DOMAIN,2028"After rewind, at least\n"2029"\tone persistent user-data error will remain. "),2030size);2031} else {2032(void) strlcat(buf, dgettext(TEXT_DOMAIN,2033"After rewind, several\n"2034"\tpersistent user-data errors will remain. "),2035size);2036}2037}2038(void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,2039"Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),2040reason >= 0 ? "clear" : "import", name);2041(void) strlcat(buf, temp, size);20422043(void) strlcat(buf, dgettext(TEXT_DOMAIN,2044"A scrub of the pool\n"2045"\tis strongly recommended after recovery.\n"), size);2046return;20472048no_info:2049(void) strlcat(buf, dgettext(TEXT_DOMAIN,2050"Destroy and re-create the pool from\n\ta backup source.\n"), size);2051}20522053/*2054* zpool_import() is a contracted interface. Should be kept the same2055* if possible.2056*2057* Applications should use zpool_import_props() to import a pool with2058* new properties value to be set.2059*/2060int2061zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,2062char *altroot)2063{2064nvlist_t *props = NULL;2065int ret;20662067if (altroot != NULL) {2068if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {2069return (zfs_error_fmt(hdl, EZFS_NOMEM,2070dgettext(TEXT_DOMAIN, "cannot import '%s'"),2071newname));2072}20732074if (nvlist_add_string(props,2075zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||2076nvlist_add_string(props,2077zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {2078nvlist_free(props);2079return (zfs_error_fmt(hdl, EZFS_NOMEM,2080dgettext(TEXT_DOMAIN, "cannot import '%s'"),2081newname));2082}2083}20842085ret = zpool_import_props(hdl, config, newname, props,2086ZFS_IMPORT_NORMAL);2087nvlist_free(props);2088return (ret);2089}20902091static void2092print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,2093int indent)2094{2095nvlist_t **child;2096uint_t c, children;2097char *vname;2098uint64_t is_log = 0;20992100(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,2101&is_log);21022103if (name != NULL)2104(void) printf("\t%*s%s%s\n", indent, "", name,2105is_log ? " [log]" : "");21062107if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,2108&child, &children) != 0)2109return;21102111for (c = 0; c < children; c++) {2112vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);2113print_vdev_tree(hdl, vname, child[c], indent + 2);2114free(vname);2115}2116}21172118void2119zpool_collect_unsup_feat(nvlist_t *config, char *buf, size_t size)2120{2121nvlist_t *nvinfo, *unsup_feat;2122char temp[512];21232124nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);2125unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);21262127for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);2128nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {2129const char *desc = fnvpair_value_string(nvp);2130if (strlen(desc) > 0) {2131(void) snprintf(temp, 512, "\t%s (%s)\n",2132nvpair_name(nvp), desc);2133(void) strlcat(buf, temp, size);2134} else {2135(void) snprintf(temp, 512, "\t%s\n", nvpair_name(nvp));2136(void) strlcat(buf, temp, size);2137}2138}2139}21402141/*2142* Import the given pool using the known configuration and a list of2143* properties to be set. The configuration should have come from2144* zpool_find_import(). The 'newname' parameters control whether the pool2145* is imported with a different name.2146*/2147int2148zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,2149nvlist_t *props, int flags)2150{2151zfs_cmd_t zc = {"\0"};2152zpool_load_policy_t policy;2153nvlist_t *nv = NULL;2154nvlist_t *nvinfo = NULL;2155nvlist_t *missing = NULL;2156const char *thename;2157const char *origname;2158int ret;2159int error = 0;2160char buf[2048];2161char errbuf[ERRBUFLEN];21622163origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);21642165(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,2166"cannot import pool '%s'"), origname);21672168if (newname != NULL) {2169if (!zpool_name_valid(hdl, B_FALSE, newname))2170return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,2171dgettext(TEXT_DOMAIN, "cannot import '%s'"),2172newname));2173thename = newname;2174} else {2175thename = origname;2176}21772178if (props != NULL) {2179uint64_t version;2180prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };21812182version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);21832184if ((props = zpool_valid_proplist(hdl, origname,2185props, version, flags, errbuf)) == NULL)2186return (-1);2187zcmd_write_src_nvlist(hdl, &zc, props);2188nvlist_free(props);2189}21902191(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));21922193zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);21942195zcmd_write_conf_nvlist(hdl, &zc, config);2196zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2);21972198zc.zc_cookie = flags;2199while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&2200errno == ENOMEM)2201zcmd_expand_dst_nvlist(hdl, &zc);2202if (ret != 0)2203error = errno;22042205(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);22062207zcmd_free_nvlists(&zc);22082209zpool_get_load_policy(config, &policy);22102211if (error) {2212char desc[1024];2213char aux[256];22142215/*2216* Dry-run failed, but we print out what success2217* looks like if we found a best txg2218*/2219if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {2220zpool_rewind_exclaim(hdl, newname ? origname : thename,2221B_TRUE, nv);2222nvlist_free(nv);2223return (-1);2224}22252226if (newname == NULL)2227(void) snprintf(desc, sizeof (desc),2228dgettext(TEXT_DOMAIN, "cannot import '%s'"),2229thename);2230else2231(void) snprintf(desc, sizeof (desc),2232dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),2233origname, thename);22342235switch (error) {2236case ENOTSUP:2237if (nv != NULL && nvlist_lookup_nvlist(nv,2238ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&2239nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {2240(void) printf(dgettext(TEXT_DOMAIN, "This "2241"pool uses the following feature(s) not "2242"supported by this system:\n"));2243memset(buf, 0, 2048);2244zpool_collect_unsup_feat(nv, buf, 2048);2245(void) printf("%s", buf);2246if (nvlist_exists(nvinfo,2247ZPOOL_CONFIG_CAN_RDONLY)) {2248(void) printf(dgettext(TEXT_DOMAIN,2249"All unsupported features are only "2250"required for writing to the pool."2251"\nThe pool can be imported using "2252"'-o readonly=on'.\n"));2253}2254}2255/*2256* Unsupported version.2257*/2258(void) zfs_error(hdl, EZFS_BADVERSION, desc);2259break;22602261case EREMOTEIO:2262if (nv != NULL && nvlist_lookup_nvlist(nv,2263ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {2264const char *hostname = "<unknown>";2265uint64_t hostid = 0;2266mmp_state_t mmp_state;22672268mmp_state = fnvlist_lookup_uint64(nvinfo,2269ZPOOL_CONFIG_MMP_STATE);22702271if (nvlist_exists(nvinfo,2272ZPOOL_CONFIG_MMP_HOSTNAME))2273hostname = fnvlist_lookup_string(nvinfo,2274ZPOOL_CONFIG_MMP_HOSTNAME);22752276if (nvlist_exists(nvinfo,2277ZPOOL_CONFIG_MMP_HOSTID))2278hostid = fnvlist_lookup_uint64(nvinfo,2279ZPOOL_CONFIG_MMP_HOSTID);22802281if (mmp_state == MMP_STATE_ACTIVE) {2282(void) snprintf(aux, sizeof (aux),2283dgettext(TEXT_DOMAIN, "pool is imp"2284"orted on host '%s' (hostid=%lx).\n"2285"Export the pool on the other "2286"system, then run 'zpool import'."),2287hostname, (unsigned long) hostid);2288} else if (mmp_state == MMP_STATE_NO_HOSTID) {2289(void) snprintf(aux, sizeof (aux),2290dgettext(TEXT_DOMAIN, "pool has "2291"the multihost property on and "2292"the\nsystem's hostid is not set. "2293"Set a unique system hostid with "2294"the zgenhostid(8) command.\n"));2295}22962297(void) zfs_error_aux(hdl, "%s", aux);2298}2299(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);2300break;23012302case EINVAL:2303(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);2304break;23052306case EROFS:2307zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,2308"one or more devices is read only"));2309(void) zfs_error(hdl, EZFS_BADDEV, desc);2310break;23112312case ENXIO:2313if (nv && nvlist_lookup_nvlist(nv,2314ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&2315nvlist_lookup_nvlist(nvinfo,2316ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {2317(void) printf(dgettext(TEXT_DOMAIN,2318"The devices below are missing or "2319"corrupted, use '-m' to import the pool "2320"anyway:\n"));2321print_vdev_tree(hdl, NULL, missing, 2);2322(void) printf("\n");2323}2324(void) zpool_standard_error(hdl, error, desc);2325break;23262327case EEXIST:2328(void) zpool_standard_error(hdl, error, desc);2329break;23302331case EBUSY:2332zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,2333"one or more devices are already in use\n"));2334(void) zfs_error(hdl, EZFS_BADDEV, desc);2335break;2336case ENAMETOOLONG:2337zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,2338"new name of at least one dataset is longer than "2339"the maximum allowable length"));2340(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);2341break;2342default:2343(void) zpool_standard_error(hdl, error, desc);2344memset(buf, 0, 2048);2345zpool_explain_recover(hdl,2346newname ? origname : thename, -error, nv,2347buf, 2048);2348(void) printf("\t%s", buf);2349break;2350}23512352nvlist_free(nv);2353ret = -1;2354} else {2355zpool_handle_t *zhp;23562357/*2358* This should never fail, but play it safe anyway.2359*/2360if (zpool_open_silent(hdl, thename, &zhp) != 0)2361ret = -1;2362else if (zhp != NULL)2363zpool_close(zhp);2364if (policy.zlp_rewind &2365(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {2366zpool_rewind_exclaim(hdl, newname ? origname : thename,2367((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);2368}2369nvlist_free(nv);2370}23712372return (ret);2373}23742375/*2376* Translate vdev names to guids. If a vdev_path is determined to be2377* unsuitable then a vd_errlist is allocated and the vdev path and errno2378* are added to it.2379*/2380static int2381zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,2382nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)2383{2384nvlist_t *errlist = NULL;2385int error = 0;23862387for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;2388elem = nvlist_next_nvpair(vds, elem)) {2389boolean_t spare, cache;23902391const char *vd_path = nvpair_name(elem);2392nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,2393NULL);23942395if ((tgt == NULL) || cache || spare) {2396if (errlist == NULL) {2397errlist = fnvlist_alloc();2398error = EINVAL;2399}24002401uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :2402(spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);2403fnvlist_add_int64(errlist, vd_path, err);2404continue;2405}24062407uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);2408fnvlist_add_uint64(vdev_guids, vd_path, guid);24092410char msg[MAXNAMELEN];2411(void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);2412fnvlist_add_string(guids_to_paths, msg, vd_path);2413}24142415if (error != 0) {2416verify(errlist != NULL);2417if (vd_errlist != NULL)2418*vd_errlist = errlist;2419else2420fnvlist_free(errlist);2421}24222423return (error);2424}24252426static int2427xlate_init_err(int err)2428{2429switch (err) {2430case ENODEV:2431return (EZFS_NODEVICE);2432case EINVAL:2433case EROFS:2434return (EZFS_BADDEV);2435case EBUSY:2436return (EZFS_INITIALIZING);2437case ESRCH:2438return (EZFS_NO_INITIALIZE);2439}2440return (err);2441}24422443int2444zpool_initialize_one(zpool_handle_t *zhp, void *data)2445{2446int error;2447libzfs_handle_t *hdl = zpool_get_handle(zhp);2448const char *pool_name = zpool_get_name(zhp);2449if (zpool_open_silent(hdl, pool_name, &zhp) != 0)2450return (-1);2451initialize_cbdata_t *cb = data;2452nvlist_t *vdevs = fnvlist_alloc();24532454nvlist_t *config = zpool_get_config(zhp, NULL);2455nvlist_t *nvroot = fnvlist_lookup_nvlist(config,2456ZPOOL_CONFIG_VDEV_TREE);2457zpool_collect_leaves(zhp, nvroot, vdevs);2458if (cb->wait)2459error = zpool_initialize_wait(zhp, cb->cmd_type, vdevs);2460else2461error = zpool_initialize(zhp, cb->cmd_type, vdevs);2462fnvlist_free(vdevs);24632464return (error);2465}24662467/*2468* Begin, suspend, cancel, or uninit (clear) the initialization (initializing2469* of all free blocks) for the given vdevs in the given pool.2470*/2471static int2472zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,2473nvlist_t *vds, boolean_t wait)2474{2475int err;24762477nvlist_t *vdev_guids = fnvlist_alloc();2478nvlist_t *guids_to_paths = fnvlist_alloc();2479nvlist_t *vd_errlist = NULL;2480nvlist_t *errlist;2481nvpair_t *elem;24822483err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,2484guids_to_paths, &vd_errlist);24852486if (err != 0) {2487verify(vd_errlist != NULL);2488goto list_errors;2489}24902491err = lzc_initialize(zhp->zpool_name, cmd_type,2492vdev_guids, &errlist);24932494if (err != 0) {2495if (errlist != NULL && nvlist_lookup_nvlist(errlist,2496ZPOOL_INITIALIZE_VDEVS, &vd_errlist) == 0) {2497goto list_errors;2498}24992500if (err == EINVAL && cmd_type == POOL_INITIALIZE_UNINIT) {2501zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,2502"uninitialize is not supported by kernel"));2503}25042505(void) zpool_standard_error(zhp->zpool_hdl, err,2506dgettext(TEXT_DOMAIN, "operation failed"));2507goto out;2508}25092510if (wait) {2511for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;2512elem = nvlist_next_nvpair(vdev_guids, elem)) {25132514uint64_t guid = fnvpair_value_uint64(elem);25152516err = lzc_wait_tag(zhp->zpool_name,2517ZPOOL_WAIT_INITIALIZE, guid, NULL);2518if (err != 0) {2519(void) zpool_standard_error_fmt(zhp->zpool_hdl,2520err, dgettext(TEXT_DOMAIN, "error "2521"waiting for '%s' to initialize"),2522nvpair_name(elem));25232524goto out;2525}2526}2527}2528goto out;25292530list_errors:2531for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;2532elem = nvlist_next_nvpair(vd_errlist, elem)) {2533int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));2534const char *path;25352536if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),2537&path) != 0)2538path = nvpair_name(elem);25392540(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,2541"cannot initialize '%s'", path);2542}25432544out:2545fnvlist_free(vdev_guids);2546fnvlist_free(guids_to_paths);25472548if (vd_errlist != NULL)2549fnvlist_free(vd_errlist);25502551return (err == 0 ? 0 : -1);2552}25532554int2555zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,2556nvlist_t *vds)2557{2558return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));2559}25602561int2562zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,2563nvlist_t *vds)2564{2565return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));2566}25672568static int2569xlate_trim_err(int err)2570{2571switch (err) {2572case ENODEV:2573return (EZFS_NODEVICE);2574case EINVAL:2575case EROFS:2576return (EZFS_BADDEV);2577case EBUSY:2578return (EZFS_TRIMMING);2579case ESRCH:2580return (EZFS_NO_TRIM);2581case EOPNOTSUPP:2582return (EZFS_TRIM_NOTSUP);2583}2584return (err);2585}25862587void2588zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)2589{2590libzfs_handle_t *hdl = zhp->zpool_hdl;2591uint_t children = 0;2592nvlist_t **child;2593uint_t i;25942595(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,2596&child, &children);25972598if (children == 0) {2599char *path = zpool_vdev_name(hdl, zhp, nvroot,2600VDEV_NAME_PATH);26012602if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&2603strcmp(path, VDEV_TYPE_HOLE) != 0)2604fnvlist_add_boolean(res, path);26052606free(path);2607return;2608}26092610for (i = 0; i < children; i++) {2611zpool_collect_leaves(zhp, child[i], res);2612}2613}26142615int2616zpool_trim_one(zpool_handle_t *zhp, void *data)2617{2618int error;2619libzfs_handle_t *hdl = zpool_get_handle(zhp);2620const char *pool_name = zpool_get_name(zhp);2621if (zpool_open_silent(hdl, pool_name, &zhp) != 0)2622return (-1);26232624trim_cbdata_t *cb = data;2625nvlist_t *vdevs = fnvlist_alloc();26262627/* no individual leaf vdevs specified, so add them all */2628nvlist_t *config = zpool_get_config(zhp, NULL);2629nvlist_t *nvroot = fnvlist_lookup_nvlist(config,2630ZPOOL_CONFIG_VDEV_TREE);26312632zpool_collect_leaves(zhp, nvroot, vdevs);2633error = zpool_trim(zhp, cb->cmd_type, vdevs, &cb->trim_flags);2634fnvlist_free(vdevs);26352636return (error);2637}26382639static int2640zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)2641{2642int err;2643nvpair_t *elem;26442645for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;2646elem = nvlist_next_nvpair(vdev_guids, elem)) {26472648uint64_t guid = fnvpair_value_uint64(elem);26492650err = lzc_wait_tag(zhp->zpool_name,2651ZPOOL_WAIT_TRIM, guid, NULL);2652if (err != 0) {2653(void) zpool_standard_error_fmt(zhp->zpool_hdl,2654err, dgettext(TEXT_DOMAIN, "error "2655"waiting to trim '%s'"), nvpair_name(elem));26562657return (err);2658}2659}2660return (0);2661}26622663/*2664* Check errlist and report any errors, omitting ones which should be2665* suppressed. Returns B_TRUE if any errors were reported.2666*/2667static boolean_t2668check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,2669nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)2670{2671nvpair_t *elem;2672boolean_t reported_errs = B_FALSE;2673int num_vds = 0;2674int num_suppressed_errs = 0;26752676for (elem = nvlist_next_nvpair(vds, NULL);2677elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {2678num_vds++;2679}26802681for (elem = nvlist_next_nvpair(errlist, NULL);2682elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {2683int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));2684const char *path;26852686/*2687* If only the pool was specified, and it was not a secure2688* trim then suppress warnings for individual vdevs which2689* do not support trimming.2690*/2691if (vd_error == EZFS_TRIM_NOTSUP &&2692trim_flags->fullpool &&2693!trim_flags->secure) {2694num_suppressed_errs++;2695continue;2696}26972698reported_errs = B_TRUE;2699if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),2700&path) != 0)2701path = nvpair_name(elem);27022703(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,2704"cannot trim '%s'", path);2705}27062707if (num_suppressed_errs == num_vds) {2708(void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,2709"no devices in pool support trim operations"));2710(void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,2711dgettext(TEXT_DOMAIN, "cannot trim")));2712reported_errs = B_TRUE;2713}27142715return (reported_errs);2716}27172718/*2719* Begin, suspend, or cancel the TRIM (discarding of all free blocks) for2720* the given vdevs in the given pool.2721*/2722int2723zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,2724trimflags_t *trim_flags)2725{2726int err;2727int retval = 0;27282729nvlist_t *vdev_guids = fnvlist_alloc();2730nvlist_t *guids_to_paths = fnvlist_alloc();2731nvlist_t *errlist = NULL;27322733err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,2734guids_to_paths, &errlist);2735if (err != 0) {2736check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);2737retval = -1;2738goto out;2739}27402741err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,2742trim_flags->secure, vdev_guids, &errlist);2743if (err != 0) {2744nvlist_t *vd_errlist;2745if (errlist != NULL && nvlist_lookup_nvlist(errlist,2746ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {2747if (check_trim_errs(zhp, trim_flags, guids_to_paths,2748vds, vd_errlist)) {2749retval = -1;2750goto out;2751}2752} else {2753char errbuf[ERRBUFLEN];27542755(void) snprintf(errbuf, sizeof (errbuf),2756dgettext(TEXT_DOMAIN, "operation failed"));2757zpool_standard_error(zhp->zpool_hdl, err, errbuf);2758retval = -1;2759goto out;2760}2761}276227632764if (trim_flags->wait)2765retval = zpool_trim_wait(zhp, vdev_guids);27662767out:2768if (errlist != NULL)2769fnvlist_free(errlist);2770fnvlist_free(vdev_guids);2771fnvlist_free(guids_to_paths);2772return (retval);2773}27742775/*2776* Scan the pool.2777*/2778int2779zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd) {2780return (zpool_scan_range(zhp, func, cmd, 0, 0));2781}27822783int2784zpool_scan_range(zpool_handle_t *zhp, pool_scan_func_t func,2785pool_scrub_cmd_t cmd, time_t date_start, time_t date_end)2786{2787char errbuf[ERRBUFLEN];2788int err;2789libzfs_handle_t *hdl = zhp->zpool_hdl;27902791nvlist_t *args = fnvlist_alloc();2792fnvlist_add_uint64(args, "scan_type", (uint64_t)func);2793fnvlist_add_uint64(args, "scan_command", (uint64_t)cmd);2794fnvlist_add_uint64(args, "scan_date_start", (uint64_t)date_start);2795fnvlist_add_uint64(args, "scan_date_end", (uint64_t)date_end);27962797err = lzc_scrub(ZFS_IOC_POOL_SCRUB, zhp->zpool_name, args, NULL);2798fnvlist_free(args);27992800if (err == 0) {2801return (0);2802} else if (err == ZFS_ERR_IOC_CMD_UNAVAIL) {2803zfs_cmd_t zc = {"\0"};2804(void) strlcpy(zc.zc_name, zhp->zpool_name,2805sizeof (zc.zc_name));2806zc.zc_cookie = func;2807zc.zc_flags = cmd;28082809if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)2810return (0);2811}28122813/*2814* An ECANCELED on a scrub means one of the following:2815* 1. we resumed a paused scrub.2816* 2. we resumed a paused error scrub.2817* 3. Error scrub is not run because of no error log.2818*2819* Note that we no longer return ECANCELED in case 1 or 2. However, in2820* order to prevent problems where we have a newer userland than2821* kernel, we keep this check in place. That prevents erroneous2822* failures when an older kernel returns ECANCELED in those cases.2823*/2824if (err == ECANCELED && (func == POOL_SCAN_SCRUB ||2825func == POOL_SCAN_ERRORSCRUB) && cmd == POOL_SCRUB_NORMAL)2826return (0);2827/*2828* The following cases have been handled here:2829* 1. Paused a scrub/error scrub if there is none in progress.2830*/2831if (err == ENOENT && func != POOL_SCAN_NONE && cmd ==2832POOL_SCRUB_PAUSE) {2833return (0);2834}28352836ASSERT3U(func, >=, POOL_SCAN_NONE);2837ASSERT3U(func, <, POOL_SCAN_FUNCS);28382839if (func == POOL_SCAN_SCRUB || func == POOL_SCAN_ERRORSCRUB) {2840if (cmd == POOL_SCRUB_PAUSE) {2841(void) snprintf(errbuf, sizeof (errbuf),2842dgettext(TEXT_DOMAIN, "cannot pause scrubbing %s"),2843zhp->zpool_name);2844} else {2845assert(cmd == POOL_SCRUB_NORMAL);2846(void) snprintf(errbuf, sizeof (errbuf),2847dgettext(TEXT_DOMAIN, "cannot scrub %s"),2848zhp->zpool_name);2849}2850} else if (func == POOL_SCAN_RESILVER) {2851assert(cmd == POOL_SCRUB_NORMAL);2852(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,2853"cannot restart resilver on %s"), zhp->zpool_name);2854} else if (func == POOL_SCAN_NONE) {2855(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,2856"cannot cancel scrubbing %s"), zhp->zpool_name);2857} else {2858assert(!"unexpected result");2859}28602861/*2862* With EBUSY, six cases are possible:2863*2864* Current state Requested2865* 1. Normal Scrub Running Normal Scrub or Error Scrub2866* 2. Normal Scrub Paused Error Scrub2867* 3. Normal Scrub Paused Pause Normal Scrub2868* 4. Error Scrub Running Normal Scrub or Error Scrub2869* 5. Error Scrub Paused Pause Error Scrub2870* 6. Resilvering Anything else2871*/2872if (err == EBUSY) {2873nvlist_t *nvroot;2874pool_scan_stat_t *ps = NULL;2875uint_t psc;28762877nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,2878ZPOOL_CONFIG_VDEV_TREE);2879(void) nvlist_lookup_uint64_array(nvroot,2880ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);2881if (ps && ps->pss_func == POOL_SCAN_SCRUB &&2882ps->pss_state == DSS_SCANNING) {2883if (ps->pss_pass_scrub_pause == 0) {2884/* handles case 1 */2885assert(cmd == POOL_SCRUB_NORMAL);2886return (zfs_error(hdl, EZFS_SCRUBBING,2887errbuf));2888} else {2889if (func == POOL_SCAN_ERRORSCRUB) {2890/* handles case 2 */2891ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);2892return (zfs_error(hdl,2893EZFS_SCRUB_PAUSED_TO_CANCEL,2894errbuf));2895} else {2896/* handles case 3 */2897ASSERT3U(func, ==, POOL_SCAN_SCRUB);2898ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);2899return (zfs_error(hdl,2900EZFS_SCRUB_PAUSED, errbuf));2901}2902}2903} else if (ps &&2904ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&2905ps->pss_error_scrub_state == DSS_ERRORSCRUBBING) {2906if (ps->pss_pass_error_scrub_pause == 0) {2907/* handles case 4 */2908ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);2909return (zfs_error(hdl, EZFS_ERRORSCRUBBING,2910errbuf));2911} else {2912/* handles case 5 */2913ASSERT3U(func, ==, POOL_SCAN_ERRORSCRUB);2914ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);2915return (zfs_error(hdl, EZFS_ERRORSCRUB_PAUSED,2916errbuf));2917}2918} else {2919/* handles case 6 */2920return (zfs_error(hdl, EZFS_RESILVERING, errbuf));2921}2922} else if (err == ENOENT) {2923return (zfs_error(hdl, EZFS_NO_SCRUB, errbuf));2924} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {2925return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, errbuf));2926} else {2927return (zpool_standard_error(hdl, err, errbuf));2928}2929}29302931/*2932* Find a vdev that matches the search criteria specified. We use the2933* the nvpair name to determine how we should look for the device.2934* 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL2935* spare; but FALSE if its an INUSE spare.2936*2937* If 'return_parent' is set, then return the *parent* of the vdev you're2938* searching for rather than the vdev itself.2939*/2940static nvlist_t *2941vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,2942boolean_t *l2cache, boolean_t *log, boolean_t return_parent)2943{2944uint_t c, children;2945nvlist_t **child;2946nvlist_t *ret;2947uint64_t is_log;2948const char *srchkey;2949nvpair_t *pair = nvlist_next_nvpair(search, NULL);2950const char *tmp = NULL;2951boolean_t is_root;29522953/* Nothing to look for */2954if (search == NULL || pair == NULL)2955return (NULL);29562957/* Obtain the key we will use to search */2958srchkey = nvpair_name(pair);29592960nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &tmp);2961if (strcmp(tmp, "root") == 0)2962is_root = B_TRUE;2963else2964is_root = B_FALSE;29652966switch (nvpair_type(pair)) {2967case DATA_TYPE_UINT64:2968if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {2969uint64_t srchval = fnvpair_value_uint64(pair);2970uint64_t theguid = fnvlist_lookup_uint64(nv,2971ZPOOL_CONFIG_GUID);2972if (theguid == srchval)2973return (nv);2974}2975break;29762977case DATA_TYPE_STRING: {2978const char *srchval, *val;29792980srchval = fnvpair_value_string(pair);2981if (nvlist_lookup_string(nv, srchkey, &val) != 0)2982break;29832984/*2985* Search for the requested value. Special cases:2986*2987* - ZPOOL_CONFIG_PATH for whole disk entries. These end in2988* "-part1", or "p1". The suffix is hidden from the user,2989* but included in the string, so this matches around it.2990* - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()2991* is used to check all possible expanded paths.2992* - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).2993*2994* Otherwise, all other searches are simple string compares.2995*/2996if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {2997uint64_t wholedisk = 0;29982999(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,3000&wholedisk);3001if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)3002return (nv);30033004} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0) {3005char *type, *idx, *end, *p;3006uint64_t id, vdev_id;30073008/*3009* Determine our vdev type, keeping in mind3010* that the srchval is composed of a type and3011* vdev id pair (i.e. mirror-4).3012*/3013if ((type = strdup(srchval)) == NULL)3014return (NULL);30153016if ((p = strrchr(type, '-')) == NULL) {3017free(type);3018break;3019}3020idx = p + 1;3021*p = '\0';30223023/*3024* draid names are presented like: draid2:4d:6c:0s3025* We match them up to the first ':' so we can still3026* do the parity check below, but the other params3027* are ignored.3028*/3029if ((p = strchr(type, ':')) != NULL) {3030if (strncmp(type, VDEV_TYPE_DRAID,3031strlen(VDEV_TYPE_DRAID)) == 0)3032*p = '\0';3033}30343035/*3036* If the types don't match then keep looking.3037*/3038if (strncmp(val, type, strlen(val)) != 0) {3039free(type);3040break;3041}30423043verify(zpool_vdev_is_interior(type));30443045id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);3046errno = 0;3047vdev_id = strtoull(idx, &end, 10);30483049/*3050* If we are looking for a raidz and a parity is3051* specified, make sure it matches.3052*/3053int rzlen = strlen(VDEV_TYPE_RAIDZ);3054assert(rzlen == strlen(VDEV_TYPE_DRAID));3055int typlen = strlen(type);3056if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||3057strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&3058typlen != rzlen) {3059uint64_t vdev_parity;3060int parity = *(type + rzlen) - '0';30613062if (parity <= 0 || parity > 3 ||3063(typlen - rzlen) != 1) {3064/*3065* Nonsense parity specified, can3066* never match3067*/3068free(type);3069return (NULL);3070}3071vdev_parity = fnvlist_lookup_uint64(nv,3072ZPOOL_CONFIG_NPARITY);3073if ((int)vdev_parity != parity) {3074free(type);3075break;3076}3077}30783079free(type);3080if (errno != 0)3081return (NULL);30823083/*3084* Now verify that we have the correct vdev id.3085*/3086if (vdev_id == id)3087return (nv);3088}30893090/*3091* Common case3092*/3093if (strcmp(srchval, val) == 0)3094return (nv);3095break;3096}30973098default:3099break;3100}31013102if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,3103&child, &children) != 0)3104return (NULL);31053106for (c = 0; c < children; c++) {3107if ((ret = vdev_to_nvlist_iter(child[c], search,3108avail_spare, l2cache, NULL, return_parent)) != NULL) {3109/*3110* The 'is_log' value is only set for the toplevel3111* vdev, not the leaf vdevs. So we always lookup the3112* log device from the root of the vdev tree (where3113* 'log' is non-NULL).3114*/3115if (log != NULL &&3116nvlist_lookup_uint64(child[c],3117ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&3118is_log) {3119*log = B_TRUE;3120}3121return (ret && return_parent && !is_root ? nv : ret);3122}3123}31243125if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,3126&child, &children) == 0) {3127for (c = 0; c < children; c++) {3128if ((ret = vdev_to_nvlist_iter(child[c], search,3129avail_spare, l2cache, NULL, return_parent))3130!= NULL) {3131*avail_spare = B_TRUE;3132return (ret && return_parent &&3133!is_root ? nv : ret);3134}3135}3136}31373138if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,3139&child, &children) == 0) {3140for (c = 0; c < children; c++) {3141if ((ret = vdev_to_nvlist_iter(child[c], search,3142avail_spare, l2cache, NULL, return_parent))3143!= NULL) {3144*l2cache = B_TRUE;3145return (ret && return_parent &&3146!is_root ? nv : ret);3147}3148}3149}31503151return (NULL);3152}31533154/*3155* Given a physical path or guid, find the associated vdev.3156*/3157nvlist_t *3158zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,3159boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)3160{3161nvlist_t *search, *nvroot, *ret;3162uint64_t guid;3163char *end;31643165search = fnvlist_alloc();31663167guid = strtoull(ppath, &end, 0);3168if (guid != 0 && *end == '\0') {3169fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);3170} else {3171fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath);3172}31733174nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,3175ZPOOL_CONFIG_VDEV_TREE);31763177*avail_spare = B_FALSE;3178*l2cache = B_FALSE;3179if (log != NULL)3180*log = B_FALSE;3181ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,3182B_FALSE);3183fnvlist_free(search);31843185return (ret);3186}31873188/*3189* Determine if we have an "interior" top-level vdev (i.e mirror/raidz).3190*/3191static boolean_t3192zpool_vdev_is_interior(const char *name)3193{3194if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||3195strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||3196strncmp(name,3197VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||3198strncmp(name, VDEV_TYPE_ROOT, strlen(VDEV_TYPE_ROOT)) == 0 ||3199strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)3200return (B_TRUE);32013202if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&3203!zpool_is_draid_spare(name))3204return (B_TRUE);32053206return (B_FALSE);3207}32083209/*3210* Lookup the nvlist for a given vdev or vdev's parent (depending on3211* if 'return_parent' is set).3212*/3213static nvlist_t *3214__zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,3215boolean_t *l2cache, boolean_t *log, boolean_t return_parent)3216{3217char *end;3218nvlist_t *nvroot, *search, *ret;3219uint64_t guid;3220boolean_t __avail_spare, __l2cache, __log;32213222search = fnvlist_alloc();32233224guid = strtoull(path, &end, 0);3225if (guid != 0 && *end == '\0') {3226fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);3227} else if (zpool_vdev_is_interior(path)) {3228fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path);3229} else {3230fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path);3231}32323233nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,3234ZPOOL_CONFIG_VDEV_TREE);32353236/*3237* User can pass NULL for avail_spare, l2cache, and log, but3238* we still need to provide variables to vdev_to_nvlist_iter(), so3239* just point them to junk variables here.3240*/3241if (!avail_spare)3242avail_spare = &__avail_spare;3243if (!l2cache)3244l2cache = &__l2cache;3245if (!log)3246log = &__log;32473248*avail_spare = B_FALSE;3249*l2cache = B_FALSE;3250if (log != NULL)3251*log = B_FALSE;3252ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,3253return_parent);3254fnvlist_free(search);32553256return (ret);3257}32583259nvlist_t *3260zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,3261boolean_t *l2cache, boolean_t *log)3262{3263return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,3264B_FALSE));3265}32663267/* Given a vdev path, return its parent's nvlist */3268nvlist_t *3269zpool_find_parent_vdev(zpool_handle_t *zhp, const char *path,3270boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)3271{3272return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,3273B_TRUE));3274}32753276/*3277* Convert a vdev path to a GUID. Returns GUID or 0 on error.3278*3279* If is_spare, is_l2cache, or is_log is non-NULL, then store within it3280* if the VDEV is a spare, l2cache, or log device. If they're NULL then3281* ignore them.3282*/3283static uint64_t3284zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,3285boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)3286{3287boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;3288nvlist_t *tgt;32893290if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,3291&log)) == NULL)3292return (0);32933294if (is_spare != NULL)3295*is_spare = spare;3296if (is_l2cache != NULL)3297*is_l2cache = l2cache;3298if (is_log != NULL)3299*is_log = log;33003301return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID));3302}33033304/* Convert a vdev path to a GUID. Returns GUID or 0 on error. */3305uint64_t3306zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)3307{3308return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));3309}33103311/*3312* Bring the specified vdev online. The 'flags' parameter is a set of the3313* ZFS_ONLINE_* flags.3314*/3315int3316zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,3317vdev_state_t *newstate)3318{3319zfs_cmd_t zc = {"\0"};3320char errbuf[ERRBUFLEN];3321nvlist_t *tgt;3322boolean_t avail_spare, l2cache, islog;3323libzfs_handle_t *hdl = zhp->zpool_hdl;33243325if (flags & ZFS_ONLINE_EXPAND) {3326(void) snprintf(errbuf, sizeof (errbuf),3327dgettext(TEXT_DOMAIN, "cannot expand %s"), path);3328} else {3329(void) snprintf(errbuf, sizeof (errbuf),3330dgettext(TEXT_DOMAIN, "cannot online %s"), path);3331}33323333(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));3334if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,3335&islog)) == NULL)3336return (zfs_error(hdl, EZFS_NODEVICE, errbuf));33373338zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);33393340if (!(flags & ZFS_ONLINE_SPARE) && avail_spare)3341return (zfs_error(hdl, EZFS_ISSPARE, errbuf));33423343#ifndef __FreeBSD__3344const char *pathname;3345if ((flags & ZFS_ONLINE_EXPAND ||3346zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&3347nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {3348uint64_t wholedisk = 0;33493350(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,3351&wholedisk);33523353/*3354* XXX - L2ARC 1.0 devices can't support expansion.3355*/3356if (l2cache) {3357zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3358"cannot expand cache devices"));3359return (zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf));3360}33613362if (wholedisk) {3363const char *fullpath = path;3364char buf[MAXPATHLEN];3365int error;33663367if (path[0] != '/') {3368error = zfs_resolve_shortname(path, buf,3369sizeof (buf));3370if (error != 0)3371return (zfs_error(hdl, EZFS_NODEVICE,3372errbuf));33733374fullpath = buf;3375}33763377error = zpool_relabel_disk(hdl, fullpath, errbuf);3378if (error != 0)3379return (error);3380}3381}3382#endif33833384zc.zc_cookie = VDEV_STATE_ONLINE;3385zc.zc_obj = flags;33863387if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {3388if (errno == EINVAL) {3389zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "3390"from this pool into a new one. Use '%s' "3391"instead"), "zpool detach");3392return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, errbuf));3393}3394return (zpool_standard_error(hdl, errno, errbuf));3395}33963397*newstate = zc.zc_cookie;3398return (0);3399}34003401/*3402* Take the specified vdev offline3403*/3404int3405zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)3406{3407zfs_cmd_t zc = {"\0"};3408char errbuf[ERRBUFLEN];3409nvlist_t *tgt;3410boolean_t avail_spare, l2cache;3411libzfs_handle_t *hdl = zhp->zpool_hdl;34123413(void) snprintf(errbuf, sizeof (errbuf),3414dgettext(TEXT_DOMAIN, "cannot offline %s"), path);34153416(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));3417if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,3418NULL)) == NULL)3419return (zfs_error(hdl, EZFS_NODEVICE, errbuf));34203421zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);34223423if (avail_spare)3424return (zfs_error(hdl, EZFS_ISSPARE, errbuf));34253426zc.zc_cookie = VDEV_STATE_OFFLINE;3427zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;34283429if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)3430return (0);34313432switch (errno) {3433case EBUSY:34343435/*3436* There are no other replicas of this device.3437*/3438return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));34393440case EEXIST:3441/*3442* The log device has unplayed logs3443*/3444return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf));34453446default:3447return (zpool_standard_error(hdl, errno, errbuf));3448}3449}34503451/*3452* Remove the specified vdev asynchronously from the configuration, so3453* that it may come ONLINE if reinserted. This is called from zed on3454* Udev remove event.3455* Note: We also have a similar function zpool_vdev_remove() that3456* removes the vdev from the pool.3457*/3458int3459zpool_vdev_remove_wanted(zpool_handle_t *zhp, const char *path)3460{3461zfs_cmd_t zc = {"\0"};3462char errbuf[ERRBUFLEN];3463nvlist_t *tgt;3464boolean_t avail_spare, l2cache;3465libzfs_handle_t *hdl = zhp->zpool_hdl;34663467(void) snprintf(errbuf, sizeof (errbuf),3468dgettext(TEXT_DOMAIN, "cannot remove %s"), path);34693470(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));3471if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,3472NULL)) == NULL)3473return (zfs_error(hdl, EZFS_NODEVICE, errbuf));34743475zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);34763477zc.zc_cookie = VDEV_STATE_REMOVED;34783479if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)3480return (0);34813482return (zpool_standard_error(hdl, errno, errbuf));3483}34843485/*3486* Mark the given vdev faulted.3487*/3488int3489zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)3490{3491zfs_cmd_t zc = {"\0"};3492char errbuf[ERRBUFLEN];3493libzfs_handle_t *hdl = zhp->zpool_hdl;34943495(void) snprintf(errbuf, sizeof (errbuf),3496dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);34973498(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));3499zc.zc_guid = guid;3500zc.zc_cookie = VDEV_STATE_FAULTED;3501zc.zc_obj = aux;35023503if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)3504return (0);35053506switch (errno) {3507case EBUSY:35083509/*3510* There are no other replicas of this device.3511*/3512return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));35133514default:3515return (zpool_standard_error(hdl, errno, errbuf));3516}35173518}35193520/*3521* Generic set vdev state function3522*/3523static int3524zpool_vdev_set_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux,3525vdev_state_t state)3526{3527zfs_cmd_t zc = {"\0"};3528char errbuf[ERRBUFLEN];3529libzfs_handle_t *hdl = zhp->zpool_hdl;35303531(void) snprintf(errbuf, sizeof (errbuf),3532dgettext(TEXT_DOMAIN, "cannot set %s %llu"),3533zpool_state_to_name(state, aux), (u_longlong_t)guid);35343535(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));3536zc.zc_guid = guid;3537zc.zc_cookie = state;3538zc.zc_obj = aux;35393540if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)3541return (0);35423543return (zpool_standard_error(hdl, errno, errbuf));3544}35453546/*3547* Mark the given vdev degraded.3548*/3549int3550zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)3551{3552return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_DEGRADED));3553}35543555/*3556* Mark the given vdev as in a removed state (as if the device does not exist).3557*3558* This is different than zpool_vdev_remove() which does a removal of a device3559* from the pool (but the device does exist).3560*/3561int3562zpool_vdev_set_removed_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)3563{3564return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_REMOVED));3565}35663567/*3568* Returns TRUE if the given nvlist is a vdev that was originally swapped in as3569* a hot spare.3570*/3571static boolean_t3572is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)3573{3574nvlist_t **child;3575uint_t c, children;35763577if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,3578&children) == 0) {3579const char *type = fnvlist_lookup_string(search,3580ZPOOL_CONFIG_TYPE);3581if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||3582strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&3583children == 2 && child[which] == tgt)3584return (B_TRUE);35853586for (c = 0; c < children; c++)3587if (is_replacing_spare(child[c], tgt, which))3588return (B_TRUE);3589}35903591return (B_FALSE);3592}35933594/*3595* Attach new_disk (fully described by nvroot) to old_disk.3596* If 'replacing' is specified, the new disk will replace the old one.3597*/3598int3599zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,3600const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)3601{3602zfs_cmd_t zc = {"\0"};3603char errbuf[ERRBUFLEN];3604int ret;3605nvlist_t *tgt;3606boolean_t avail_spare, l2cache, islog;3607uint64_t val;3608char *newname;3609const char *type;3610nvlist_t **child;3611uint_t children;3612nvlist_t *config_root;3613libzfs_handle_t *hdl = zhp->zpool_hdl;36143615if (replacing)3616(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,3617"cannot replace %s with %s"), old_disk, new_disk);3618else3619(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,3620"cannot attach %s to %s"), new_disk, old_disk);36213622(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));3623if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,3624&islog)) == NULL)3625return (zfs_error(hdl, EZFS_NODEVICE, errbuf));36263627if (avail_spare)3628return (zfs_error(hdl, EZFS_ISSPARE, errbuf));36293630if (l2cache)3631return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));36323633zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);3634zc.zc_cookie = replacing;3635zc.zc_simple = rebuild;36363637if (rebuild &&3638zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {3639zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3640"the loaded zfs module doesn't support device rebuilds"));3641return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));3642}36433644type = fnvlist_lookup_string(tgt, ZPOOL_CONFIG_TYPE);3645if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 &&3646zfeature_lookup_guid("org.openzfs:raidz_expansion", NULL) != 0) {3647zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3648"the loaded zfs module doesn't support raidz expansion"));3649return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));3650}36513652if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,3653&child, &children) != 0 || children != 1) {3654zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3655"new device must be a single disk"));3656return (zfs_error(hdl, EZFS_INVALCONFIG, errbuf));3657}36583659config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),3660ZPOOL_CONFIG_VDEV_TREE);36613662if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)3663return (-1);36643665/*3666* If the target is a hot spare that has been swapped in, we can only3667* replace it with another hot spare.3668*/3669if (replacing &&3670nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&3671(zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,3672NULL) == NULL || !avail_spare) &&3673is_replacing_spare(config_root, tgt, 1)) {3674zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3675"can only be replaced by another hot spare"));3676free(newname);3677return (zfs_error(hdl, EZFS_BADTARGET, errbuf));3678}36793680free(newname);36813682zcmd_write_conf_nvlist(hdl, &zc, nvroot);36833684ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);36853686zcmd_free_nvlists(&zc);36873688if (ret == 0)3689return (0);36903691switch (errno) {3692case ENOTSUP:3693/*3694* Can't attach to or replace this type of vdev.3695*/3696if (replacing) {3697uint64_t version = zpool_get_prop_int(zhp,3698ZPOOL_PROP_VERSION, NULL);36993700if (islog) {3701zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3702"cannot replace a log with a spare"));3703} else if (rebuild) {3704zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3705"only mirror and dRAID vdevs support "3706"sequential reconstruction"));3707} else if (zpool_is_draid_spare(new_disk)) {3708zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3709"dRAID spares can only replace child "3710"devices in their parent's dRAID vdev"));3711} else if (version >= SPA_VERSION_MULTI_REPLACE) {3712zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3713"already in replacing/spare config; wait "3714"for completion or use 'zpool detach'"));3715} else {3716zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3717"cannot replace a replacing device"));3718}3719} else if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {3720zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3721"raidz_expansion feature must be enabled "3722"in order to attach a device to raidz"));3723} else {3724char status[64] = {0};3725zpool_prop_get_feature(zhp,3726"feature@device_rebuild", status, 63);3727if (rebuild &&3728strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) {3729zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3730"device_rebuild feature must be enabled "3731"in order to use sequential "3732"reconstruction"));3733} else {3734zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3735"can only attach to mirrors and top-level "3736"disks"));3737}3738}3739(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);3740break;37413742case EINVAL:3743/*3744* The new device must be a single disk.3745*/3746zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3747"new device must be a single disk"));3748(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);3749break;37503751case EBUSY:3752zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),3753new_disk);3754(void) zfs_error(hdl, EZFS_BADDEV, errbuf);3755break;37563757case EOVERFLOW:3758/*3759* The new device is too small.3760*/3761zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3762"device is too small"));3763(void) zfs_error(hdl, EZFS_BADDEV, errbuf);3764break;37653766case EDOM:3767/*3768* The new device has a different optimal sector size.3769*/3770zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3771"new device has a different optimal sector size; use the "3772"option '-o ashift=N' to override the optimal size"));3773(void) zfs_error(hdl, EZFS_BADDEV, errbuf);3774break;37753776case ENAMETOOLONG:3777/*3778* The resulting top-level vdev spec won't fit in the label.3779*/3780(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);3781break;37823783case ENXIO:3784/*3785* The existing raidz vdev has offline children3786*/3787if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {3788zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3789"raidz vdev has devices that are are offline or "3790"being replaced"));3791(void) zfs_error(hdl, EZFS_BADDEV, errbuf);3792break;3793} else {3794(void) zpool_standard_error(hdl, errno, errbuf);3795}3796break;37973798case EADDRINUSE:3799/*3800* The boot reserved area is already being used (FreeBSD)3801*/3802if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {3803zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3804"the reserved boot area needed for the expansion "3805"is already being used by a boot loader"));3806(void) zfs_error(hdl, EZFS_BADDEV, errbuf);3807} else {3808(void) zpool_standard_error(hdl, errno, errbuf);3809}3810break;38113812case ZFS_ERR_ASHIFT_MISMATCH:3813zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3814"The new device cannot have a higher alignment requirement "3815"than the top-level vdev."));3816(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);3817break;3818default:3819(void) zpool_standard_error(hdl, errno, errbuf);3820}38213822return (-1);3823}38243825/*3826* Detach the specified device.3827*/3828int3829zpool_vdev_detach(zpool_handle_t *zhp, const char *path)3830{3831zfs_cmd_t zc = {"\0"};3832char errbuf[ERRBUFLEN];3833nvlist_t *tgt;3834boolean_t avail_spare, l2cache;3835libzfs_handle_t *hdl = zhp->zpool_hdl;38363837(void) snprintf(errbuf, sizeof (errbuf),3838dgettext(TEXT_DOMAIN, "cannot detach %s"), path);38393840(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));3841if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,3842NULL)) == NULL)3843return (zfs_error(hdl, EZFS_NODEVICE, errbuf));38443845if (avail_spare)3846return (zfs_error(hdl, EZFS_ISSPARE, errbuf));38473848if (l2cache)3849return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));38503851zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);38523853if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)3854return (0);38553856switch (errno) {38573858case ENOTSUP:3859/*3860* Can't detach from this type of vdev.3861*/3862zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "3863"applicable to mirror and replacing vdevs"));3864(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);3865break;38663867case EBUSY:3868/*3869* There are no other replicas of this device.3870*/3871(void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf);3872break;38733874default:3875(void) zpool_standard_error(hdl, errno, errbuf);3876}38773878return (-1);3879}38803881/*3882* Find a mirror vdev in the source nvlist.3883*3884* The mchild array contains a list of disks in one of the top-level mirrors3885* of the source pool. The schild array contains a list of disks that the3886* user specified on the command line. We loop over the mchild array to3887* see if any entry in the schild array matches.3888*3889* If a disk in the mchild array is found in the schild array, we return3890* the index of that entry. Otherwise we return -1.3891*/3892static int3893find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,3894nvlist_t **schild, uint_t schildren)3895{3896uint_t mc;38973898for (mc = 0; mc < mchildren; mc++) {3899uint_t sc;3900char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,3901mchild[mc], 0);39023903for (sc = 0; sc < schildren; sc++) {3904char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,3905schild[sc], 0);3906boolean_t result = (strcmp(mpath, spath) == 0);39073908free(spath);3909if (result) {3910free(mpath);3911return (mc);3912}3913}39143915free(mpath);3916}39173918return (-1);3919}39203921/*3922* Split a mirror pool. If newroot points to null, then a new nvlist3923* is generated and it is the responsibility of the caller to free it.3924*/3925int3926zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,3927nvlist_t *props, splitflags_t flags)3928{3929zfs_cmd_t zc = {"\0"};3930char errbuf[ERRBUFLEN];3931const char *bias;3932nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;3933nvlist_t **varray = NULL, *zc_props = NULL;3934uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;3935libzfs_handle_t *hdl = zhp->zpool_hdl;3936uint64_t vers, readonly = B_FALSE;3937boolean_t freelist = B_FALSE, memory_err = B_TRUE;3938int retval = 0;39393940(void) snprintf(errbuf, sizeof (errbuf),3941dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);39423943if (!zpool_name_valid(hdl, B_FALSE, newname))3944return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));39453946if ((config = zpool_get_config(zhp, NULL)) == NULL) {3947(void) fprintf(stderr, gettext("Internal error: unable to "3948"retrieve pool configuration\n"));3949return (-1);3950}39513952tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);3953vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);39543955if (props) {3956prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };3957if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,3958props, vers, flags, errbuf)) == NULL)3959return (-1);3960(void) nvlist_lookup_uint64(zc_props,3961zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);3962if (readonly) {3963zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3964"property %s can only be set at import time"),3965zpool_prop_to_name(ZPOOL_PROP_READONLY));3966return (-1);3967}3968}39693970if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,3971&children) != 0) {3972zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,3973"Source pool is missing vdev tree"));3974nvlist_free(zc_props);3975return (-1);3976}39773978varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));3979vcount = 0;39803981if (*newroot == NULL ||3982nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,3983&newchild, &newchildren) != 0)3984newchildren = 0;39853986for (c = 0; c < children; c++) {3987uint64_t is_log = B_FALSE, is_hole = B_FALSE;3988boolean_t is_special = B_FALSE, is_dedup = B_FALSE;3989const char *type;3990nvlist_t **mchild, *vdev;3991uint_t mchildren;3992int entry;39933994/*3995* Unlike cache & spares, slogs are stored in the3996* ZPOOL_CONFIG_CHILDREN array. We filter them out here.3997*/3998(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,3999&is_log);4000(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,4001&is_hole);4002if (is_log || is_hole) {4003/*4004* Create a hole vdev and put it in the config.4005*/4006if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)4007goto out;4008if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,4009VDEV_TYPE_HOLE) != 0)4010goto out;4011if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,40121) != 0)4013goto out;4014if (lastlog == 0)4015lastlog = vcount;4016varray[vcount++] = vdev;4017continue;4018}4019lastlog = 0;4020type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE);40214022if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {4023vdev = child[c];4024if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)4025goto out;4026continue;4027} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {4028zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4029"Source pool must be composed only of mirrors\n"));4030retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);4031goto out;4032}40334034if (nvlist_lookup_string(child[c],4035ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {4036if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)4037is_special = B_TRUE;4038else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)4039is_dedup = B_TRUE;4040}4041verify(nvlist_lookup_nvlist_array(child[c],4042ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);40434044/* find or add an entry for this top-level vdev */4045if (newchildren > 0 &&4046(entry = find_vdev_entry(zhp, mchild, mchildren,4047newchild, newchildren)) >= 0) {4048/* We found a disk that the user specified. */4049vdev = mchild[entry];4050++found;4051} else {4052/* User didn't specify a disk for this vdev. */4053vdev = mchild[mchildren - 1];4054}40554056if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)4057goto out;40584059if (flags.dryrun != 0) {4060if (is_dedup == B_TRUE) {4061if (nvlist_add_string(varray[vcount - 1],4062ZPOOL_CONFIG_ALLOCATION_BIAS,4063VDEV_ALLOC_BIAS_DEDUP) != 0)4064goto out;4065} else if (is_special == B_TRUE) {4066if (nvlist_add_string(varray[vcount - 1],4067ZPOOL_CONFIG_ALLOCATION_BIAS,4068VDEV_ALLOC_BIAS_SPECIAL) != 0)4069goto out;4070}4071}4072}40734074/* did we find every disk the user specified? */4075if (found != newchildren) {4076zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "4077"include at most one disk from each mirror"));4078retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);4079goto out;4080}40814082/* Prepare the nvlist for populating. */4083if (*newroot == NULL) {4084if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)4085goto out;4086freelist = B_TRUE;4087if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,4088VDEV_TYPE_ROOT) != 0)4089goto out;4090} else {4091verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);4092}40934094/* Add all the children we found */4095if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,4096(const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0)4097goto out;40984099/*4100* If we're just doing a dry run, exit now with success.4101*/4102if (flags.dryrun) {4103memory_err = B_FALSE;4104freelist = B_FALSE;4105goto out;4106}41074108/* now build up the config list & call the ioctl */4109if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)4110goto out;41114112if (nvlist_add_nvlist(newconfig,4113ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||4114nvlist_add_string(newconfig,4115ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||4116nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)4117goto out;41184119/*4120* The new pool is automatically part of the namespace unless we4121* explicitly export it.4122*/4123if (!flags.import)4124zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;4125(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));4126(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));4127zcmd_write_conf_nvlist(hdl, &zc, newconfig);4128if (zc_props != NULL)4129zcmd_write_src_nvlist(hdl, &zc, zc_props);41304131if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {4132retval = zpool_standard_error(hdl, errno, errbuf);4133goto out;4134}41354136freelist = B_FALSE;4137memory_err = B_FALSE;41384139out:4140if (varray != NULL) {4141int v;41424143for (v = 0; v < vcount; v++)4144nvlist_free(varray[v]);4145free(varray);4146}4147zcmd_free_nvlists(&zc);4148nvlist_free(zc_props);4149nvlist_free(newconfig);4150if (freelist) {4151nvlist_free(*newroot);4152*newroot = NULL;4153}41544155if (retval != 0)4156return (retval);41574158if (memory_err)4159return (no_memory(hdl));41604161return (0);4162}41634164/*4165* Remove the given device.4166*/4167int4168zpool_vdev_remove(zpool_handle_t *zhp, const char *path)4169{4170zfs_cmd_t zc = {"\0"};4171char errbuf[ERRBUFLEN];4172nvlist_t *tgt;4173boolean_t avail_spare, l2cache, islog;4174libzfs_handle_t *hdl = zhp->zpool_hdl;4175uint64_t version;41764177(void) snprintf(errbuf, sizeof (errbuf),4178dgettext(TEXT_DOMAIN, "cannot remove %s"), path);41794180if (zpool_is_draid_spare(path)) {4181zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4182"dRAID spares cannot be removed"));4183return (zfs_error(hdl, EZFS_NODEVICE, errbuf));4184}41854186(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));4187if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,4188&islog)) == NULL)4189return (zfs_error(hdl, EZFS_NODEVICE, errbuf));41904191version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);4192if (islog && version < SPA_VERSION_HOLES) {4193zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4194"pool must be upgraded to support log removal"));4195return (zfs_error(hdl, EZFS_BADVERSION, errbuf));4196}41974198zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);41994200if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)4201return (0);42024203switch (errno) {42044205case EALREADY:4206zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4207"removal for this vdev is already in progress."));4208(void) zfs_error(hdl, EZFS_BUSY, errbuf);4209break;42104211case EINVAL:4212zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4213"invalid config; all top-level vdevs must "4214"have the same sector size and not be raidz."));4215(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);4216break;42174218case EBUSY:4219if (islog) {4220zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4221"Mount encrypted datasets to replay logs."));4222} else {4223zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4224"Pool busy; removal may already be in progress"));4225}4226(void) zfs_error(hdl, EZFS_BUSY, errbuf);4227break;42284229case EACCES:4230if (islog) {4231zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4232"Mount encrypted datasets to replay logs."));4233(void) zfs_error(hdl, EZFS_BUSY, errbuf);4234} else {4235(void) zpool_standard_error(hdl, errno, errbuf);4236}4237break;42384239default:4240(void) zpool_standard_error(hdl, errno, errbuf);4241}4242return (-1);4243}42444245int4246zpool_vdev_remove_cancel(zpool_handle_t *zhp)4247{4248zfs_cmd_t zc = {{0}};4249char errbuf[ERRBUFLEN];4250libzfs_handle_t *hdl = zhp->zpool_hdl;42514252(void) snprintf(errbuf, sizeof (errbuf),4253dgettext(TEXT_DOMAIN, "cannot cancel removal"));42544255(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));4256zc.zc_cookie = 1;42574258if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)4259return (0);42604261return (zpool_standard_error(hdl, errno, errbuf));4262}42634264int4265zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,4266uint64_t *sizep)4267{4268char errbuf[ERRBUFLEN];4269nvlist_t *tgt;4270boolean_t avail_spare, l2cache, islog;4271libzfs_handle_t *hdl = zhp->zpool_hdl;42724273(void) snprintf(errbuf, sizeof (errbuf),4274dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),4275path);42764277if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,4278&islog)) == NULL)4279return (zfs_error(hdl, EZFS_NODEVICE, errbuf));42804281if (avail_spare || l2cache || islog) {4282*sizep = 0;4283return (0);4284}42854286if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {4287zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,4288"indirect size not available"));4289return (zfs_error(hdl, EINVAL, errbuf));4290}4291return (0);4292}42934294/*4295* Clear the errors for the pool, or the particular device if specified.4296*/4297int4298zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)4299{4300zfs_cmd_t zc = {"\0"};4301char errbuf[ERRBUFLEN];4302nvlist_t *tgt;4303zpool_load_policy_t policy;4304boolean_t avail_spare, l2cache;4305libzfs_handle_t *hdl = zhp->zpool_hdl;4306nvlist_t *nvi = NULL;4307int error;43084309if (path)4310(void) snprintf(errbuf, sizeof (errbuf),4311dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),4312path);4313else4314(void) snprintf(errbuf, sizeof (errbuf),4315dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),4316zhp->zpool_name);43174318(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));4319if (path) {4320if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,4321&l2cache, NULL)) == NULL)4322return (zfs_error(hdl, EZFS_NODEVICE, errbuf));43234324/*4325* Don't allow error clearing for hot spares. Do allow4326* error clearing for l2cache devices.4327*/4328if (avail_spare)4329return (zfs_error(hdl, EZFS_ISSPARE, errbuf));43304331zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);4332}43334334zpool_get_load_policy(rewindnvl, &policy);4335zc.zc_cookie = policy.zlp_rewind;43364337zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2);4338zcmd_write_src_nvlist(hdl, &zc, rewindnvl);43394340while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&4341errno == ENOMEM)4342zcmd_expand_dst_nvlist(hdl, &zc);43434344if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&4345errno != EPERM && errno != EACCES)) {4346if (policy.zlp_rewind &4347(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {4348(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);4349zpool_rewind_exclaim(hdl, zc.zc_name,4350((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),4351nvi);4352nvlist_free(nvi);4353}4354zcmd_free_nvlists(&zc);4355return (0);4356}43574358zcmd_free_nvlists(&zc);4359return (zpool_standard_error(hdl, errno, errbuf));4360}43614362/*4363* Similar to zpool_clear(), but takes a GUID (used by fmd).4364*/4365int4366zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)4367{4368zfs_cmd_t zc = {"\0"};4369char errbuf[ERRBUFLEN];4370libzfs_handle_t *hdl = zhp->zpool_hdl;43714372(void) snprintf(errbuf, sizeof (errbuf),4373dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),4374(u_longlong_t)guid);43754376(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));4377zc.zc_guid = guid;4378zc.zc_cookie = ZPOOL_NO_REWIND;43794380if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)4381return (0);43824383return (zpool_standard_error(hdl, errno, errbuf));4384}43854386/*4387* Change the GUID for a pool.4388*4389* Similar to zpool_reguid(), but may take a GUID.4390*4391* If the guid argument is NULL, then no GUID is passed in the nvlist to the4392* ioctl().4393*/4394int4395zpool_set_guid(zpool_handle_t *zhp, const uint64_t *guid)4396{4397char errbuf[ERRBUFLEN];4398libzfs_handle_t *hdl = zhp->zpool_hdl;4399nvlist_t *nvl = NULL;4400zfs_cmd_t zc = {"\0"};4401int error;44024403if (guid != NULL) {4404if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)4405return (no_memory(hdl));44064407if (nvlist_add_uint64(nvl, ZPOOL_REGUID_GUID, *guid) != 0) {4408nvlist_free(nvl);4409return (no_memory(hdl));4410}44114412zcmd_write_src_nvlist(hdl, &zc, nvl);4413}44144415(void) snprintf(errbuf, sizeof (errbuf),4416dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);44174418(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));4419error = zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc);4420if (error) {4421return (zpool_standard_error(hdl, errno, errbuf));4422}4423if (guid != NULL) {4424zcmd_free_nvlists(&zc);4425nvlist_free(nvl);4426}4427return (0);4428}44294430/*4431* Change the GUID for a pool.4432*/4433int4434zpool_reguid(zpool_handle_t *zhp)4435{4436return (zpool_set_guid(zhp, NULL));4437}44384439/*4440* Reopen the pool.4441*/4442int4443zpool_reopen_one(zpool_handle_t *zhp, void *data)4444{4445libzfs_handle_t *hdl = zpool_get_handle(zhp);4446const char *pool_name = zpool_get_name(zhp);4447boolean_t *scrub_restart = data;4448int error;44494450error = lzc_reopen(pool_name, *scrub_restart);4451if (error) {4452return (zpool_standard_error_fmt(hdl, error,4453dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));4454}44554456return (0);4457}44584459/* call into libzfs_core to execute the sync IOCTL per pool */4460int4461zpool_sync_one(zpool_handle_t *zhp, void *data)4462{4463int ret;4464libzfs_handle_t *hdl = zpool_get_handle(zhp);4465const char *pool_name = zpool_get_name(zhp);4466boolean_t *force = data;4467nvlist_t *innvl = fnvlist_alloc();44684469fnvlist_add_boolean_value(innvl, "force", *force);4470if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {4471nvlist_free(innvl);4472return (zpool_standard_error_fmt(hdl, ret,4473dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));4474}4475nvlist_free(innvl);44764477return (0);4478}44794480#define PATH_BUF_LEN 6444814482/*4483* Given a vdev, return the name to display in iostat. If the vdev has a path,4484* we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.4485* We also check if this is a whole disk, in which case we strip off the4486* trailing 's0' slice name.4487*4488* This routine is also responsible for identifying when disks have been4489* reconfigured in a new location. The kernel will have opened the device by4490* devid, but the path will still refer to the old location. To catch this, we4491* first do a path -> devid translation (which is fast for the common case). If4492* the devid matches, we're done. If not, we do a reverse devid -> path4493* translation and issue the appropriate ioctl() to update the path of the vdev.4494* If 'zhp' is NULL, then this is an exported pool, and we don't need to do any4495* of these checks.4496*/4497char *4498zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,4499int name_flags)4500{4501const char *type, *tpath;4502const char *path;4503uint64_t value;4504char buf[PATH_BUF_LEN];4505char tmpbuf[PATH_BUF_LEN * 2];45064507/*4508* vdev_name will be "root"/"root-0" for the root vdev, but it is the4509* zpool name that will be displayed to the user.4510*/4511type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);4512if (zhp != NULL && strcmp(type, "root") == 0)4513return (zfs_strdup(hdl, zpool_get_name(zhp)));45144515if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_PATH"))4516name_flags |= VDEV_NAME_PATH;4517if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_GUID"))4518name_flags |= VDEV_NAME_GUID;4519if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_FOLLOW_LINKS"))4520name_flags |= VDEV_NAME_FOLLOW_LINKS;45214522if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||4523name_flags & VDEV_NAME_GUID) {4524(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);4525(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);4526path = buf;4527} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tpath) == 0) {4528path = tpath;45294530if (name_flags & VDEV_NAME_FOLLOW_LINKS) {4531char *rp = realpath(path, NULL);4532if (rp) {4533strlcpy(buf, rp, sizeof (buf));4534path = buf;4535free(rp);4536}4537}45384539/*4540* For a block device only use the name.4541*/4542if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&4543!(name_flags & VDEV_NAME_PATH)) {4544path = zfs_strip_path(path);4545}45464547/*4548* Remove the partition from the path if this is a whole disk.4549*/4550if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&4551nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)4552== 0 && value && !(name_flags & VDEV_NAME_PATH)) {4553return (zfs_strip_partition(path));4554}4555} else {4556path = type;45574558/*4559* If it's a raidz device, we need to stick in the parity level.4560*/4561if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {4562value = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY);4563(void) snprintf(buf, sizeof (buf), "%s%llu", path,4564(u_longlong_t)value);4565path = buf;4566}45674568/*4569* If it's a dRAID device, we add parity, groups, and spares.4570*/4571if (strcmp(path, VDEV_TYPE_DRAID) == 0) {4572uint64_t ndata, nparity, nspares;4573nvlist_t **child;4574uint_t children;45754576verify(nvlist_lookup_nvlist_array(nv,4577ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);4578nparity = fnvlist_lookup_uint64(nv,4579ZPOOL_CONFIG_NPARITY);4580ndata = fnvlist_lookup_uint64(nv,4581ZPOOL_CONFIG_DRAID_NDATA);4582nspares = fnvlist_lookup_uint64(nv,4583ZPOOL_CONFIG_DRAID_NSPARES);45844585path = zpool_draid_name(buf, sizeof (buf), ndata,4586nparity, nspares, children);4587}45884589/*4590* We identify each top-level vdev by using a <type-id>4591* naming convention.4592*/4593if (name_flags & VDEV_NAME_TYPE_ID) {4594uint64_t id = fnvlist_lookup_uint64(nv,4595ZPOOL_CONFIG_ID);4596(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",4597path, (u_longlong_t)id);4598path = tmpbuf;4599}4600}46014602return (zfs_strdup(hdl, path));4603}46044605static int4606zbookmark_mem_compare(const void *a, const void *b)4607{4608return (memcmp(a, b, sizeof (zbookmark_phys_t)));4609}46104611void4612zpool_add_propname(zpool_handle_t *zhp, const char *propname)4613{4614assert(zhp->zpool_n_propnames < ZHP_MAX_PROPNAMES);4615zhp->zpool_propnames[zhp->zpool_n_propnames] = propname;4616zhp->zpool_n_propnames++;4617}46184619/*4620* Retrieve the persistent error log, uniquify the members, and return to the4621* caller.4622*/4623int4624zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)4625{4626zfs_cmd_t zc = {"\0"};4627libzfs_handle_t *hdl = zhp->zpool_hdl;4628zbookmark_phys_t *buf;4629uint64_t buflen = 10000; /* approx. 1MB of RAM */46304631if (fnvlist_lookup_uint64(zhp->zpool_config,4632ZPOOL_CONFIG_ERRCOUNT) == 0)4633return (0);46344635/*4636* Retrieve the raw error list from the kernel. If it doesn't fit,4637* allocate a larger buffer and retry.4638*/4639(void) strcpy(zc.zc_name, zhp->zpool_name);4640for (;;) {4641buf = zfs_alloc(zhp->zpool_hdl,4642buflen * sizeof (zbookmark_phys_t));4643zc.zc_nvlist_dst = (uintptr_t)buf;4644zc.zc_nvlist_dst_size = buflen;4645if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,4646&zc) != 0) {4647free(buf);4648if (errno == ENOMEM) {4649buflen *= 2;4650} else {4651return (zpool_standard_error_fmt(hdl, errno,4652dgettext(TEXT_DOMAIN, "errors: List of "4653"errors unavailable")));4654}4655} else {4656break;4657}4658}46594660/*4661* Sort the resulting bookmarks. This is a little confusing due to the4662* implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last4663* to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks4664* _not_ copied as part of the process. So we point the start of our4665* array appropriate and decrement the total number of elements.4666*/4667zbookmark_phys_t *zb = buf + zc.zc_nvlist_dst_size;4668uint64_t zblen = buflen - zc.zc_nvlist_dst_size;46694670qsort(zb, zblen, sizeof (zbookmark_phys_t), zbookmark_mem_compare);46714672verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);46734674/*4675* Fill in the nverrlistp with nvlist's of dataset and object numbers.4676*/4677for (uint64_t i = 0; i < zblen; i++) {4678nvlist_t *nv;46794680/* ignoring zb_blkid and zb_level for now */4681if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&4682zb[i-1].zb_object == zb[i].zb_object)4683continue;46844685if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)4686goto nomem;4687if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,4688zb[i].zb_objset) != 0) {4689nvlist_free(nv);4690goto nomem;4691}4692if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,4693zb[i].zb_object) != 0) {4694nvlist_free(nv);4695goto nomem;4696}4697if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {4698nvlist_free(nv);4699goto nomem;4700}4701nvlist_free(nv);4702}47034704free(buf);4705return (0);47064707nomem:4708free(buf);4709return (no_memory(zhp->zpool_hdl));4710}47114712/*4713* Upgrade a ZFS pool to the latest on-disk version.4714*/4715int4716zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)4717{4718zfs_cmd_t zc = {"\0"};4719libzfs_handle_t *hdl = zhp->zpool_hdl;47204721(void) strcpy(zc.zc_name, zhp->zpool_name);4722zc.zc_cookie = new_version;47234724if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)4725return (zpool_standard_error_fmt(hdl, errno,4726dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),4727zhp->zpool_name));4728return (0);4729}47304731void4732zfs_save_arguments(int argc, char **argv, char *string, int len)4733{4734int i;47354736(void) strlcpy(string, zfs_basename(argv[0]), len);4737for (i = 1; i < argc; i++) {4738(void) strlcat(string, " ", len);4739(void) strlcat(string, argv[i], len);4740}4741}47424743int4744zpool_log_history(libzfs_handle_t *hdl, const char *message)4745{4746zfs_cmd_t zc = {"\0"};4747nvlist_t *args;47484749args = fnvlist_alloc();4750fnvlist_add_string(args, "message", message);4751zcmd_write_src_nvlist(hdl, &zc, args);4752int err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);4753nvlist_free(args);4754zcmd_free_nvlists(&zc);4755return (err);4756}47574758/*4759* Perform ioctl to get some command history of a pool.4760*4761* 'buf' is the buffer to fill up to 'len' bytes. 'off' is the4762* logical offset of the history buffer to start reading from.4763*4764* Upon return, 'off' is the next logical offset to read from and4765* 'len' is the actual amount of bytes read into 'buf'.4766*/4767static int4768get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)4769{4770zfs_cmd_t zc = {"\0"};4771libzfs_handle_t *hdl = zhp->zpool_hdl;47724773(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));47744775zc.zc_history = (uint64_t)(uintptr_t)buf;4776zc.zc_history_len = *len;4777zc.zc_history_offset = *off;47784779if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {4780switch (errno) {4781case EPERM:4782return (zfs_error_fmt(hdl, EZFS_PERM,4783dgettext(TEXT_DOMAIN,4784"cannot show history for pool '%s'"),4785zhp->zpool_name));4786case ENOENT:4787return (zfs_error_fmt(hdl, EZFS_NOHISTORY,4788dgettext(TEXT_DOMAIN, "cannot get history for pool "4789"'%s'"), zhp->zpool_name));4790case ENOTSUP:4791return (zfs_error_fmt(hdl, EZFS_BADVERSION,4792dgettext(TEXT_DOMAIN, "cannot get history for pool "4793"'%s', pool must be upgraded"), zhp->zpool_name));4794default:4795return (zpool_standard_error_fmt(hdl, errno,4796dgettext(TEXT_DOMAIN,4797"cannot get history for '%s'"), zhp->zpool_name));4798}4799}48004801*len = zc.zc_history_len;4802*off = zc.zc_history_offset;48034804return (0);4805}48064807/*4808* Retrieve the command history of a pool.4809*/4810int4811zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,4812boolean_t *eof)4813{4814libzfs_handle_t *hdl = zhp->zpool_hdl;4815char *buf;4816int buflen = 128 * 1024;4817nvlist_t **records = NULL;4818uint_t numrecords = 0;4819int err = 0, i;4820uint64_t start = *off;48214822buf = zfs_alloc(hdl, buflen);48234824/* process about 1MiB a time */4825while (*off - start < 1024 * 1024) {4826uint64_t bytes_read = buflen;4827uint64_t leftover;48284829if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)4830break;48314832/* if nothing else was read in, we're at EOF, just return */4833if (!bytes_read) {4834*eof = B_TRUE;4835break;4836}48374838if ((err = zpool_history_unpack(buf, bytes_read,4839&leftover, &records, &numrecords)) != 0) {4840zpool_standard_error_fmt(hdl, err,4841dgettext(TEXT_DOMAIN,4842"cannot get history for '%s'"), zhp->zpool_name);4843break;4844}4845*off -= leftover;4846if (leftover == bytes_read) {4847/*4848* no progress made, because buffer is not big enough4849* to hold this record; resize and retry.4850*/4851buflen *= 2;4852free(buf);4853buf = zfs_alloc(hdl, buflen);4854}4855}48564857free(buf);48584859if (!err) {4860*nvhisp = fnvlist_alloc();4861fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,4862(const nvlist_t **)records, numrecords);4863}4864for (i = 0; i < numrecords; i++)4865nvlist_free(records[i]);4866free(records);48674868return (err);4869}48704871/*4872* Retrieve the next event given the passed 'zevent_fd' file descriptor.4873* If there is a new event available 'nvp' will contain a newly allocated4874* nvlist and 'dropped' will be set to the number of missed events since4875* the last call to this function. When 'nvp' is set to NULL it indicates4876* no new events are available. In either case the function returns 0 and4877* it is up to the caller to free 'nvp'. In the case of a fatal error the4878* function will return a non-zero value. When the function is called in4879* blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),4880* it will not return until a new event is available.4881*/4882int4883zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,4884int *dropped, unsigned flags, int zevent_fd)4885{4886zfs_cmd_t zc = {"\0"};4887int error = 0;48884889*nvp = NULL;4890*dropped = 0;4891zc.zc_cleanup_fd = zevent_fd;48924893if (flags & ZEVENT_NONBLOCK)4894zc.zc_guid = ZEVENT_NONBLOCK;48954896zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE);48974898retry:4899if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {4900switch (errno) {4901case ESHUTDOWN:4902error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,4903dgettext(TEXT_DOMAIN, "zfs shutdown"));4904goto out;4905case ENOENT:4906/* Blocking error case should not occur */4907if (!(flags & ZEVENT_NONBLOCK))4908error = zpool_standard_error_fmt(hdl, errno,4909dgettext(TEXT_DOMAIN, "cannot get event"));49104911goto out;4912case ENOMEM:4913zcmd_expand_dst_nvlist(hdl, &zc);4914goto retry;4915default:4916error = zpool_standard_error_fmt(hdl, errno,4917dgettext(TEXT_DOMAIN, "cannot get event"));4918goto out;4919}4920}49214922error = zcmd_read_dst_nvlist(hdl, &zc, nvp);4923if (error != 0)4924goto out;49254926*dropped = (int)zc.zc_cookie;4927out:4928zcmd_free_nvlists(&zc);49294930return (error);4931}49324933/*4934* Clear all events.4935*/4936int4937zpool_events_clear(libzfs_handle_t *hdl, int *count)4938{4939zfs_cmd_t zc = {"\0"};49404941if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)4942return (zpool_standard_error(hdl, errno,4943dgettext(TEXT_DOMAIN, "cannot clear events")));49444945if (count != NULL)4946*count = (int)zc.zc_cookie; /* # of events cleared */49474948return (0);4949}49504951/*4952* Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for4953* the passed zevent_fd file handle. On success zero is returned,4954* otherwise -1 is returned and hdl->libzfs_error is set to the errno.4955*/4956int4957zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)4958{4959zfs_cmd_t zc = {"\0"};4960int error = 0;49614962zc.zc_guid = eid;4963zc.zc_cleanup_fd = zevent_fd;49644965if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {4966switch (errno) {4967case ENOENT:4968error = zfs_error_fmt(hdl, EZFS_NOENT,4969dgettext(TEXT_DOMAIN, "cannot get event"));4970break;49714972case ENOMEM:4973error = zfs_error_fmt(hdl, EZFS_NOMEM,4974dgettext(TEXT_DOMAIN, "cannot get event"));4975break;49764977default:4978error = zpool_standard_error_fmt(hdl, errno,4979dgettext(TEXT_DOMAIN, "cannot get event"));4980break;4981}4982}49834984return (error);4985}49864987static void4988zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,4989char *pathname, size_t len, boolean_t always_unmounted)4990{4991zfs_cmd_t zc = {"\0"};4992boolean_t mounted = B_FALSE;4993char *mntpnt = NULL;4994char dsname[ZFS_MAX_DATASET_NAME_LEN];49954996if (dsobj == 0) {4997/* special case for the MOS */4998(void) snprintf(pathname, len, "<metadata>:<0x%llx>",4999(longlong_t)obj);5000return;5001}50025003/* get the dataset's name */5004(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));5005zc.zc_obj = dsobj;5006if (zfs_ioctl(zhp->zpool_hdl,5007ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {5008/* just write out a path of two object numbers */5009(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",5010(longlong_t)dsobj, (longlong_t)obj);5011return;5012}5013(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));50145015/* find out if the dataset is mounted */5016mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,5017&mntpnt);50185019/* get the corrupted object's path */5020(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));5021zc.zc_obj = obj;5022if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,5023&zc) == 0) {5024if (mounted) {5025(void) snprintf(pathname, len, "%s%s", mntpnt,5026zc.zc_value);5027} else {5028(void) snprintf(pathname, len, "%s:%s",5029dsname, zc.zc_value);5030}5031} else {5032(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,5033(longlong_t)obj);5034}5035free(mntpnt);5036}50375038void5039zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,5040char *pathname, size_t len)5041{5042zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);5043}50445045void5046zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,5047char *pathname, size_t len)5048{5049zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);5050}5051/*5052* Wait while the specified activity is in progress in the pool.5053*/5054int5055zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)5056{5057boolean_t missing;50585059int error = zpool_wait_status(zhp, activity, &missing, NULL);50605061if (missing) {5062(void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,5063dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),5064zhp->zpool_name);5065return (ENOENT);5066} else {5067return (error);5068}5069}50705071/*5072* Wait for the given activity and return the status of the wait (whether or not5073* any waiting was done) in the 'waited' parameter. Non-existent pools are5074* reported via the 'missing' parameter, rather than by printing an error5075* message. This is convenient when this function is called in a loop over a5076* long period of time (as it is, for example, by zpool's wait cmd). In that5077* scenario, a pool being exported or destroyed should be considered a normal5078* event, so we don't want to print an error when we find that the pool doesn't5079* exist.5080*/5081int5082zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,5083boolean_t *missing, boolean_t *waited)5084{5085int error = lzc_wait(zhp->zpool_name, activity, waited);5086*missing = (error == ENOENT);5087if (*missing)5088return (0);50895090if (error != 0) {5091(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,5092dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),5093zhp->zpool_name);5094}50955096return (error);5097}50985099int5100zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)5101{5102int error = lzc_set_bootenv(zhp->zpool_name, envmap);5103if (error != 0) {5104(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,5105dgettext(TEXT_DOMAIN,5106"error setting bootenv in pool '%s'"), zhp->zpool_name);5107}51085109return (error);5110}51115112int5113zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)5114{5115nvlist_t *nvl;5116int error;51175118nvl = NULL;5119error = lzc_get_bootenv(zhp->zpool_name, &nvl);5120if (error != 0) {5121(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,5122dgettext(TEXT_DOMAIN,5123"error getting bootenv in pool '%s'"), zhp->zpool_name);5124} else {5125*nvlp = nvl;5126}51275128return (error);5129}51305131/*5132* Attempt to read and parse feature file(s) (from "compatibility" property).5133* Files contain zpool feature names, comma or whitespace-separated.5134* Comments (# character to next newline) are discarded.5135*5136* Arguments:5137* compatibility : string containing feature filenames5138* features : either NULL or pointer to array of boolean5139* report : either NULL or pointer to string buffer5140* rlen : length of "report" buffer5141*5142* compatibility is NULL (unset), "", "off", "legacy", or list of5143* comma-separated filenames. filenames should either be absolute,5144* or relative to:5145* 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or5146* 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).5147* (Unset), "" or "off" => enable all features5148* "legacy" => disable all features5149*5150* Any feature names read from files which match unames in spa_feature_table5151* will have the corresponding boolean set in the features array (if non-NULL).5152* If more than one feature set specified, only features present in *all* of5153* them will be set.5154*5155* "report" if not NULL will be populated with a suitable status message.5156*5157* Return values:5158* ZPOOL_COMPATIBILITY_OK : files read and parsed ok5159* ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file5160* ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name5161* ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name5162* ZPOOL_COMPATIBILITY_NOFILES : no feature files found5163*/5164zpool_compat_status_t5165zpool_load_compat(const char *compat, boolean_t *features, char *report,5166size_t rlen)5167{5168int sdirfd, ddirfd, featfd;5169struct stat fs;5170char *fc;5171char *ps, *ls, *ws;5172char *file, *line, *word;51735174char l_compat[ZFS_MAXPROPLEN];51755176boolean_t ret_nofiles = B_TRUE;5177boolean_t ret_badfile = B_FALSE;5178boolean_t ret_badtoken = B_FALSE;5179boolean_t ret_warntoken = B_FALSE;51805181/* special cases (unset), "" and "off" => enable all features */5182if (compat == NULL || compat[0] == '\0' ||5183strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {5184if (features != NULL) {5185for (uint_t i = 0; i < SPA_FEATURES; i++)5186features[i] = B_TRUE;5187}5188if (report != NULL)5189strlcpy(report, gettext("all features enabled"), rlen);5190return (ZPOOL_COMPATIBILITY_OK);5191}51925193/* Final special case "legacy" => disable all features */5194if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {5195if (features != NULL)5196for (uint_t i = 0; i < SPA_FEATURES; i++)5197features[i] = B_FALSE;5198if (report != NULL)5199strlcpy(report, gettext("all features disabled"), rlen);5200return (ZPOOL_COMPATIBILITY_OK);5201}52025203/*5204* Start with all true; will be ANDed with results from each file5205*/5206if (features != NULL)5207for (uint_t i = 0; i < SPA_FEATURES; i++)5208features[i] = B_TRUE;52095210char err_badfile[ZFS_MAXPROPLEN] = "";5211char err_badtoken[ZFS_MAXPROPLEN] = "";52125213/*5214* We ignore errors from the directory open()5215* as they're only needed if the filename is relative5216* which will be checked during the openat().5217*/52185219/* O_PATH safer than O_RDONLY if system allows it */5220#if defined(O_PATH)5221#define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)5222#else5223#define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)5224#endif52255226sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);5227ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);52285229(void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);52305231for (file = strtok_r(l_compat, ",", &ps);5232file != NULL;5233file = strtok_r(NULL, ",", &ps)) {52345235boolean_t l_features[SPA_FEATURES];52365237enum { Z_SYSCONF, Z_DATA } source;52385239/* try sysconfdir first, then datadir */5240source = Z_SYSCONF;5241if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {5242featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);5243source = Z_DATA;5244}52455246/* File readable and correct size? */5247if (featfd < 0 ||5248fstat(featfd, &fs) < 0 ||5249fs.st_size < 1 ||5250fs.st_size > ZPOOL_COMPAT_MAXSIZE) {5251(void) close(featfd);5252strlcat(err_badfile, file, ZFS_MAXPROPLEN);5253strlcat(err_badfile, " ", ZFS_MAXPROPLEN);5254ret_badfile = B_TRUE;5255continue;5256}52575258/* Prefault the file if system allows */5259#if defined(MAP_POPULATE)5260#define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)5261#elif defined(MAP_PREFAULT_READ)5262#define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)5263#else5264#define ZC_MMAP_FLAGS (MAP_PRIVATE)5265#endif52665267/* private mmap() so we can strtok safely */5268fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,5269ZC_MMAP_FLAGS, featfd, 0);5270(void) close(featfd);52715272/* map ok, and last character == newline? */5273if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {5274(void) munmap((void *) fc, fs.st_size);5275strlcat(err_badfile, file, ZFS_MAXPROPLEN);5276strlcat(err_badfile, " ", ZFS_MAXPROPLEN);5277ret_badfile = B_TRUE;5278continue;5279}52805281ret_nofiles = B_FALSE;52825283for (uint_t i = 0; i < SPA_FEATURES; i++)5284l_features[i] = B_FALSE;52855286/* replace final newline with NULL to ensure string ends */5287fc[fs.st_size - 1] = '\0';52885289for (line = strtok_r(fc, "\n", &ls);5290line != NULL;5291line = strtok_r(NULL, "\n", &ls)) {5292/* discard comments */5293char *r = strchr(line, '#');5294if (r != NULL)5295*r = '\0';52965297for (word = strtok_r(line, ", \t", &ws);5298word != NULL;5299word = strtok_r(NULL, ", \t", &ws)) {5300/* Find matching feature name */5301uint_t f;5302for (f = 0; f < SPA_FEATURES; f++) {5303zfeature_info_t *fi =5304&spa_feature_table[f];5305if (strcmp(word, fi->fi_uname) == 0) {5306l_features[f] = B_TRUE;5307break;5308}5309}5310if (f < SPA_FEATURES)5311continue;53125313/* found an unrecognized word */5314/* lightly sanitize it */5315if (strlen(word) > 32)5316word[32] = '\0';5317for (char *c = word; *c != '\0'; c++)5318if (!isprint(*c))5319*c = '?';53205321strlcat(err_badtoken, word, ZFS_MAXPROPLEN);5322strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);5323if (source == Z_SYSCONF)5324ret_badtoken = B_TRUE;5325else5326ret_warntoken = B_TRUE;5327}5328}5329(void) munmap((void *) fc, fs.st_size);53305331if (features != NULL)5332for (uint_t i = 0; i < SPA_FEATURES; i++)5333features[i] &= l_features[i];5334}5335(void) close(sdirfd);5336(void) close(ddirfd);53375338/* Return the most serious error */5339if (ret_badfile) {5340if (report != NULL)5341snprintf(report, rlen, gettext("could not read/"5342"parse feature file(s): %s"), err_badfile);5343return (ZPOOL_COMPATIBILITY_BADFILE);5344}5345if (ret_nofiles) {5346if (report != NULL)5347strlcpy(report,5348gettext("no valid compatibility files specified"),5349rlen);5350return (ZPOOL_COMPATIBILITY_NOFILES);5351}5352if (ret_badtoken) {5353if (report != NULL)5354snprintf(report, rlen, gettext("invalid feature "5355"name(s) in local compatibility files: %s"),5356err_badtoken);5357return (ZPOOL_COMPATIBILITY_BADTOKEN);5358}5359if (ret_warntoken) {5360if (report != NULL)5361snprintf(report, rlen, gettext("unrecognized feature "5362"name(s) in distribution compatibility files: %s"),5363err_badtoken);5364return (ZPOOL_COMPATIBILITY_WARNTOKEN);5365}5366if (report != NULL)5367strlcpy(report, gettext("compatibility set ok"), rlen);5368return (ZPOOL_COMPATIBILITY_OK);5369}53705371static int5372zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid)5373{5374nvlist_t *tgt;5375boolean_t avail_spare, l2cache;53765377verify(zhp != NULL);5378if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {5379char errbuf[ERRBUFLEN];5380(void) snprintf(errbuf, sizeof (errbuf),5381dgettext(TEXT_DOMAIN, "pool is in an unavailable state"));5382return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf));5383}53845385if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache,5386NULL)) == NULL) {5387char errbuf[ERRBUFLEN];5388(void) snprintf(errbuf, sizeof (errbuf),5389dgettext(TEXT_DOMAIN, "can not find %s in %s"),5390vdevname, zhp->zpool_name);5391return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf));5392}53935394*vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);5395return (0);5396}53975398/*5399* Get a vdev property value for 'prop' and return the value in5400* a pre-allocated buffer.5401*/5402int5403zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,5404char *buf, size_t len, zprop_source_t *srctype, boolean_t literal)5405{5406nvlist_t *nv;5407const char *strval;5408uint64_t intval;5409zprop_source_t src = ZPROP_SRC_NONE;54105411if (prop == VDEV_PROP_USERPROP) {5412/* user property, prop_name must contain the property name */5413assert(prop_name != NULL);5414if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {5415src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);5416strval = fnvlist_lookup_string(nv, ZPROP_VALUE);5417} else {5418/* user prop not found */5419src = ZPROP_SRC_DEFAULT;5420strval = "-";5421}5422(void) strlcpy(buf, strval, len);5423if (srctype)5424*srctype = src;5425return (0);5426}54275428if (prop_name == NULL)5429prop_name = (char *)vdev_prop_to_name(prop);54305431switch (vdev_prop_get_type(prop)) {5432case PROP_TYPE_STRING:5433if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {5434src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);5435strval = fnvlist_lookup_string(nv, ZPROP_VALUE);5436} else {5437src = ZPROP_SRC_DEFAULT;5438if ((strval = vdev_prop_default_string(prop)) == NULL)5439strval = "-";5440}5441(void) strlcpy(buf, strval, len);5442break;54435444case PROP_TYPE_NUMBER:5445if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {5446src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);5447intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);5448} else {5449src = ZPROP_SRC_DEFAULT;5450intval = vdev_prop_default_numeric(prop);5451}54525453switch (prop) {5454case VDEV_PROP_ASIZE:5455case VDEV_PROP_PSIZE:5456case VDEV_PROP_SIZE:5457case VDEV_PROP_BOOTSIZE:5458case VDEV_PROP_ALLOCATED:5459case VDEV_PROP_FREE:5460case VDEV_PROP_READ_ERRORS:5461case VDEV_PROP_WRITE_ERRORS:5462case VDEV_PROP_CHECKSUM_ERRORS:5463case VDEV_PROP_INITIALIZE_ERRORS:5464case VDEV_PROP_TRIM_ERRORS:5465case VDEV_PROP_SLOW_IOS:5466case VDEV_PROP_OPS_NULL:5467case VDEV_PROP_OPS_READ:5468case VDEV_PROP_OPS_WRITE:5469case VDEV_PROP_OPS_FREE:5470case VDEV_PROP_OPS_CLAIM:5471case VDEV_PROP_OPS_TRIM:5472case VDEV_PROP_BYTES_NULL:5473case VDEV_PROP_BYTES_READ:5474case VDEV_PROP_BYTES_WRITE:5475case VDEV_PROP_BYTES_FREE:5476case VDEV_PROP_BYTES_CLAIM:5477case VDEV_PROP_BYTES_TRIM:5478if (literal) {5479(void) snprintf(buf, len, "%llu",5480(u_longlong_t)intval);5481} else {5482(void) zfs_nicenum(intval, buf, len);5483}5484break;5485case VDEV_PROP_EXPANDSZ:5486if (intval == 0) {5487(void) strlcpy(buf, "-", len);5488} else if (literal) {5489(void) snprintf(buf, len, "%llu",5490(u_longlong_t)intval);5491} else {5492(void) zfs_nicenum(intval, buf, len);5493}5494break;5495case VDEV_PROP_CAPACITY:5496if (literal) {5497(void) snprintf(buf, len, "%llu",5498(u_longlong_t)intval);5499} else {5500(void) snprintf(buf, len, "%llu%%",5501(u_longlong_t)intval);5502}5503break;5504case VDEV_PROP_CHECKSUM_N:5505case VDEV_PROP_CHECKSUM_T:5506case VDEV_PROP_IO_N:5507case VDEV_PROP_IO_T:5508case VDEV_PROP_SLOW_IO_N:5509case VDEV_PROP_SLOW_IO_T:5510if (intval == UINT64_MAX) {5511(void) strlcpy(buf, "-", len);5512} else {5513(void) snprintf(buf, len, "%llu",5514(u_longlong_t)intval);5515}5516break;5517case VDEV_PROP_FRAGMENTATION:5518if (intval == UINT64_MAX) {5519(void) strlcpy(buf, "-", len);5520} else {5521(void) snprintf(buf, len, "%llu%%",5522(u_longlong_t)intval);5523}5524break;5525case VDEV_PROP_STATE:5526if (literal) {5527(void) snprintf(buf, len, "%llu",5528(u_longlong_t)intval);5529} else {5530(void) strlcpy(buf, zpool_state_to_name(intval,5531VDEV_AUX_NONE), len);5532}5533break;5534default:5535(void) snprintf(buf, len, "%llu",5536(u_longlong_t)intval);5537}5538break;55395540case PROP_TYPE_INDEX:5541if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {5542src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);5543intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);5544} else {5545/* 'trim_support' only valid for leaf vdevs */5546if (prop == VDEV_PROP_TRIM_SUPPORT) {5547(void) strlcpy(buf, "-", len);5548break;5549}5550src = ZPROP_SRC_DEFAULT;5551intval = vdev_prop_default_numeric(prop);5552/* Only use if provided by the RAIDZ VDEV above */5553if (prop == VDEV_PROP_RAIDZ_EXPANDING)5554return (ENOENT);5555if (prop == VDEV_PROP_SIT_OUT)5556return (ENOENT);5557}5558if (vdev_prop_index_to_string(prop, intval,5559(const char **)&strval) != 0)5560return (-1);5561(void) strlcpy(buf, strval, len);5562break;55635564default:5565abort();5566}55675568if (srctype)5569*srctype = src;55705571return (0);5572}55735574/*5575* Get a vdev property value for 'prop_name' and return the value in5576* a pre-allocated buffer.5577*/5578int5579zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop,5580char *prop_name, char *buf, size_t len, zprop_source_t *srctype,5581boolean_t literal)5582{5583nvlist_t *reqnvl, *reqprops;5584nvlist_t *retprops = NULL;5585uint64_t vdev_guid = 0;5586int ret;55875588if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)5589return (ret);55905591if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0)5592return (no_memory(zhp->zpool_hdl));5593if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0)5594return (no_memory(zhp->zpool_hdl));55955596fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);55975598if (prop != VDEV_PROP_USERPROP) {5599/* prop_name overrides prop value */5600if (prop_name != NULL)5601prop = vdev_name_to_prop(prop_name);5602else5603prop_name = (char *)vdev_prop_to_name(prop);5604assert(prop < VDEV_NUM_PROPS);5605}56065607assert(prop_name != NULL);5608if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) {5609nvlist_free(reqnvl);5610nvlist_free(reqprops);5611return (no_memory(zhp->zpool_hdl));5612}56135614fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops);56155616ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops);56175618if (ret == 0) {5619ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf,5620len, srctype, literal);5621} else {5622char errbuf[ERRBUFLEN];5623(void) snprintf(errbuf, sizeof (errbuf),5624dgettext(TEXT_DOMAIN, "cannot get vdev property %s from"5625" %s in %s"), prop_name, vdevname, zhp->zpool_name);5626(void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);5627}56285629nvlist_free(reqnvl);5630nvlist_free(reqprops);5631nvlist_free(retprops);56325633return (ret);5634}56355636/*5637* Get all vdev properties5638*/5639int5640zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname,5641nvlist_t **outnvl)5642{5643nvlist_t *nvl = NULL;5644uint64_t vdev_guid = 0;5645int ret;56465647if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)5648return (ret);56495650if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)5651return (no_memory(zhp->zpool_hdl));56525653fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);56545655ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl);56565657nvlist_free(nvl);56585659if (ret) {5660char errbuf[ERRBUFLEN];5661(void) snprintf(errbuf, sizeof (errbuf),5662dgettext(TEXT_DOMAIN, "cannot get vdev properties for"5663" %s in %s"), vdevname, zhp->zpool_name);5664(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);5665}56665667return (ret);5668}56695670/*5671* Set vdev property5672*/5673int5674zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,5675const char *propname, const char *propval)5676{5677int ret;5678nvlist_t *nvl = NULL;5679nvlist_t *outnvl = NULL;5680nvlist_t *props;5681nvlist_t *realprops;5682prop_flags_t flags = { 0 };5683uint64_t version;5684uint64_t vdev_guid;56855686if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)5687return (ret);56885689if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)5690return (no_memory(zhp->zpool_hdl));5691if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)5692return (no_memory(zhp->zpool_hdl));56935694fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);56955696if (nvlist_add_string(props, propname, propval) != 0) {5697nvlist_free(props);5698return (no_memory(zhp->zpool_hdl));5699}57005701char errbuf[ERRBUFLEN];5702(void) snprintf(errbuf, sizeof (errbuf),5703dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"),5704propname, vdevname, zhp->zpool_name);57055706flags.vdevprop = 1;5707version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);5708if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,5709zhp->zpool_name, props, version, flags, errbuf)) == NULL) {5710nvlist_free(props);5711nvlist_free(nvl);5712return (-1);5713}57145715nvlist_free(props);5716props = realprops;57175718fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);57195720ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl);57215722nvlist_free(props);5723nvlist_free(nvl);5724nvlist_free(outnvl);57255726if (ret) {5727if (errno == ENOTSUP) {5728zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,5729"property not supported for this vdev"));5730(void) zfs_error(zhp->zpool_hdl, EZFS_PROPTYPE, errbuf);5731} else {5732(void) zpool_standard_error(zhp->zpool_hdl, errno,5733errbuf);5734}5735}57365737return (ret);5738}57395740/*5741* Prune older entries from the DDT to reclaim space under the quota5742*/5743int5744zpool_ddt_prune(zpool_handle_t *zhp, zpool_ddt_prune_unit_t unit,5745uint64_t amount)5746{5747int error = lzc_ddt_prune(zhp->zpool_name, unit, amount);5748if (error != 0) {5749libzfs_handle_t *hdl = zhp->zpool_hdl;5750char errbuf[ERRBUFLEN];57515752(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,5753"cannot prune dedup table on '%s'"), zhp->zpool_name);57545755if (error == EALREADY) {5756zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,5757"a prune operation is already in progress"));5758(void) zfs_error(hdl, EZFS_BUSY, errbuf);5759} else {5760(void) zpool_standard_error(hdl, errno, errbuf);5761}5762return (-1);5763}57645765return (0);5766}576757685769