Path: blob/main/sys/contrib/openzfs/cmd/zpool/zpool_main.c
108453 views
// SPDX-License-Identifier: CDDL-1.01/*2* CDDL HEADER START3*4* The contents of this file are subject to the terms of the5* Common Development and Distribution License (the "License").6* You may not use this file except in compliance with the License.7*8* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE9* or https://opensource.org/licenses/CDDL-1.0.10* See the License for the specific language governing permissions11* and limitations under the License.12*13* When distributing Covered Code, include this CDDL HEADER in each14* file and include the License file at usr/src/OPENSOLARIS.LICENSE.15* If applicable, add the following below this CDDL HEADER, with the16* fields enclosed by brackets "[]" replaced with your own identifying17* information: Portions Copyright [yyyy] [name of copyright owner]18*19* CDDL HEADER END20*/2122/*23* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.24* Copyright 2011 Nexenta Systems, Inc. All rights reserved.25* Copyright (c) 2011, 2024 by Delphix. All rights reserved.26* Copyright (c) 2012 by Frederik Wessels. All rights reserved.27* Copyright (c) 2012 by Cyril Plisko. All rights reserved.28* Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.29* Copyright 2016 Igor Kozhukhov <[email protected]>.30* Copyright (c) 2017 Datto Inc.31* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.32* Copyright (c) 2017, Intel Corporation.33* Copyright (c) 2019, loli10K <[email protected]>34* Copyright (c) 2021, Colm Buckley <[email protected]>35* Copyright (c) 2021, 2023, 2025, Klara, Inc.36* Copyright (c) 2021, 2025 Hewlett Packard Enterprise Development LP.37*/3839#include <assert.h>40#include <ctype.h>41#include <dirent.h>42#include <errno.h>43#include <fcntl.h>44#include <getopt.h>45#include <inttypes.h>46#include <libgen.h>47#include <libintl.h>48#include <locale.h>49#include <pthread.h>50#include <stdio.h>51#include <stdlib.h>52#include <string.h>53#include <termios.h>54#include <time.h>55#include <unistd.h>56#include <pwd.h>57#include <zone.h>58#include <sys/wait.h>59#include <zfs_prop.h>60#include <sys/fs/zfs.h>61#include <sys/stat.h>62#include <sys/systeminfo.h>63#include <sys/fm/fs/zfs.h>64#include <sys/fm/util.h>65#include <sys/fm/protocol.h>66#include <sys/zfs_ioctl.h>67#include <sys/mount.h>68#include <sys/sysmacros.h>69#include <string.h>70#include <math.h>7172#include <libzfs.h>73#include <libzutil.h>7475#include "zpool_util.h"76#include "zfs_comutil.h"77#include "zfeature_common.h"78#include "zfs_valstr.h"7980#include "statcommon.h"8182libzfs_handle_t *g_zfs;8384static int mount_tp_nthr = 512; /* tpool threads for multi-threaded mounting */8586static int zpool_do_create(int, char **);87static int zpool_do_destroy(int, char **);8889static int zpool_do_add(int, char **);90static int zpool_do_remove(int, char **);91static int zpool_do_labelclear(int, char **);9293static int zpool_do_checkpoint(int, char **);94static int zpool_do_prefetch(int, char **);9596static int zpool_do_list(int, char **);97static int zpool_do_iostat(int, char **);98static int zpool_do_status(int, char **);99100static int zpool_do_online(int, char **);101static int zpool_do_offline(int, char **);102static int zpool_do_clear(int, char **);103static int zpool_do_reopen(int, char **);104105static int zpool_do_reguid(int, char **);106107static int zpool_do_attach(int, char **);108static int zpool_do_detach(int, char **);109static int zpool_do_replace(int, char **);110static int zpool_do_split(int, char **);111112static int zpool_do_initialize(int, char **);113static int zpool_do_scrub(int, char **);114static int zpool_do_resilver(int, char **);115static int zpool_do_trim(int, char **);116117static int zpool_do_import(int, char **);118static int zpool_do_export(int, char **);119120static int zpool_do_upgrade(int, char **);121122static int zpool_do_history(int, char **);123static int zpool_do_events(int, char **);124125static int zpool_do_get(int, char **);126static int zpool_do_set(int, char **);127128static int zpool_do_sync(int, char **);129130static int zpool_do_version(int, char **);131132static int zpool_do_wait(int, char **);133134static int zpool_do_ddt_prune(int, char **);135136static int zpool_do_help(int argc, char **argv);137138static zpool_compat_status_t zpool_do_load_compat(139const char *, boolean_t *);140141enum zpool_options {142ZPOOL_OPTION_POWER = 1024,143ZPOOL_OPTION_ALLOW_INUSE,144ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,145ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH,146ZPOOL_OPTION_POOL_KEY_GUID,147ZPOOL_OPTION_JSON_NUMS_AS_INT,148ZPOOL_OPTION_JSON_FLAT_VDEVS149};150151/*152* These libumem hooks provide a reasonable set of defaults for the allocator's153* debugging facilities.154*/155156#ifdef DEBUG157const char *158_umem_debug_init(void)159{160return ("default,verbose"); /* $UMEM_DEBUG setting */161}162163const char *164_umem_logging_init(void)165{166return ("fail,contents"); /* $UMEM_LOGGING setting */167}168#endif169170typedef enum {171HELP_ADD,172HELP_ATTACH,173HELP_CLEAR,174HELP_CREATE,175HELP_CHECKPOINT,176HELP_DDT_PRUNE,177HELP_DESTROY,178HELP_DETACH,179HELP_EXPORT,180HELP_HISTORY,181HELP_IMPORT,182HELP_IOSTAT,183HELP_LABELCLEAR,184HELP_LIST,185HELP_OFFLINE,186HELP_ONLINE,187HELP_PREFETCH,188HELP_REPLACE,189HELP_REMOVE,190HELP_INITIALIZE,191HELP_SCRUB,192HELP_RESILVER,193HELP_TRIM,194HELP_STATUS,195HELP_UPGRADE,196HELP_EVENTS,197HELP_GET,198HELP_SET,199HELP_SPLIT,200HELP_SYNC,201HELP_REGUID,202HELP_REOPEN,203HELP_VERSION,204HELP_WAIT205} zpool_help_t;206207208/*209* Flags for stats to display with "zpool iostats"210*/211enum iostat_type {212IOS_DEFAULT = 0,213IOS_LATENCY = 1,214IOS_QUEUES = 2,215IOS_L_HISTO = 3,216IOS_RQ_HISTO = 4,217IOS_COUNT, /* always last element */218};219220/* iostat_type entries as bitmasks */221#define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)222#define IOS_LATENCY_M (1ULL << IOS_LATENCY)223#define IOS_QUEUES_M (1ULL << IOS_QUEUES)224#define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)225#define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)226227/* Mask of all the histo bits */228#define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)229230/*231* Lookup table for iostat flags to nvlist names. Basically a list232* of all the nvlists a flag requires. Also specifies the order in233* which data gets printed in zpool iostat.234*/235static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {236[IOS_L_HISTO] = {237ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,238ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,239ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,240ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,241ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,242ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,243ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,244ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,245ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,246ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,247ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,248NULL},249[IOS_LATENCY] = {250ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,251ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,252ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,253ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,254ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,255ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,256NULL},257[IOS_QUEUES] = {258ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,259ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,260ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,261ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,262ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,263ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,264ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,265NULL},266[IOS_RQ_HISTO] = {267ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,268ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,269ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,270ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,271ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,272ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,273ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,274ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,275ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,276ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,277ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,278ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,279ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,280ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,281NULL},282};283284static const char *pool_scan_func_str[] = {285"NONE",286"SCRUB",287"RESILVER",288"ERRORSCRUB"289};290291static const char *pool_scan_state_str[] = {292"NONE",293"SCANNING",294"FINISHED",295"CANCELED",296"ERRORSCRUBBING"297};298299static const char *vdev_rebuild_state_str[] = {300"NONE",301"ACTIVE",302"CANCELED",303"COMPLETE"304};305306static const char *checkpoint_state_str[] = {307"NONE",308"EXISTS",309"DISCARDING"310};311312static const char *vdev_state_str[] = {313"UNKNOWN",314"CLOSED",315"OFFLINE",316"REMOVED",317"CANT_OPEN",318"FAULTED",319"DEGRADED",320"ONLINE"321};322323static const char *vdev_aux_str[] = {324"NONE",325"OPEN_FAILED",326"CORRUPT_DATA",327"NO_REPLICAS",328"BAD_GUID_SUM",329"TOO_SMALL",330"BAD_LABEL",331"VERSION_NEWER",332"VERSION_OLDER",333"UNSUP_FEAT",334"SPARED",335"ERR_EXCEEDED",336"IO_FAILURE",337"BAD_LOG",338"EXTERNAL",339"SPLIT_POOL",340"BAD_ASHIFT",341"EXTERNAL_PERSIST",342"ACTIVE",343"CHILDREN_OFFLINE",344"ASHIFT_TOO_BIG"345};346347static const char *vdev_init_state_str[] = {348"NONE",349"ACTIVE",350"CANCELED",351"SUSPENDED",352"COMPLETE"353};354355static const char *vdev_trim_state_str[] = {356"NONE",357"ACTIVE",358"CANCELED",359"SUSPENDED",360"COMPLETE"361};362363#define ZFS_NICE_TIMESTAMP 100364365/*366* Given a cb->cb_flags with a histogram bit set, return the iostat_type.367* Right now, only one histo bit is ever set at one time, so we can368* just do a highbit64(a)369*/370#define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)371372typedef struct zpool_command {373const char *name;374int (*func)(int, char **);375zpool_help_t usage;376} zpool_command_t;377378/*379* Master command table. Each ZFS command has a name, associated function, and380* usage message. The usage messages need to be internationalized, so we have381* to have a function to return the usage message based on a command index.382*383* These commands are organized according to how they are displayed in the usage384* message. An empty command (one with a NULL name) indicates an empty line in385* the generic usage message.386*/387static zpool_command_t command_table[] = {388{ "version", zpool_do_version, HELP_VERSION },389{ NULL },390{ "create", zpool_do_create, HELP_CREATE },391{ "destroy", zpool_do_destroy, HELP_DESTROY },392{ NULL },393{ "add", zpool_do_add, HELP_ADD },394{ "remove", zpool_do_remove, HELP_REMOVE },395{ NULL },396{ "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },397{ NULL },398{ "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },399{ "prefetch", zpool_do_prefetch, HELP_PREFETCH },400{ NULL },401{ "list", zpool_do_list, HELP_LIST },402{ "iostat", zpool_do_iostat, HELP_IOSTAT },403{ "status", zpool_do_status, HELP_STATUS },404{ NULL },405{ "online", zpool_do_online, HELP_ONLINE },406{ "offline", zpool_do_offline, HELP_OFFLINE },407{ "clear", zpool_do_clear, HELP_CLEAR },408{ "reopen", zpool_do_reopen, HELP_REOPEN },409{ NULL },410{ "attach", zpool_do_attach, HELP_ATTACH },411{ "detach", zpool_do_detach, HELP_DETACH },412{ "replace", zpool_do_replace, HELP_REPLACE },413{ "split", zpool_do_split, HELP_SPLIT },414{ NULL },415{ "initialize", zpool_do_initialize, HELP_INITIALIZE },416{ "resilver", zpool_do_resilver, HELP_RESILVER },417{ "scrub", zpool_do_scrub, HELP_SCRUB },418{ "trim", zpool_do_trim, HELP_TRIM },419{ NULL },420{ "import", zpool_do_import, HELP_IMPORT },421{ "export", zpool_do_export, HELP_EXPORT },422{ "upgrade", zpool_do_upgrade, HELP_UPGRADE },423{ "reguid", zpool_do_reguid, HELP_REGUID },424{ NULL },425{ "history", zpool_do_history, HELP_HISTORY },426{ "events", zpool_do_events, HELP_EVENTS },427{ NULL },428{ "get", zpool_do_get, HELP_GET },429{ "set", zpool_do_set, HELP_SET },430{ "sync", zpool_do_sync, HELP_SYNC },431{ NULL },432{ "wait", zpool_do_wait, HELP_WAIT },433{ NULL },434{ "ddtprune", zpool_do_ddt_prune, HELP_DDT_PRUNE },435};436437#define NCOMMAND (ARRAY_SIZE(command_table))438439#define VDEV_ALLOC_CLASS_LOGS "logs"440441#define MAX_CMD_LEN 256442443static zpool_command_t *current_command;444static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);445static char history_str[HIS_MAX_RECORD_LEN];446static boolean_t log_history = B_TRUE;447static uint_t timestamp_fmt = NODATE;448449static const char *450get_usage(zpool_help_t idx)451{452switch (idx) {453case HELP_ADD:454return (gettext("\tadd [-afgLnP] [-o property=value] "455"<pool> <vdev> ...\n"));456case HELP_ATTACH:457return (gettext("\tattach [-fsw] [-o property=value] "458"<pool> <vdev> <new-device>\n"));459case HELP_CLEAR:460return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));461case HELP_CREATE:462return (gettext("\tcreate [-fnd] [-o property=value] ... \n"463"\t [-O file-system-property=value] ... \n"464"\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));465case HELP_CHECKPOINT:466return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));467case HELP_DESTROY:468return (gettext("\tdestroy [-f] <pool>\n"));469case HELP_DETACH:470return (gettext("\tdetach <pool> <device>\n"));471case HELP_EXPORT:472return (gettext("\texport [-af] <pool> ...\n"));473case HELP_HISTORY:474return (gettext("\thistory [-il] [<pool>] ...\n"));475case HELP_IMPORT:476return (gettext("\timport [-d dir] [-D]\n"477"\timport [-o mntopts] [-o property=value] ... \n"478"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "479"[-R root] [-F [-n]] -a\n"480"\timport [-o mntopts] [-o property=value] ... \n"481"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "482"[-R root] [-F [-n]]\n"483"\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));484case HELP_IOSTAT:485return (gettext("\tiostat [[[-c [script1,script2,...]"486"[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"487"\t [[pool ...]|[pool vdev ...]|[vdev ...]]"488" [[-n] interval [count]]\n"));489case HELP_LABELCLEAR:490return (gettext("\tlabelclear [-f] <vdev>\n"));491case HELP_LIST:492return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j "493"[--json-int, --json-pool-key-guid]] ...\n"494"\t [-T d|u] [pool] [interval [count]]\n"));495case HELP_PREFETCH:496return (gettext("\tprefetch [-t <type>] <pool>\n"));497case HELP_OFFLINE:498return (gettext("\toffline [--power]|[[-f][-t]] <pool> "499"<device> ...\n"));500case HELP_ONLINE:501return (gettext("\tonline [--power][-e] <pool> <device> "502"...\n"));503case HELP_REPLACE:504return (gettext("\treplace [-fsw] [-o property=value] "505"<pool> <device> [new-device]\n"));506case HELP_REMOVE:507return (gettext("\tremove [-npsw] <pool> <device> ...\n"));508case HELP_REOPEN:509return (gettext("\treopen [-n] <pool>\n"));510case HELP_INITIALIZE:511return (gettext("\tinitialize [-c | -s | -u] [-w] <-a | <pool> "512"[<device> ...]>\n"));513case HELP_SCRUB:514return (gettext("\tscrub [-e | -s | -p | -C | -E | -S] [-w] "515"<-a | <pool> [<pool> ...]>\n"));516case HELP_RESILVER:517return (gettext("\tresilver <pool> ...\n"));518case HELP_TRIM:519return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] "520"<-a | <pool> [<device> ...]>\n"));521case HELP_STATUS:522return (gettext("\tstatus [-DdegiLPpstvx] "523"[-c script1[,script2,...]] ...\n"524"\t [-j|--json [--json-flat-vdevs] [--json-int] "525"[--json-pool-key-guid]] ...\n"526"\t [-T d|u] [--power] [pool] [interval [count]]\n"));527case HELP_UPGRADE:528return (gettext("\tupgrade\n"529"\tupgrade -v\n"530"\tupgrade [-V version] <-a | pool ...>\n"));531case HELP_EVENTS:532return (gettext("\tevents [-vHf [pool] | -c]\n"));533case HELP_GET:534return (gettext("\tget [-Hp] [-j [--json-int, "535"--json-pool-key-guid]] ...\n"536"\t [-o \"all\" | field[,...]] "537"<\"all\" | property[,...]> <pool> ...\n"));538case HELP_SET:539return (gettext("\tset <property=value> <pool>\n"540"\tset <vdev_property=value> <pool> <vdev>\n"));541case HELP_SPLIT:542return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"543"\t [-o property=value] <pool> <newpool> "544"[<device> ...]\n"));545case HELP_REGUID:546return (gettext("\treguid [-g guid] <pool>\n"));547case HELP_SYNC:548return (gettext("\tsync [pool] ...\n"));549case HELP_VERSION:550return (gettext("\tversion [-j]\n"));551case HELP_WAIT:552return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "553"<pool> [interval]\n"));554case HELP_DDT_PRUNE:555return (gettext("\tddtprune -d|-p <amount> <pool>\n"));556default:557__builtin_unreachable();558}559}560561/*562* Callback routine that will print out a pool property value.563*/564static int565print_pool_prop_cb(int prop, void *cb)566{567FILE *fp = cb;568569(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));570571if (zpool_prop_readonly(prop))572(void) fprintf(fp, " NO ");573else574(void) fprintf(fp, " YES ");575576if (zpool_prop_values(prop) == NULL)577(void) fprintf(fp, "-\n");578else579(void) fprintf(fp, "%s\n", zpool_prop_values(prop));580581return (ZPROP_CONT);582}583584/*585* Callback routine that will print out a vdev property value.586*/587static int588print_vdev_prop_cb(int prop, void *cb)589{590FILE *fp = cb;591592(void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));593594if (vdev_prop_readonly(prop))595(void) fprintf(fp, " NO ");596else597(void) fprintf(fp, " YES ");598599if (vdev_prop_values(prop) == NULL)600(void) fprintf(fp, "-\n");601else602(void) fprintf(fp, "%s\n", vdev_prop_values(prop));603604return (ZPROP_CONT);605}606607/*608* Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like609* '/dev/disk/by-vdev/L5'.610*/611static const char *612vdev_name_to_path(zpool_handle_t *zhp, char *vdev)613{614nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);615if (vdev_nv == NULL) {616return (NULL);617}618return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));619}620621static int622zpool_power_on(zpool_handle_t *zhp, char *vdev)623{624return (zpool_power(zhp, vdev, B_TRUE));625}626627static int628zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)629{630int rc;631632rc = zpool_power_on(zhp, vdev);633if (rc != 0)634return (rc);635636(void) zpool_disk_wait(vdev_name_to_path(zhp, vdev));637638return (0);639}640641static int642zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)643{644nvlist_t *nv;645const char *path = NULL;646int rc;647648/* Power up all the devices first */649FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {650path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);651if (path != NULL) {652rc = zpool_power_on(zhp, (char *)path);653if (rc != 0) {654return (rc);655}656}657}658659/*660* Wait for their devices to show up. Since we powered them on661* at roughly the same time, they should all come online around662* the same time.663*/664FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {665path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);666(void) zpool_disk_wait(path);667}668669return (0);670}671672static int673zpool_power_off(zpool_handle_t *zhp, char *vdev)674{675return (zpool_power(zhp, vdev, B_FALSE));676}677678/*679* Display usage message. If we're inside a command, display only the usage for680* that command. Otherwise, iterate over the entire command table and display681* a complete usage message.682*/683static __attribute__((noreturn)) void684usage(boolean_t requested)685{686FILE *fp = requested ? stdout : stderr;687688if (current_command == NULL) {689int i;690691(void) fprintf(fp, gettext("usage: zpool command args ...\n"));692(void) fprintf(fp,693gettext("where 'command' is one of the following:\n\n"));694695for (i = 0; i < NCOMMAND; i++) {696if (command_table[i].name == NULL)697(void) fprintf(fp, "\n");698else699(void) fprintf(fp, "%s",700get_usage(command_table[i].usage));701}702703(void) fprintf(fp,704gettext("\nFor further help on a command or topic, "705"run: %s\n"), "zpool help [<topic>]");706} else {707(void) fprintf(fp, gettext("usage:\n"));708(void) fprintf(fp, "%s", get_usage(current_command->usage));709}710711if (current_command != NULL &&712current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&713((strcmp(current_command->name, "set") == 0) ||714(strcmp(current_command->name, "get") == 0) ||715(strcmp(current_command->name, "list") == 0))) {716717(void) fprintf(fp, "%s",718gettext("\nthe following properties are supported:\n"));719720(void) fprintf(fp, "\n\t%-19s %s %s\n\n",721"PROPERTY", "EDIT", "VALUES");722723/* Iterate over all properties */724if (current_prop_type == ZFS_TYPE_POOL) {725(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,726B_TRUE, current_prop_type);727728(void) fprintf(fp, "\t%-19s ", "feature@...");729(void) fprintf(fp, "YES "730"disabled | enabled | active\n");731732(void) fprintf(fp, gettext("\nThe feature@ properties "733"must be appended with a feature name.\n"734"See zpool-features(7).\n"));735} else if (current_prop_type == ZFS_TYPE_VDEV) {736(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,737B_TRUE, current_prop_type);738}739}740741/*742* See comments at end of main().743*/744if (getenv("ZFS_ABORT") != NULL) {745(void) printf("dumping core by request\n");746abort();747}748749exit(requested ? 0 : 2);750}751752/*753* zpool initialize [-c | -s | -u] [-w] <-a | pool> [<vdev> ...]754* Initialize all unused blocks in the specified vdevs, or all vdevs in the pool755* if none specified.756*757* -a Use all pools.758* -c Cancel. Ends active initializing.759* -s Suspend. Initializing can then be restarted with no flags.760* -u Uninitialize. Clears initialization state.761* -w Wait. Blocks until initializing has completed.762*/763int764zpool_do_initialize(int argc, char **argv)765{766int c;767char *poolname;768zpool_handle_t *zhp;769int err = 0;770boolean_t wait = B_FALSE;771boolean_t initialize_all = B_FALSE;772773struct option long_options[] = {774{"cancel", no_argument, NULL, 'c'},775{"suspend", no_argument, NULL, 's'},776{"uninit", no_argument, NULL, 'u'},777{"wait", no_argument, NULL, 'w'},778{"all", no_argument, NULL, 'a'},779{0, 0, 0, 0}780};781782pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;783while ((c = getopt_long(argc, argv, "acsuw", long_options,784NULL)) != -1) {785switch (c) {786case 'a':787initialize_all = B_TRUE;788break;789case 'c':790if (cmd_type != POOL_INITIALIZE_START &&791cmd_type != POOL_INITIALIZE_CANCEL) {792(void) fprintf(stderr, gettext("-c cannot be "793"combined with other options\n"));794usage(B_FALSE);795}796cmd_type = POOL_INITIALIZE_CANCEL;797break;798case 's':799if (cmd_type != POOL_INITIALIZE_START &&800cmd_type != POOL_INITIALIZE_SUSPEND) {801(void) fprintf(stderr, gettext("-s cannot be "802"combined with other options\n"));803usage(B_FALSE);804}805cmd_type = POOL_INITIALIZE_SUSPEND;806break;807case 'u':808if (cmd_type != POOL_INITIALIZE_START &&809cmd_type != POOL_INITIALIZE_UNINIT) {810(void) fprintf(stderr, gettext("-u cannot be "811"combined with other options\n"));812usage(B_FALSE);813}814cmd_type = POOL_INITIALIZE_UNINIT;815break;816case 'w':817wait = B_TRUE;818break;819case '?':820if (optopt != 0) {821(void) fprintf(stderr,822gettext("invalid option '%c'\n"), optopt);823} else {824(void) fprintf(stderr,825gettext("invalid option '%s'\n"),826argv[optind - 1]);827}828usage(B_FALSE);829}830}831832argc -= optind;833argv += optind;834835initialize_cbdata_t cbdata = {836.wait = wait,837.cmd_type = cmd_type838};839840if (initialize_all && argc > 0) {841(void) fprintf(stderr, gettext("-a cannot be combined with "842"individual pools or vdevs\n"));843usage(B_FALSE);844}845846if (argc < 1 && !initialize_all) {847(void) fprintf(stderr, gettext("missing pool name argument\n"));848usage(B_FALSE);849}850851if (wait && (cmd_type != POOL_INITIALIZE_START)) {852(void) fprintf(stderr, gettext("-w cannot be used with -c, -s"853"or -u\n"));854usage(B_FALSE);855}856857if (argc == 0 && initialize_all) {858/* Initilize each pool recursively */859err = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,860B_FALSE, zpool_initialize_one, &cbdata);861return (err);862} else if (argc == 1) {863/* no individual leaf vdevs specified, initialize the pool */864poolname = argv[0];865zhp = zpool_open(g_zfs, poolname);866if (zhp == NULL)867return (-1);868err = zpool_initialize_one(zhp, &cbdata);869} else {870/* individual leaf vdevs specified, initialize them */871poolname = argv[0];872zhp = zpool_open(g_zfs, poolname);873if (zhp == NULL)874return (-1);875nvlist_t *vdevs = fnvlist_alloc();876for (int i = 1; i < argc; i++) {877fnvlist_add_boolean(vdevs, argv[i]);878}879if (wait)880err = zpool_initialize_wait(zhp, cmd_type, vdevs);881else882err = zpool_initialize(zhp, cmd_type, vdevs);883fnvlist_free(vdevs);884}885886zpool_close(zhp);887888return (err);889}890891/*892* print a pool vdev config for dry runs893*/894static void895print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,896const char *match, int name_flags)897{898nvlist_t **child;899uint_t c, children;900char *vname;901boolean_t printed = B_FALSE;902903if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,904&child, &children) != 0) {905if (name != NULL)906(void) printf("\t%*s%s\n", indent, "", name);907return;908}909910for (c = 0; c < children; c++) {911uint64_t is_log = B_FALSE, is_hole = B_FALSE;912const char *class = "";913914(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,915&is_hole);916917if (is_hole == B_TRUE) {918continue;919}920921(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,922&is_log);923if (is_log)924class = VDEV_ALLOC_BIAS_LOG;925(void) nvlist_lookup_string(child[c],926ZPOOL_CONFIG_ALLOCATION_BIAS, &class);927if (strcmp(match, class) != 0)928continue;929930if (!printed && name != NULL) {931(void) printf("\t%*s%s\n", indent, "", name);932printed = B_TRUE;933}934vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);935print_vdev_tree(zhp, vname, child[c], indent + 2, "",936name_flags);937free(vname);938}939}940941/*942* Print the list of l2cache devices for dry runs.943*/944static void945print_cache_list(nvlist_t *nv, int indent)946{947nvlist_t **child;948uint_t c, children;949950if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,951&child, &children) == 0 && children > 0) {952(void) printf("\t%*s%s\n", indent, "", "cache");953} else {954return;955}956for (c = 0; c < children; c++) {957char *vname;958959vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);960(void) printf("\t%*s%s\n", indent + 2, "", vname);961free(vname);962}963}964965/*966* Print the list of spares for dry runs.967*/968static void969print_spare_list(nvlist_t *nv, int indent)970{971nvlist_t **child;972uint_t c, children;973974if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,975&child, &children) == 0 && children > 0) {976(void) printf("\t%*s%s\n", indent, "", "spares");977} else {978return;979}980for (c = 0; c < children; c++) {981char *vname;982983vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);984(void) printf("\t%*s%s\n", indent + 2, "", vname);985free(vname);986}987}988989typedef struct spare_cbdata {990uint64_t cb_guid;991zpool_handle_t *cb_zhp;992} spare_cbdata_t;993994static boolean_t995find_vdev(nvlist_t *nv, uint64_t search)996{997uint64_t guid;998nvlist_t **child;999uint_t c, children;10001001if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&1002search == guid)1003return (B_TRUE);10041005if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,1006&child, &children) == 0) {1007for (c = 0; c < children; c++)1008if (find_vdev(child[c], search))1009return (B_TRUE);1010}10111012return (B_FALSE);1013}10141015static int1016find_spare(zpool_handle_t *zhp, void *data)1017{1018spare_cbdata_t *cbp = data;1019nvlist_t *config, *nvroot;10201021config = zpool_get_config(zhp, NULL);1022verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,1023&nvroot) == 0);10241025if (find_vdev(nvroot, cbp->cb_guid)) {1026cbp->cb_zhp = zhp;1027return (1);1028}10291030zpool_close(zhp);1031return (0);1032}10331034static void1035nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,1036boolean_t literal, boolean_t as_int, int format)1037{1038char buf[256];10391040if (literal) {1041if (!as_int)1042(void) snprintf(buf, 256, "%llu", (u_longlong_t)value);1043} else {1044switch (format) {1045case ZFS_NICENUM_1024:1046zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024);1047break;1048case ZFS_NICENUM_BYTES:1049zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES);1050break;1051case ZFS_NICENUM_TIME:1052zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME);1053break;1054case ZFS_NICE_TIMESTAMP:1055format_timestamp(value, buf, 256);1056break;1057default:1058fprintf(stderr, "Invalid number format");1059exit(1);1060}1061}1062if (as_int)1063fnvlist_add_uint64(item, key, value);1064else1065fnvlist_add_string(item, key, buf);1066}10671068/*1069* Generates an nvlist with output version for every command based on params.1070* Purpose of this is to add a version of JSON output, considering the schema1071* format might be updated for each command in future.1072*1073* Schema:1074*1075* "output_version": {1076* "command": string,1077* "vers_major": integer,1078* "vers_minor": integer,1079* }1080*/1081static nvlist_t *1082zpool_json_schema(int maj_v, int min_v)1083{1084char cmd[MAX_CMD_LEN];1085nvlist_t *sch = fnvlist_alloc();1086nvlist_t *ov = fnvlist_alloc();10871088(void) snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);1089fnvlist_add_string(ov, "command", cmd);1090fnvlist_add_uint32(ov, "vers_major", maj_v);1091fnvlist_add_uint32(ov, "vers_minor", min_v);1092fnvlist_add_nvlist(sch, "output_version", ov);1093fnvlist_free(ov);1094return (sch);1095}10961097static void1098fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,1099boolean_t as_int)1100{1101nvlist_t *config = zpool_get_config(zhp, NULL);1102uint64_t guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);1103uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);11041105fnvlist_add_string(list, "name", zpool_get_name(zhp));1106if (addtype)1107fnvlist_add_string(list, "type", "POOL");1108fnvlist_add_string(list, "state", zpool_get_state_str(zhp));1109if (as_int) {1110if (guid)1111fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_GUID, guid);1112if (txg)1113fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_TXG, txg);1114fnvlist_add_uint64(list, "spa_version", SPA_VERSION);1115fnvlist_add_uint64(list, "zpl_version", ZPL_VERSION);1116} else {1117char value[ZFS_MAXPROPLEN];1118if (guid) {1119(void) snprintf(value, ZFS_MAXPROPLEN, "%llu",1120(u_longlong_t)guid);1121fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value);1122}1123if (txg) {1124(void) snprintf(value, ZFS_MAXPROPLEN, "%llu",1125(u_longlong_t)txg);1126fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value);1127}1128fnvlist_add_string(list, "spa_version", SPA_VERSION_STRING);1129fnvlist_add_string(list, "zpl_version", ZPL_VERSION_STRING);1130}1131}11321133static void1134used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list)1135{1136spare_cbdata_t spare_cb;1137verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID,1138&spare_cb.cb_guid) == 0);1139if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {1140if (strcmp(zpool_get_name(spare_cb.cb_zhp),1141zpool_get_name(zhp)) != 0) {1142fnvlist_add_string(list, "used_by",1143zpool_get_name(spare_cb.cb_zhp));1144}1145zpool_close(spare_cb.cb_zhp);1146}1147}11481149static void1150fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,1151boolean_t addtype, boolean_t as_int)1152{1153boolean_t l2c = B_FALSE;1154const char *path, *phys, *devid, *bias = NULL;1155uint64_t hole = 0, log = 0, spare = 0;1156vdev_stat_t *vs;1157uint_t c;1158nvlist_t *nvdev;1159nvlist_t *nvdev_parent = NULL;1160char *_name;11611162if (strcmp(name, zpool_get_name(zhp)) != 0)1163_name = name;1164else1165_name = (char *)"root-0";11661167nvdev = zpool_find_vdev(zhp, _name, NULL, &l2c, NULL);11681169fnvlist_add_string(list, "name", name);1170if (addtype)1171fnvlist_add_string(list, "type", "VDEV");1172if (nvdev) {1173const char *type = fnvlist_lookup_string(nvdev,1174ZPOOL_CONFIG_TYPE);1175if (type)1176fnvlist_add_string(list, "vdev_type", type);1177uint64_t guid = fnvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID);1178if (guid) {1179if (as_int) {1180fnvlist_add_uint64(list, "guid", guid);1181} else {1182char buf[ZFS_MAXPROPLEN];1183(void) snprintf(buf, ZFS_MAXPROPLEN, "%llu",1184(u_longlong_t)guid);1185fnvlist_add_string(list, "guid", buf);1186}1187}1188if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0)1189fnvlist_add_string(list, "path", path);1190if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH,1191&phys) == 0)1192fnvlist_add_string(list, "phys_path", phys);1193if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID,1194&devid) == 0)1195fnvlist_add_string(list, "devid", devid);1196(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log);1197(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE,1198&spare);1199(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole);1200if (hole)1201fnvlist_add_string(list, "class", VDEV_TYPE_HOLE);1202else if (l2c)1203fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE);1204else if (spare)1205fnvlist_add_string(list, "class", VDEV_TYPE_SPARE);1206else if (log)1207fnvlist_add_string(list, "class", VDEV_TYPE_LOG);1208else {1209(void) nvlist_lookup_string(nvdev,1210ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);1211if (bias != NULL)1212fnvlist_add_string(list, "class", bias);1213else {1214nvdev_parent = NULL;1215nvdev_parent = zpool_find_parent_vdev(zhp,1216_name, NULL, NULL, NULL);12171218/*1219* With a mirrored special device, the parent1220* "mirror" vdev will have1221* ZPOOL_CONFIG_ALLOCATION_BIAS set to "special"1222* not the leaf vdevs. If we're a leaf vdev1223* in that case we need to look at our parent1224* to see if they're "special" to know if we1225* are "special" too.1226*/1227if (nvdev_parent) {1228(void) nvlist_lookup_string(1229nvdev_parent,1230ZPOOL_CONFIG_ALLOCATION_BIAS,1231&bias);1232}1233if (bias != NULL)1234fnvlist_add_string(list, "class", bias);1235else1236fnvlist_add_string(list, "class",1237"normal");1238}1239}1240if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS,1241(uint64_t **)&vs, &c) == 0) {1242fnvlist_add_string(list, "state",1243vdev_state_str[vs->vs_state]);1244}1245}1246}12471248static boolean_t1249prop_list_contains_feature(nvlist_t *proplist)1250{1251nvpair_t *nvp;1252for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;1253nvp = nvlist_next_nvpair(proplist, nvp)) {1254if (zpool_prop_feature(nvpair_name(nvp)))1255return (B_TRUE);1256}1257return (B_FALSE);1258}12591260/*1261* Add a property pair (name, string-value) into a property nvlist.1262*/1263static int1264add_prop_list(const char *propname, const char *propval, nvlist_t **props,1265boolean_t poolprop)1266{1267zpool_prop_t prop = ZPOOL_PROP_INVAL;1268nvlist_t *proplist;1269const char *normnm;1270const char *strval;12711272if (*props == NULL &&1273nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {1274(void) fprintf(stderr,1275gettext("internal error: out of memory\n"));1276return (1);1277}12781279proplist = *props;12801281if (poolprop) {1282const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);1283const char *cname =1284zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);12851286if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&1287(!zpool_prop_feature(propname) &&1288!zpool_prop_vdev(propname))) {1289(void) fprintf(stderr, gettext("property '%s' is "1290"not a valid pool or vdev property\n"), propname);1291return (2);1292}12931294/*1295* feature@ properties and version should not be specified1296* at the same time.1297*/1298if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&1299nvlist_exists(proplist, vname)) ||1300(prop == ZPOOL_PROP_VERSION &&1301prop_list_contains_feature(proplist))) {1302(void) fprintf(stderr, gettext("'feature@' and "1303"'version' properties cannot be specified "1304"together\n"));1305return (2);1306}13071308/*1309* if version is specified, only "legacy" compatibility1310* may be requested1311*/1312if ((prop == ZPOOL_PROP_COMPATIBILITY &&1313strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&1314nvlist_exists(proplist, vname)) ||1315(prop == ZPOOL_PROP_VERSION &&1316nvlist_exists(proplist, cname) &&1317strcmp(fnvlist_lookup_string(proplist, cname),1318ZPOOL_COMPAT_LEGACY) != 0)) {1319(void) fprintf(stderr, gettext("when 'version' is "1320"specified, the 'compatibility' feature may only "1321"be set to '" ZPOOL_COMPAT_LEGACY "'\n"));1322return (2);1323}13241325if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))1326normnm = propname;1327else1328normnm = zpool_prop_to_name(prop);1329} else {1330zfs_prop_t fsprop = zfs_name_to_prop(propname);13311332if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,1333B_FALSE)) {1334normnm = zfs_prop_to_name(fsprop);1335} else if (zfs_prop_user(propname) ||1336zfs_prop_userquota(propname)) {1337normnm = propname;1338} else {1339(void) fprintf(stderr, gettext("property '%s' is "1340"not a valid filesystem property\n"), propname);1341return (2);1342}1343}13441345if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&1346prop != ZPOOL_PROP_CACHEFILE) {1347(void) fprintf(stderr, gettext("property '%s' "1348"specified multiple times\n"), propname);1349return (2);1350}13511352if (nvlist_add_string(proplist, normnm, propval) != 0) {1353(void) fprintf(stderr, gettext("internal "1354"error: out of memory\n"));1355return (1);1356}13571358return (0);1359}13601361/*1362* Set a default property pair (name, string-value) in a property nvlist1363*/1364static int1365add_prop_list_default(const char *propname, const char *propval,1366nvlist_t **props)1367{1368const char *pval;13691370if (nvlist_lookup_string(*props, propname, &pval) == 0)1371return (0);13721373return (add_prop_list(propname, propval, props, B_TRUE));1374}13751376/*1377* zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...1378*1379* -a Disable the ashift validation checks1380* -f Force addition of devices, even if they appear in use1381* -g Display guid for individual vdev name.1382* -L Follow links when resolving vdev path name.1383* -n Do not add the devices, but display the resulting layout if1384* they were to be added.1385* -o Set property=value.1386* -P Display full path for vdev name.1387*1388* Adds the given vdevs to 'pool'. As with create, the bulk of this work is1389* handled by make_root_vdev(), which constructs the nvlist needed to pass to1390* libzfs.1391*/1392int1393zpool_do_add(int argc, char **argv)1394{1395boolean_t check_replication = B_TRUE;1396boolean_t check_inuse = B_TRUE;1397boolean_t dryrun = B_FALSE;1398boolean_t check_ashift = B_TRUE;1399boolean_t force = B_FALSE;1400int name_flags = 0;1401int c;1402nvlist_t *nvroot;1403char *poolname;1404int ret;1405zpool_handle_t *zhp;1406nvlist_t *config;1407nvlist_t *props = NULL;1408char *propval;14091410struct option long_options[] = {1411{"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},1412{"allow-replication-mismatch", no_argument, NULL,1413ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},1414{"allow-ashift-mismatch", no_argument, NULL,1415ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},1416{0, 0, 0, 0}1417};14181419/* check options */1420while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))1421!= -1) {1422switch (c) {1423case 'f':1424force = B_TRUE;1425break;1426case 'g':1427name_flags |= VDEV_NAME_GUID;1428break;1429case 'L':1430name_flags |= VDEV_NAME_FOLLOW_LINKS;1431break;1432case 'n':1433dryrun = B_TRUE;1434break;1435case 'o':1436if ((propval = strchr(optarg, '=')) == NULL) {1437(void) fprintf(stderr, gettext("missing "1438"'=' for -o option\n"));1439usage(B_FALSE);1440}1441*propval = '\0';1442propval++;14431444if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||1445(add_prop_list(optarg, propval, &props, B_TRUE)))1446usage(B_FALSE);1447break;1448case 'P':1449name_flags |= VDEV_NAME_PATH;1450break;1451case ZPOOL_OPTION_ALLOW_INUSE:1452check_inuse = B_FALSE;1453break;1454case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:1455check_replication = B_FALSE;1456break;1457case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:1458check_ashift = B_FALSE;1459break;1460case '?':1461(void) fprintf(stderr, gettext("invalid option '%c'\n"),1462optopt);1463usage(B_FALSE);1464}1465}14661467argc -= optind;1468argv += optind;14691470/* get pool name and check number of arguments */1471if (argc < 1) {1472(void) fprintf(stderr, gettext("missing pool name argument\n"));1473usage(B_FALSE);1474}1475if (argc < 2) {1476(void) fprintf(stderr, gettext("missing vdev specification\n"));1477usage(B_FALSE);1478}14791480if (force) {1481if (!check_inuse || !check_replication || !check_ashift) {1482(void) fprintf(stderr, gettext("'-f' option is not "1483"allowed with '--allow-replication-mismatch', "1484"'--allow-ashift-mismatch', or "1485"'--allow-in-use'\n"));1486usage(B_FALSE);1487}1488check_inuse = B_FALSE;1489check_replication = B_FALSE;1490check_ashift = B_FALSE;1491}14921493poolname = argv[0];14941495argc--;1496argv++;14971498if ((zhp = zpool_open(g_zfs, poolname)) == NULL)1499return (1);15001501if ((config = zpool_get_config(zhp, NULL)) == NULL) {1502(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),1503poolname);1504zpool_close(zhp);1505return (1);1506}15071508/* unless manually specified use "ashift" pool property (if set) */1509if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {1510int intval;1511zprop_source_t src;1512char strval[ZPOOL_MAXPROPLEN];15131514intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);1515if (src != ZPROP_SRC_DEFAULT) {1516(void) sprintf(strval, "%" PRId32, intval);1517verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,1518&props, B_TRUE) == 0);1519}1520}15211522/* pass off to make_root_vdev for processing */1523nvroot = make_root_vdev(zhp, props, !check_inuse,1524check_replication, B_FALSE, dryrun, argc, argv);1525if (nvroot == NULL) {1526zpool_close(zhp);1527return (1);1528}15291530if (dryrun) {1531nvlist_t *poolnvroot;1532nvlist_t **l2child, **sparechild;1533uint_t l2children, sparechildren, c;1534char *vname;1535boolean_t hadcache = B_FALSE, hadspare = B_FALSE;15361537verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,1538&poolnvroot) == 0);15391540(void) printf(gettext("would update '%s' to the following "1541"configuration:\n\n"), zpool_get_name(zhp));15421543/* print original main pool and new tree */1544print_vdev_tree(zhp, poolname, poolnvroot, 0, "",1545name_flags | VDEV_NAME_TYPE_ID);1546print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);15471548/* print other classes: 'dedup', 'special', and 'log' */1549if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {1550print_vdev_tree(zhp, "dedup", poolnvroot, 0,1551VDEV_ALLOC_BIAS_DEDUP, name_flags);1552print_vdev_tree(zhp, NULL, nvroot, 0,1553VDEV_ALLOC_BIAS_DEDUP, name_flags);1554} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {1555print_vdev_tree(zhp, "dedup", nvroot, 0,1556VDEV_ALLOC_BIAS_DEDUP, name_flags);1557}15581559if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {1560print_vdev_tree(zhp, "special", poolnvroot, 0,1561VDEV_ALLOC_BIAS_SPECIAL, name_flags);1562print_vdev_tree(zhp, NULL, nvroot, 0,1563VDEV_ALLOC_BIAS_SPECIAL, name_flags);1564} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {1565print_vdev_tree(zhp, "special", nvroot, 0,1566VDEV_ALLOC_BIAS_SPECIAL, name_flags);1567}15681569if (num_logs(poolnvroot) > 0) {1570print_vdev_tree(zhp, "logs", poolnvroot, 0,1571VDEV_ALLOC_BIAS_LOG, name_flags);1572print_vdev_tree(zhp, NULL, nvroot, 0,1573VDEV_ALLOC_BIAS_LOG, name_flags);1574} else if (num_logs(nvroot) > 0) {1575print_vdev_tree(zhp, "logs", nvroot, 0,1576VDEV_ALLOC_BIAS_LOG, name_flags);1577}15781579/* Do the same for the caches */1580if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,1581&l2child, &l2children) == 0 && l2children) {1582hadcache = B_TRUE;1583(void) printf(gettext("\tcache\n"));1584for (c = 0; c < l2children; c++) {1585vname = zpool_vdev_name(g_zfs, NULL,1586l2child[c], name_flags);1587(void) printf("\t %s\n", vname);1588free(vname);1589}1590}1591if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,1592&l2child, &l2children) == 0 && l2children) {1593if (!hadcache)1594(void) printf(gettext("\tcache\n"));1595for (c = 0; c < l2children; c++) {1596vname = zpool_vdev_name(g_zfs, NULL,1597l2child[c], name_flags);1598(void) printf("\t %s\n", vname);1599free(vname);1600}1601}1602/* And finally the spares */1603if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,1604&sparechild, &sparechildren) == 0 && sparechildren > 0) {1605hadspare = B_TRUE;1606(void) printf(gettext("\tspares\n"));1607for (c = 0; c < sparechildren; c++) {1608vname = zpool_vdev_name(g_zfs, NULL,1609sparechild[c], name_flags);1610(void) printf("\t %s\n", vname);1611free(vname);1612}1613}1614if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,1615&sparechild, &sparechildren) == 0 && sparechildren > 0) {1616if (!hadspare)1617(void) printf(gettext("\tspares\n"));1618for (c = 0; c < sparechildren; c++) {1619vname = zpool_vdev_name(g_zfs, NULL,1620sparechild[c], name_flags);1621(void) printf("\t %s\n", vname);1622free(vname);1623}1624}16251626ret = 0;1627} else {1628ret = (zpool_add(zhp, nvroot, check_ashift) != 0);1629}16301631nvlist_free(props);1632nvlist_free(nvroot);1633zpool_close(zhp);16341635return (ret);1636}16371638/*1639* zpool remove [-npsw] <pool> <vdev> ...1640*1641* Removes the given vdev from the pool.1642*/1643int1644zpool_do_remove(int argc, char **argv)1645{1646char *poolname;1647int i, ret = 0;1648zpool_handle_t *zhp = NULL;1649boolean_t stop = B_FALSE;1650int c;1651boolean_t noop = B_FALSE;1652boolean_t parsable = B_FALSE;1653boolean_t wait = B_FALSE;16541655/* check options */1656while ((c = getopt(argc, argv, "npsw")) != -1) {1657switch (c) {1658case 'n':1659noop = B_TRUE;1660break;1661case 'p':1662parsable = B_TRUE;1663break;1664case 's':1665stop = B_TRUE;1666break;1667case 'w':1668wait = B_TRUE;1669break;1670case '?':1671(void) fprintf(stderr, gettext("invalid option '%c'\n"),1672optopt);1673usage(B_FALSE);1674}1675}16761677argc -= optind;1678argv += optind;16791680/* get pool name and check number of arguments */1681if (argc < 1) {1682(void) fprintf(stderr, gettext("missing pool name argument\n"));1683usage(B_FALSE);1684}16851686poolname = argv[0];16871688if ((zhp = zpool_open(g_zfs, poolname)) == NULL)1689return (1);16901691if (stop && noop) {1692zpool_close(zhp);1693(void) fprintf(stderr, gettext("stop request ignored\n"));1694return (0);1695}16961697if (stop) {1698if (argc > 1) {1699(void) fprintf(stderr, gettext("too many arguments\n"));1700usage(B_FALSE);1701}1702if (zpool_vdev_remove_cancel(zhp) != 0)1703ret = 1;1704if (wait) {1705(void) fprintf(stderr, gettext("invalid option "1706"combination: -w cannot be used with -s\n"));1707usage(B_FALSE);1708}1709} else {1710if (argc < 2) {1711(void) fprintf(stderr, gettext("missing device\n"));1712usage(B_FALSE);1713}17141715for (i = 1; i < argc; i++) {1716if (noop) {1717uint64_t size;17181719if (zpool_vdev_indirect_size(zhp, argv[i],1720&size) != 0) {1721ret = 1;1722break;1723}1724if (parsable) {1725(void) printf("%s %llu\n",1726argv[i], (unsigned long long)size);1727} else {1728char valstr[32];1729zfs_nicenum(size, valstr,1730sizeof (valstr));1731(void) printf("Memory that will be "1732"used after removing %s: %s\n",1733argv[i], valstr);1734}1735} else {1736if (zpool_vdev_remove(zhp, argv[i]) != 0)1737ret = 1;1738}1739}17401741if (ret == 0 && wait)1742ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);1743}1744zpool_close(zhp);17451746return (ret);1747}17481749/*1750* Return 1 if a vdev is active (being used in a pool)1751* Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)1752*1753* This is useful for checking if a disk in an active pool is offlined or1754* faulted.1755*/1756static int1757vdev_is_active(char *vdev_path)1758{1759int fd;1760fd = open(vdev_path, O_EXCL);1761if (fd < 0) {1762return (1); /* cant open O_EXCL - disk is active */1763}17641765(void) close(fd);1766return (0); /* disk is inactive in the pool */1767}17681769/*1770* zpool labelclear [-f] <vdev>1771*1772* -f Force clearing the label for the vdevs which are members of1773* the exported or foreign pools.1774*1775* Verifies that the vdev is not active and zeros out the label information1776* on the device.1777*/1778int1779zpool_do_labelclear(int argc, char **argv)1780{1781char vdev[MAXPATHLEN];1782char *name = NULL;1783int c, fd, ret = 0;1784nvlist_t *config;1785pool_state_t state;1786boolean_t inuse = B_FALSE;1787boolean_t force = B_FALSE;17881789/* check options */1790while ((c = getopt(argc, argv, "f")) != -1) {1791switch (c) {1792case 'f':1793force = B_TRUE;1794break;1795default:1796(void) fprintf(stderr, gettext("invalid option '%c'\n"),1797optopt);1798usage(B_FALSE);1799}1800}18011802argc -= optind;1803argv += optind;18041805/* get vdev name */1806if (argc < 1) {1807(void) fprintf(stderr, gettext("missing vdev name\n"));1808usage(B_FALSE);1809}1810if (argc > 1) {1811(void) fprintf(stderr, gettext("too many arguments\n"));1812usage(B_FALSE);1813}18141815(void) strlcpy(vdev, argv[0], sizeof (vdev));18161817/*1818* If we cannot open an absolute path, we quit.1819* Otherwise if the provided vdev name doesn't point to a file,1820* try prepending expected disk paths and partition numbers.1821*/1822if ((fd = open(vdev, O_RDWR)) < 0) {1823int error;1824if (vdev[0] == '/') {1825(void) fprintf(stderr, gettext("failed to open "1826"%s: %s\n"), vdev, strerror(errno));1827return (1);1828}18291830error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);1831if (error == 0 && zfs_dev_is_whole_disk(vdev)) {1832if (zfs_append_partition(vdev, MAXPATHLEN) == -1)1833error = ENOENT;1834}18351836if (error || ((fd = open(vdev, O_RDWR)) < 0)) {1837if (errno == ENOENT) {1838(void) fprintf(stderr, gettext(1839"failed to find device %s, try "1840"specifying absolute path instead\n"),1841argv[0]);1842return (1);1843}18441845(void) fprintf(stderr, gettext("failed to open %s:"1846" %s\n"), vdev, strerror(errno));1847return (1);1848}1849}18501851/*1852* Flush all dirty pages for the block device. This should not be1853* fatal when the device does not support BLKFLSBUF as would be the1854* case for a file vdev.1855*/1856if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))1857(void) fprintf(stderr, gettext("failed to invalidate "1858"cache for %s: %s\n"), vdev, strerror(errno));18591860if (zpool_read_label(fd, &config, NULL) != 0) {1861(void) fprintf(stderr,1862gettext("failed to read label from %s\n"), vdev);1863ret = 1;1864goto errout;1865}1866nvlist_free(config);18671868ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);1869if (ret != 0) {1870(void) fprintf(stderr,1871gettext("failed to check state for %s\n"), vdev);1872ret = 1;1873goto errout;1874}18751876if (!inuse)1877goto wipe_label;18781879switch (state) {1880default:1881case POOL_STATE_ACTIVE:1882case POOL_STATE_SPARE:1883case POOL_STATE_L2CACHE:1884/*1885* We allow the user to call 'zpool offline -f'1886* on an offlined disk in an active pool. We can check if1887* the disk is online by calling vdev_is_active().1888*/1889if (force && !vdev_is_active(vdev))1890break;18911892(void) fprintf(stderr, gettext(1893"%s is a member (%s) of pool \"%s\""),1894vdev, zpool_pool_state_to_name(state), name);18951896if (force) {1897(void) fprintf(stderr, gettext(1898". Offline the disk first to clear its label."));1899}1900printf("\n");1901ret = 1;1902goto errout;19031904case POOL_STATE_EXPORTED:1905if (force)1906break;1907(void) fprintf(stderr, gettext(1908"use '-f' to override the following error:\n"1909"%s is a member of exported pool \"%s\"\n"),1910vdev, name);1911ret = 1;1912goto errout;19131914case POOL_STATE_POTENTIALLY_ACTIVE:1915if (force)1916break;1917(void) fprintf(stderr, gettext(1918"use '-f' to override the following error:\n"1919"%s is a member of potentially active pool \"%s\"\n"),1920vdev, name);1921ret = 1;1922goto errout;19231924case POOL_STATE_DESTROYED:1925/* inuse should never be set for a destroyed pool */1926assert(0);1927break;1928}19291930wipe_label:1931ret = zpool_clear_label(fd);1932if (ret != 0) {1933(void) fprintf(stderr,1934gettext("failed to clear label for %s\n"), vdev);1935}19361937errout:1938free(name);1939(void) close(fd);19401941return (ret);1942}19431944/*1945* zpool create [-fnd] [-o property=value] ...1946* [-O file-system-property=value] ...1947* [-R root] [-m mountpoint] <pool> <dev> ...1948*1949* -f Force creation, even if devices appear in use1950* -n Do not create the pool, but display the resulting layout if it1951* were to be created.1952* -R Create a pool under an alternate root1953* -m Set default mountpoint for the root dataset. By default it's1954* '/<pool>'1955* -o Set property=value.1956* -o Set feature@feature=enabled|disabled.1957* -d Don't automatically enable all supported pool features1958* (individual features can be enabled with -o).1959* -O Set fsproperty=value in the pool's root file system1960*1961* Creates the named pool according to the given vdev specification. The1962* bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.1963* Once we get the nvlist back from make_root_vdev(), we either print out the1964* contents (if '-n' was specified), or pass it to libzfs to do the creation.1965*/1966int1967zpool_do_create(int argc, char **argv)1968{1969boolean_t force = B_FALSE;1970boolean_t dryrun = B_FALSE;1971boolean_t enable_pool_features = B_TRUE;19721973int c;1974nvlist_t *nvroot = NULL;1975char *poolname;1976char *tname = NULL;1977int ret = 1;1978char *altroot = NULL;1979char *compat = NULL;1980char *mountpoint = NULL;1981nvlist_t *fsprops = NULL;1982nvlist_t *props = NULL;1983char *propval;19841985/* check options */1986while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {1987switch (c) {1988case 'f':1989force = B_TRUE;1990break;1991case 'n':1992dryrun = B_TRUE;1993break;1994case 'd':1995enable_pool_features = B_FALSE;1996break;1997case 'R':1998altroot = optarg;1999if (add_prop_list(zpool_prop_to_name(2000ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))2001goto errout;2002if (add_prop_list_default(zpool_prop_to_name(2003ZPOOL_PROP_CACHEFILE), "none", &props))2004goto errout;2005break;2006case 'm':2007/* Equivalent to -O mountpoint=optarg */2008mountpoint = optarg;2009break;2010case 'o':2011if ((propval = strchr(optarg, '=')) == NULL) {2012(void) fprintf(stderr, gettext("missing "2013"'=' for -o option\n"));2014goto errout;2015}2016*propval = '\0';2017propval++;20182019if (add_prop_list(optarg, propval, &props, B_TRUE))2020goto errout;20212022/*2023* If the user is creating a pool that doesn't support2024* feature flags, don't enable any features.2025*/2026if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {2027char *end;2028u_longlong_t ver;20292030ver = strtoull(propval, &end, 0);2031if (*end == '\0' &&2032ver < SPA_VERSION_FEATURES) {2033enable_pool_features = B_FALSE;2034}2035}2036if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)2037altroot = propval;2038if (zpool_name_to_prop(optarg) ==2039ZPOOL_PROP_COMPATIBILITY)2040compat = propval;2041break;2042case 'O':2043if ((propval = strchr(optarg, '=')) == NULL) {2044(void) fprintf(stderr, gettext("missing "2045"'=' for -O option\n"));2046goto errout;2047}2048*propval = '\0';2049propval++;20502051/*2052* Mountpoints are checked and then added later.2053* Uniquely among properties, they can be specified2054* more than once, to avoid conflict with -m.2055*/2056if (0 == strcmp(optarg,2057zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {2058mountpoint = propval;2059} else if (add_prop_list(optarg, propval, &fsprops,2060B_FALSE)) {2061goto errout;2062}2063break;2064case 't':2065/*2066* Sanity check temporary pool name.2067*/2068if (strchr(optarg, '/') != NULL) {2069(void) fprintf(stderr, gettext("cannot create "2070"'%s': invalid character '/' in temporary "2071"name\n"), optarg);2072(void) fprintf(stderr, gettext("use 'zfs "2073"create' to create a dataset\n"));2074goto errout;2075}20762077if (add_prop_list(zpool_prop_to_name(2078ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))2079goto errout;2080if (add_prop_list_default(zpool_prop_to_name(2081ZPOOL_PROP_CACHEFILE), "none", &props))2082goto errout;2083tname = optarg;2084break;2085case ':':2086(void) fprintf(stderr, gettext("missing argument for "2087"'%c' option\n"), optopt);2088goto badusage;2089case '?':2090(void) fprintf(stderr, gettext("invalid option '%c'\n"),2091optopt);2092goto badusage;2093}2094}20952096argc -= optind;2097argv += optind;20982099/* get pool name and check number of arguments */2100if (argc < 1) {2101(void) fprintf(stderr, gettext("missing pool name argument\n"));2102goto badusage;2103}2104if (argc < 2) {2105(void) fprintf(stderr, gettext("missing vdev specification\n"));2106goto badusage;2107}21082109poolname = argv[0];21102111/*2112* As a special case, check for use of '/' in the name, and direct the2113* user to use 'zfs create' instead.2114*/2115if (strchr(poolname, '/') != NULL) {2116(void) fprintf(stderr, gettext("cannot create '%s': invalid "2117"character '/' in pool name\n"), poolname);2118(void) fprintf(stderr, gettext("use 'zfs create' to "2119"create a dataset\n"));2120goto errout;2121}21222123/* pass off to make_root_vdev for bulk processing */2124nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,2125argc - 1, argv + 1);2126if (nvroot == NULL)2127goto errout;21282129/* make_root_vdev() allows 0 toplevel children if there are spares */2130if (!zfs_allocatable_devs(nvroot)) {2131(void) fprintf(stderr, gettext("invalid vdev "2132"specification: at least one toplevel vdev must be "2133"specified\n"));2134goto errout;2135}21362137if (altroot != NULL && altroot[0] != '/') {2138(void) fprintf(stderr, gettext("invalid alternate root '%s': "2139"must be an absolute path\n"), altroot);2140goto errout;2141}21422143/*2144* Check the validity of the mountpoint and direct the user to use the2145* '-m' mountpoint option if it looks like its in use.2146*/2147if (mountpoint == NULL ||2148(strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&2149strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {2150char buf[MAXPATHLEN];2151DIR *dirp;21522153if (mountpoint && mountpoint[0] != '/') {2154(void) fprintf(stderr, gettext("invalid mountpoint "2155"'%s': must be an absolute path, 'legacy', or "2156"'none'\n"), mountpoint);2157goto errout;2158}21592160if (mountpoint == NULL) {2161if (altroot != NULL)2162(void) snprintf(buf, sizeof (buf), "%s/%s",2163altroot, poolname);2164else2165(void) snprintf(buf, sizeof (buf), "/%s",2166poolname);2167} else {2168if (altroot != NULL)2169(void) snprintf(buf, sizeof (buf), "%s%s",2170altroot, mountpoint);2171else2172(void) snprintf(buf, sizeof (buf), "%s",2173mountpoint);2174}21752176if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {2177(void) fprintf(stderr, gettext("mountpoint '%s' : "2178"%s\n"), buf, strerror(errno));2179(void) fprintf(stderr, gettext("use '-m' "2180"option to provide a different default\n"));2181goto errout;2182} else if (dirp) {2183int count = 0;21842185while (count < 3 && readdir(dirp) != NULL)2186count++;2187(void) closedir(dirp);21882189if (count > 2) {2190(void) fprintf(stderr, gettext("mountpoint "2191"'%s' exists and is not empty\n"), buf);2192(void) fprintf(stderr, gettext("use '-m' "2193"option to provide a "2194"different default\n"));2195goto errout;2196}2197}2198}21992200/*2201* Now that the mountpoint's validity has been checked, ensure that2202* the property is set appropriately prior to creating the pool.2203*/2204if (mountpoint != NULL) {2205ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),2206mountpoint, &fsprops, B_FALSE);2207if (ret != 0)2208goto errout;2209}22102211ret = 1;2212if (dryrun) {2213/*2214* For a dry run invocation, print out a basic message and run2215* through all the vdevs in the list and print out in an2216* appropriate hierarchy.2217*/2218(void) printf(gettext("would create '%s' with the "2219"following layout:\n\n"), poolname);22202221print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);2222print_vdev_tree(NULL, "dedup", nvroot, 0,2223VDEV_ALLOC_BIAS_DEDUP, 0);2224print_vdev_tree(NULL, "special", nvroot, 0,2225VDEV_ALLOC_BIAS_SPECIAL, 0);2226print_vdev_tree(NULL, "logs", nvroot, 0,2227VDEV_ALLOC_BIAS_LOG, 0);2228print_cache_list(nvroot, 0);2229print_spare_list(nvroot, 0);22302231ret = 0;2232} else {2233/*2234* Load in feature set.2235* Note: if compatibility property not given, we'll have2236* NULL, which means 'all features'.2237*/2238boolean_t requested_features[SPA_FEATURES];2239if (zpool_do_load_compat(compat, requested_features) !=2240ZPOOL_COMPATIBILITY_OK)2241goto errout;22422243/*2244* props contains list of features to enable.2245* For each feature:2246* - remove it if feature@name=disabled2247* - leave it there if feature@name=enabled2248* - add it if:2249* - enable_pool_features (ie: no '-d' or '-o version')2250* - it's supported by the kernel module2251* - it's in the requested feature set2252* - warn if it's enabled but not in compat2253*/2254for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {2255char propname[MAXPATHLEN];2256const char *propval;2257zfeature_info_t *feat = &spa_feature_table[i];22582259(void) snprintf(propname, sizeof (propname),2260"feature@%s", feat->fi_uname);22612262if (!nvlist_lookup_string(props, propname, &propval)) {2263if (strcmp(propval,2264ZFS_FEATURE_DISABLED) == 0) {2265(void) nvlist_remove_all(props,2266propname);2267} else if (strcmp(propval,2268ZFS_FEATURE_ENABLED) == 0 &&2269!requested_features[i]) {2270(void) fprintf(stderr, gettext(2271"Warning: feature \"%s\" enabled "2272"but is not in specified "2273"'compatibility' feature set.\n"),2274feat->fi_uname);2275}2276} else if (2277enable_pool_features &&2278feat->fi_zfs_mod_supported &&2279requested_features[i]) {2280ret = add_prop_list(propname,2281ZFS_FEATURE_ENABLED, &props, B_TRUE);2282if (ret != 0)2283goto errout;2284}2285}22862287ret = 1;2288if (zpool_create(g_zfs, poolname,2289nvroot, props, fsprops) == 0) {2290zfs_handle_t *pool = zfs_open(g_zfs,2291tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);2292if (pool != NULL) {2293if (zfs_mount(pool, NULL, 0) == 0) {2294ret = zfs_share(pool, NULL);2295zfs_commit_shares(NULL);2296}2297zfs_close(pool);2298}2299} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {2300(void) fprintf(stderr, gettext("pool name may have "2301"been omitted\n"));2302}2303}23042305errout:2306nvlist_free(nvroot);2307nvlist_free(fsprops);2308nvlist_free(props);2309return (ret);2310badusage:2311nvlist_free(fsprops);2312nvlist_free(props);2313usage(B_FALSE);2314return (2);2315}23162317/*2318* zpool destroy <pool>2319*2320* -f Forcefully unmount any datasets2321*2322* Destroy the given pool. Automatically unmounts any datasets in the pool.2323*/2324int2325zpool_do_destroy(int argc, char **argv)2326{2327boolean_t force = B_FALSE;2328int c;2329char *pool;2330zpool_handle_t *zhp;2331int ret;23322333/* check options */2334while ((c = getopt(argc, argv, "f")) != -1) {2335switch (c) {2336case 'f':2337force = B_TRUE;2338break;2339case '?':2340(void) fprintf(stderr, gettext("invalid option '%c'\n"),2341optopt);2342usage(B_FALSE);2343}2344}23452346argc -= optind;2347argv += optind;23482349/* check arguments */2350if (argc < 1) {2351(void) fprintf(stderr, gettext("missing pool argument\n"));2352usage(B_FALSE);2353}2354if (argc > 1) {2355(void) fprintf(stderr, gettext("too many arguments\n"));2356usage(B_FALSE);2357}23582359pool = argv[0];23602361if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {2362/*2363* As a special case, check for use of '/' in the name, and2364* direct the user to use 'zfs destroy' instead.2365*/2366if (strchr(pool, '/') != NULL)2367(void) fprintf(stderr, gettext("use 'zfs destroy' to "2368"destroy a dataset\n"));2369return (1);2370}23712372if (zpool_disable_datasets(zhp, force) != 0) {2373(void) fprintf(stderr, gettext("could not destroy '%s': "2374"could not unmount datasets\n"), zpool_get_name(zhp));2375zpool_close(zhp);2376return (1);2377}23782379/* The history must be logged as part of the export */2380log_history = B_FALSE;23812382ret = (zpool_destroy(zhp, history_str) != 0);23832384zpool_close(zhp);23852386return (ret);2387}23882389typedef struct export_cbdata {2390taskq_t *taskq;2391pthread_mutex_t mnttab_lock;2392boolean_t force;2393boolean_t hardforce;2394int retval;2395} export_cbdata_t;239623972398typedef struct {2399char *aea_poolname;2400export_cbdata_t *aea_cbdata;2401} async_export_args_t;24022403/*2404* Export one pool2405*/2406static int2407zpool_export_one(zpool_handle_t *zhp, void *data)2408{2409export_cbdata_t *cb = data;24102411/*2412* zpool_disable_datasets() is not thread-safe for mnttab access.2413* So we serialize access here for 'zpool export -a' parallel case.2414*/2415if (cb->taskq != NULL)2416(void) pthread_mutex_lock(&cb->mnttab_lock);24172418int retval = zpool_disable_datasets(zhp, cb->force);24192420if (cb->taskq != NULL)2421(void) pthread_mutex_unlock(&cb->mnttab_lock);24222423if (retval)2424return (1);24252426if (cb->hardforce) {2427if (zpool_export_force(zhp, history_str) != 0)2428return (1);2429} else if (zpool_export(zhp, cb->force, history_str) != 0) {2430return (1);2431}24322433return (0);2434}24352436/*2437* Asynchronous export request2438*/2439static void2440zpool_export_task(void *arg)2441{2442async_export_args_t *aea = arg;24432444zpool_handle_t *zhp = zpool_open(g_zfs, aea->aea_poolname);2445if (zhp != NULL) {2446int ret = zpool_export_one(zhp, aea->aea_cbdata);2447if (ret != 0)2448aea->aea_cbdata->retval = ret;2449zpool_close(zhp);2450} else {2451aea->aea_cbdata->retval = 1;2452}24532454free(aea->aea_poolname);2455free(aea);2456}24572458/*2459* Process an export request in parallel2460*/2461static int2462zpool_export_one_async(zpool_handle_t *zhp, void *data)2463{2464taskq_t *tq = ((export_cbdata_t *)data)->taskq;2465async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t));24662467/* save pool name since zhp will go out of scope */2468aea->aea_poolname = strdup(zpool_get_name(zhp));2469aea->aea_cbdata = data;24702471/* ship off actual export to another thread */2472if (taskq_dispatch(tq, zpool_export_task, (void *)aea,2473TQ_SLEEP) == TASKQID_INVALID)2474return (errno); /* unlikely */2475else2476return (0);2477}24782479/*2480* zpool export [-f] <pool> ...2481*2482* -a Export all pools2483* -f Forcefully unmount datasets2484*2485* Export the given pools. By default, the command will attempt to cleanly2486* unmount any active datasets within the pool. If the '-f' flag is specified,2487* then the datasets will be forcefully unmounted.2488*/2489int2490zpool_do_export(int argc, char **argv)2491{2492export_cbdata_t cb;2493boolean_t do_all = B_FALSE;2494boolean_t force = B_FALSE;2495boolean_t hardforce = B_FALSE;2496int c, ret;24972498/* check options */2499while ((c = getopt(argc, argv, "afF")) != -1) {2500switch (c) {2501case 'a':2502do_all = B_TRUE;2503break;2504case 'f':2505force = B_TRUE;2506break;2507case 'F':2508hardforce = B_TRUE;2509break;2510case '?':2511(void) fprintf(stderr, gettext("invalid option '%c'\n"),2512optopt);2513usage(B_FALSE);2514}2515}25162517cb.force = force;2518cb.hardforce = hardforce;2519cb.taskq = NULL;2520cb.retval = 0;2521argc -= optind;2522argv += optind;25232524/* The history will be logged as part of the export itself */2525log_history = B_FALSE;25262527if (do_all) {2528if (argc != 0) {2529(void) fprintf(stderr, gettext("too many arguments\n"));2530usage(B_FALSE);2531}25322533cb.taskq = taskq_create("zpool_export",25345 * sysconf(_SC_NPROCESSORS_ONLN), minclsyspri, 1, INT_MAX,2535TASKQ_DYNAMIC);2536(void) pthread_mutex_init(&cb.mnttab_lock, NULL);25372538/* Asynchronously call zpool_export_one using thread pool */2539ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,2540B_FALSE, zpool_export_one_async, &cb);25412542taskq_wait(cb.taskq);2543taskq_destroy(cb.taskq);2544(void) pthread_mutex_destroy(&cb.mnttab_lock);25452546return (ret | cb.retval);2547}25482549/* check arguments */2550if (argc < 1) {2551(void) fprintf(stderr, gettext("missing pool argument\n"));2552usage(B_FALSE);2553}25542555ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,2556B_FALSE, zpool_export_one, &cb);25572558return (ret);2559}25602561/*2562* Given a vdev configuration, determine the maximum width needed for the device2563* name column.2564*/2565static int2566max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,2567int name_flags)2568{2569static const char *const subtypes[] =2570{ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};25712572char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);2573max = MAX(strlen(name) + depth, max);2574free(name);25752576nvlist_t **child;2577uint_t children;2578for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)2579if (nvlist_lookup_nvlist_array(nv, subtypes[i],2580&child, &children) == 0)2581for (uint_t c = 0; c < children; ++c)2582max = MAX(max_width(zhp, child[c], depth + 2,2583max, name_flags), max);25842585return (max);2586}25872588typedef struct status_cbdata {2589int cb_count;2590int cb_name_flags;2591int cb_namewidth;2592boolean_t cb_allpools;2593boolean_t cb_verbose;2594boolean_t cb_literal;2595boolean_t cb_explain;2596boolean_t cb_first;2597boolean_t cb_dedup_stats;2598boolean_t cb_print_unhealthy;2599boolean_t cb_print_status;2600boolean_t cb_print_slow_ios;2601boolean_t cb_print_dio_verify;2602boolean_t cb_print_vdev_init;2603boolean_t cb_print_vdev_trim;2604vdev_cmd_data_list_t *vcdl;2605boolean_t cb_print_power;2606boolean_t cb_json;2607boolean_t cb_flat_vdevs;2608nvlist_t *cb_jsobj;2609boolean_t cb_json_as_int;2610boolean_t cb_json_pool_key_guid;2611} status_cbdata_t;26122613/* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */2614static boolean_t2615is_blank_str(const char *str)2616{2617for (; str != NULL && *str != '\0'; ++str)2618if (!isblank(*str))2619return (B_FALSE);2620return (B_TRUE);2621}26222623static void2624zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path,2625nvlist_t *item)2626{2627vdev_cmd_data_t *data;2628int i, j, k = 1;2629char tmp[256];2630const char *val;26312632for (i = 0; i < vcdl->count; i++) {2633if ((strcmp(vcdl->data[i].path, path) != 0) ||2634(strcmp(vcdl->data[i].pool, pool) != 0))2635continue;26362637data = &vcdl->data[i];2638for (j = 0; j < vcdl->uniq_cols_cnt; j++) {2639val = NULL;2640for (int k = 0; k < data->cols_cnt; k++) {2641if (strcmp(data->cols[k],2642vcdl->uniq_cols[j]) == 0) {2643val = data->lines[k];2644break;2645}2646}2647if (val == NULL || is_blank_str(val))2648val = "-";2649fnvlist_add_string(item, vcdl->uniq_cols[j], val);2650}26512652for (j = data->cols_cnt; j < data->lines_cnt; j++) {2653if (data->lines[j]) {2654(void) snprintf(tmp, 256, "extra_%d", k++);2655fnvlist_add_string(item, tmp,2656data->lines[j]);2657}2658}2659break;2660}2661}26622663/* Print command output lines for specific vdev in a specific pool */2664static void2665zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)2666{2667vdev_cmd_data_t *data;2668int i, j;2669const char *val;26702671for (i = 0; i < vcdl->count; i++) {2672if ((strcmp(vcdl->data[i].path, path) != 0) ||2673(strcmp(vcdl->data[i].pool, pool) != 0)) {2674/* Not the vdev we're looking for */2675continue;2676}26772678data = &vcdl->data[i];2679/* Print out all the output values for this vdev */2680for (j = 0; j < vcdl->uniq_cols_cnt; j++) {2681val = NULL;2682/* Does this vdev have values for this column? */2683for (int k = 0; k < data->cols_cnt; k++) {2684if (strcmp(data->cols[k],2685vcdl->uniq_cols[j]) == 0) {2686/* yes it does, record the value */2687val = data->lines[k];2688break;2689}2690}2691/*2692* Mark empty values with dashes to make output2693* awk-able.2694*/2695if (val == NULL || is_blank_str(val))2696val = "-";26972698printf("%*s", vcdl->uniq_cols_width[j], val);2699if (j < vcdl->uniq_cols_cnt - 1)2700(void) fputs(" ", stdout);2701}27022703/* Print out any values that aren't in a column at the end */2704for (j = data->cols_cnt; j < data->lines_cnt; j++) {2705/* Did we have any columns? If so print a spacer. */2706if (vcdl->uniq_cols_cnt > 0)2707(void) fputs(" ", stdout);27082709val = data->lines[j];2710(void) fputs(val ?: "", stdout);2711}2712break;2713}2714}27152716/*2717* Print vdev initialization status for leaves2718*/2719static void2720print_status_initialize(vdev_stat_t *vs, boolean_t verbose)2721{2722if (verbose) {2723if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||2724vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||2725vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&2726!vs->vs_scan_removing) {2727char zbuf[1024];2728char tbuf[256];27292730time_t t = vs->vs_initialize_action_time;2731int initialize_pct = 100;2732if (vs->vs_initialize_state !=2733VDEV_INITIALIZE_COMPLETE) {2734initialize_pct = (vs->vs_initialize_bytes_done *2735100 / (vs->vs_initialize_bytes_est + 1));2736}27372738(void) ctime_r(&t, tbuf);2739tbuf[24] = 0;27402741switch (vs->vs_initialize_state) {2742case VDEV_INITIALIZE_SUSPENDED:2743(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",2744gettext("suspended, started at"), tbuf);2745break;2746case VDEV_INITIALIZE_ACTIVE:2747(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",2748gettext("started at"), tbuf);2749break;2750case VDEV_INITIALIZE_COMPLETE:2751(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",2752gettext("completed at"), tbuf);2753break;2754}27552756(void) printf(gettext(" (%d%% initialized%s)"),2757initialize_pct, zbuf);2758} else {2759(void) printf(gettext(" (uninitialized)"));2760}2761} else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {2762(void) printf(gettext(" (initializing)"));2763}2764}27652766/*2767* Print vdev TRIM status for leaves2768*/2769static void2770print_status_trim(vdev_stat_t *vs, boolean_t verbose)2771{2772if (verbose) {2773if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||2774vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||2775vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&2776!vs->vs_scan_removing) {2777char zbuf[1024];2778char tbuf[256];27792780time_t t = vs->vs_trim_action_time;2781int trim_pct = 100;2782if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {2783trim_pct = (vs->vs_trim_bytes_done *2784100 / (vs->vs_trim_bytes_est + 1));2785}27862787(void) ctime_r(&t, tbuf);2788tbuf[24] = 0;27892790switch (vs->vs_trim_state) {2791case VDEV_TRIM_SUSPENDED:2792(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",2793gettext("suspended, started at"), tbuf);2794break;2795case VDEV_TRIM_ACTIVE:2796(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",2797gettext("started at"), tbuf);2798break;2799case VDEV_TRIM_COMPLETE:2800(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",2801gettext("completed at"), tbuf);2802break;2803}28042805(void) printf(gettext(" (%d%% trimmed%s)"),2806trim_pct, zbuf);2807} else if (vs->vs_trim_notsup) {2808(void) printf(gettext(" (trim unsupported)"));2809} else {2810(void) printf(gettext(" (untrimmed)"));2811}2812} else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {2813(void) printf(gettext(" (trimming)"));2814}2815}28162817/*2818* Return the color associated with a health string. This includes returning2819* NULL for no color change.2820*/2821static const char *2822health_str_to_color(const char *health)2823{2824if (strcmp(health, gettext("FAULTED")) == 0 ||2825strcmp(health, gettext("SUSPENDED")) == 0 ||2826strcmp(health, gettext("UNAVAIL")) == 0) {2827return (ANSI_RED);2828}28292830if (strcmp(health, gettext("OFFLINE")) == 0 ||2831strcmp(health, gettext("DEGRADED")) == 0 ||2832strcmp(health, gettext("REMOVED")) == 0) {2833return (ANSI_YELLOW);2834}28352836return (NULL);2837}28382839/*2840* Called for each leaf vdev. Returns 0 if the vdev is healthy.2841* A vdev is unhealthy if any of the following are true:2842* 1) there are read, write, or checksum errors,2843* 2) its state is not ONLINE, or2844* 3) slow IO reporting was requested (-s) and there are slow IOs.2845*/2846static int2847vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)2848{2849status_cbdata_t *cb = data;2850vdev_stat_t *vs;2851uint_t vsc;2852(void) hdl_data;28532854if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,2855(uint64_t **)&vs, &vsc) != 0)2856return (1);28572858if (vs->vs_checksum_errors || vs->vs_read_errors ||2859vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)2860return (1);28612862if (cb->cb_print_slow_ios && vs->vs_slow_ios)2863return (1);28642865return (0);2866}28672868/*2869* Print out configuration state as requested by status_callback.2870*/2871static void2872print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,2873nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)2874{2875nvlist_t **child, *root;2876uint_t c, i, vsc, children;2877pool_scan_stat_t *ps = NULL;2878vdev_stat_t *vs;2879char rbuf[6], wbuf[6], cbuf[6], dbuf[6];2880char *vname;2881uint64_t notpresent;2882spare_cbdata_t spare_cb;2883const char *state;2884const char *type;2885const char *path = NULL;2886const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,2887*scolor = NULL;28882889if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,2890&child, &children) != 0)2891children = 0;28922893verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,2894(uint64_t **)&vs, &vsc) == 0);28952896verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);28972898if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)2899return;29002901state = zpool_state_to_name(vs->vs_state, vs->vs_aux);29022903if (isspare) {2904/*2905* For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for2906* online drives.2907*/2908if (vs->vs_aux == VDEV_AUX_SPARED)2909state = gettext("INUSE");2910else if (vs->vs_state == VDEV_STATE_HEALTHY)2911state = gettext("AVAIL");2912}29132914/*2915* If '-e' is specified then top-level vdevs and their children2916* can be pruned if all of their leaves are healthy.2917*/2918if (cb->cb_print_unhealthy && depth > 0 &&2919for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {2920return;2921}29222923(void) printf_color(health_str_to_color(state),2924"\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,2925name, state);29262927if (!isspare) {2928if (vs->vs_read_errors)2929rcolor = ANSI_RED;29302931if (vs->vs_write_errors)2932wcolor = ANSI_RED;29332934if (vs->vs_checksum_errors)2935ccolor = ANSI_RED;29362937if (vs->vs_slow_ios)2938scolor = ANSI_BLUE;29392940if (cb->cb_literal) {2941(void) fputc(' ', stdout);2942(void) printf_color(rcolor, "%5llu",2943(u_longlong_t)vs->vs_read_errors);2944(void) fputc(' ', stdout);2945(void) printf_color(wcolor, "%5llu",2946(u_longlong_t)vs->vs_write_errors);2947(void) fputc(' ', stdout);2948(void) printf_color(ccolor, "%5llu",2949(u_longlong_t)vs->vs_checksum_errors);2950} else {2951zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));2952zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));2953zfs_nicenum(vs->vs_checksum_errors, cbuf,2954sizeof (cbuf));2955(void) fputc(' ', stdout);2956(void) printf_color(rcolor, "%5s", rbuf);2957(void) fputc(' ', stdout);2958(void) printf_color(wcolor, "%5s", wbuf);2959(void) fputc(' ', stdout);2960(void) printf_color(ccolor, "%5s", cbuf);2961}2962if (cb->cb_print_slow_ios) {2963if (children == 0) {2964/* Only leafs vdevs have slow IOs */2965zfs_nicenum(vs->vs_slow_ios, rbuf,2966sizeof (rbuf));2967} else {2968(void) snprintf(rbuf, sizeof (rbuf), "-");2969}29702971if (cb->cb_literal)2972(void) printf_color(scolor, " %5llu",2973(u_longlong_t)vs->vs_slow_ios);2974else2975(void) printf_color(scolor, " %5s", rbuf);2976}2977if (cb->cb_print_power) {2978if (children == 0) {2979/* Only leaf vdevs have physical slots */2980switch (zpool_power_current_state(zhp, (char *)2981fnvlist_lookup_string(nv,2982ZPOOL_CONFIG_PATH))) {2983case 0:2984(void) printf_color(ANSI_RED, " %5s",2985gettext("off"));2986break;2987case 1:2988printf(" %5s", gettext("on"));2989break;2990default:2991printf(" %5s", "-");2992}2993} else {2994printf(" %5s", "-");2995}2996}2997if (VDEV_STAT_VALID(vs_dio_verify_errors, vsc) &&2998cb->cb_print_dio_verify) {2999zfs_nicenum(vs->vs_dio_verify_errors, dbuf,3000sizeof (dbuf));30013002if (cb->cb_literal)3003printf(" %5llu",3004(u_longlong_t)vs->vs_dio_verify_errors);3005else3006printf(" %5s", dbuf);3007}3008}30093010if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,3011¬present) == 0) {3012verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);3013(void) printf(" %s %s", gettext("was"), path);3014} else if (vs->vs_aux != 0) {3015(void) printf(" ");3016color_start(ANSI_RED);3017switch (vs->vs_aux) {3018case VDEV_AUX_OPEN_FAILED:3019(void) printf(gettext("cannot open"));3020break;30213022case VDEV_AUX_BAD_GUID_SUM:3023(void) printf(gettext("missing device"));3024break;30253026case VDEV_AUX_NO_REPLICAS:3027(void) printf(gettext("insufficient replicas"));3028break;30293030case VDEV_AUX_VERSION_NEWER:3031(void) printf(gettext("newer version"));3032break;30333034case VDEV_AUX_UNSUP_FEAT:3035(void) printf(gettext("unsupported feature(s)"));3036break;30373038case VDEV_AUX_ASHIFT_TOO_BIG:3039(void) printf(gettext("unsupported minimum blocksize"));3040break;30413042case VDEV_AUX_SPARED:3043verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,3044&spare_cb.cb_guid) == 0);3045if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {3046if (strcmp(zpool_get_name(spare_cb.cb_zhp),3047zpool_get_name(zhp)) == 0)3048(void) printf(gettext("currently in "3049"use"));3050else3051(void) printf(gettext("in use by "3052"pool '%s'"),3053zpool_get_name(spare_cb.cb_zhp));3054zpool_close(spare_cb.cb_zhp);3055} else {3056(void) printf(gettext("currently in use"));3057}3058break;30593060case VDEV_AUX_ERR_EXCEEDED:3061if (vs->vs_read_errors + vs->vs_write_errors +3062vs->vs_checksum_errors == 0 && children == 0 &&3063vs->vs_slow_ios > 0) {3064(void) printf(gettext("too many slow I/Os"));3065} else {3066(void) printf(gettext("too many errors"));3067}3068break;30693070case VDEV_AUX_IO_FAILURE:3071(void) printf(gettext("experienced I/O failures"));3072break;30733074case VDEV_AUX_BAD_LOG:3075(void) printf(gettext("bad intent log"));3076break;30773078case VDEV_AUX_EXTERNAL:3079(void) printf(gettext("external device fault"));3080break;30813082case VDEV_AUX_SPLIT_POOL:3083(void) printf(gettext("split into new pool"));3084break;30853086case VDEV_AUX_ACTIVE:3087(void) printf(gettext("currently in use"));3088break;30893090case VDEV_AUX_CHILDREN_OFFLINE:3091(void) printf(gettext("all children offline"));3092break;30933094case VDEV_AUX_BAD_LABEL:3095(void) printf(gettext("invalid label"));3096break;30973098default:3099(void) printf(gettext("corrupted data"));3100break;3101}3102color_end();3103} else if (children == 0 && !isspare &&3104getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&3105VDEV_STAT_VALID(vs_physical_ashift, vsc) &&3106vs->vs_configured_ashift < vs->vs_physical_ashift) {3107(void) printf(3108gettext(" block size: %dB configured, %dB native"),31091 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);3110}31113112if (vs->vs_scan_removing != 0) {3113(void) printf(gettext(" (removing)"));3114} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {3115(void) printf(gettext(" (non-allocating)"));3116}31173118/* The root vdev has the scrub/resilver stats */3119root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),3120ZPOOL_CONFIG_VDEV_TREE);3121(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,3122(uint64_t **)&ps, &c);31233124/*3125* If you force fault a drive that's resilvering, its scan stats can3126* get frozen in time, giving the false impression that it's3127* being resilvered. That's why we check the state to see if the vdev3128* is healthy before reporting "resilvering" or "repairing".3129*/3130if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&3131vs->vs_state == VDEV_STATE_HEALTHY) {3132if (vs->vs_scan_processed != 0) {3133(void) printf(gettext(" (%s)"),3134(ps->pss_func == POOL_SCAN_RESILVER) ?3135"resilvering" : "repairing");3136} else if (vs->vs_resilver_deferred) {3137(void) printf(gettext(" (awaiting resilver)"));3138}3139}31403141/* The top-level vdevs have the rebuild stats */3142if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&3143children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {3144if (vs->vs_rebuild_processed != 0) {3145(void) printf(gettext(" (resilvering)"));3146}3147}31483149if (cb->vcdl != NULL) {3150if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {3151printf(" ");3152zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);3153}3154}31553156/* Display vdev initialization and trim status for leaves. */3157if (children == 0) {3158print_status_initialize(vs, cb->cb_print_vdev_init);3159print_status_trim(vs, cb->cb_print_vdev_trim);3160}31613162(void) printf("\n");31633164for (c = 0; c < children; c++) {3165uint64_t islog = B_FALSE, ishole = B_FALSE;31663167/* Don't print logs or holes here */3168(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,3169&islog);3170(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,3171&ishole);3172if (islog || ishole)3173continue;3174/* Only print normal classes here */3175if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))3176continue;31773178/* Provide vdev_rebuild_stats to children if available */3179if (vrs == NULL) {3180(void) nvlist_lookup_uint64_array(nv,3181ZPOOL_CONFIG_REBUILD_STATS,3182(uint64_t **)&vrs, &i);3183}31843185vname = zpool_vdev_name(g_zfs, zhp, child[c],3186cb->cb_name_flags | VDEV_NAME_TYPE_ID);3187print_status_config(zhp, cb, vname, child[c], depth + 2,3188isspare, vrs);3189free(vname);3190}3191}31923193/*3194* Print the configuration of an exported pool. Iterate over all vdevs in the3195* pool, printing out the name and status for each one.3196*/3197static void3198print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,3199int depth)3200{3201nvlist_t **child;3202uint_t c, children;3203vdev_stat_t *vs;3204const char *type;3205char *vname;32063207verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);3208if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||3209strcmp(type, VDEV_TYPE_HOLE) == 0)3210return;32113212verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,3213(uint64_t **)&vs, &c) == 0);32143215(void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);3216(void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));32173218if (vs->vs_aux != 0) {3219(void) printf(" ");32203221switch (vs->vs_aux) {3222case VDEV_AUX_OPEN_FAILED:3223(void) printf(gettext("cannot open"));3224break;32253226case VDEV_AUX_BAD_GUID_SUM:3227(void) printf(gettext("missing device"));3228break;32293230case VDEV_AUX_NO_REPLICAS:3231(void) printf(gettext("insufficient replicas"));3232break;32333234case VDEV_AUX_VERSION_NEWER:3235(void) printf(gettext("newer version"));3236break;32373238case VDEV_AUX_UNSUP_FEAT:3239(void) printf(gettext("unsupported feature(s)"));3240break;32413242case VDEV_AUX_ERR_EXCEEDED:3243(void) printf(gettext("too many errors"));3244break;32453246case VDEV_AUX_ACTIVE:3247(void) printf(gettext("currently in use"));3248break;32493250case VDEV_AUX_CHILDREN_OFFLINE:3251(void) printf(gettext("all children offline"));3252break;32533254case VDEV_AUX_BAD_LABEL:3255(void) printf(gettext("invalid label"));3256break;32573258default:3259(void) printf(gettext("corrupted data"));3260break;3261}3262}3263(void) printf("\n");32643265if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,3266&child, &children) != 0)3267return;32683269for (c = 0; c < children; c++) {3270uint64_t is_log = B_FALSE;32713272(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,3273&is_log);3274if (is_log)3275continue;3276if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))3277continue;32783279vname = zpool_vdev_name(g_zfs, NULL, child[c],3280cb->cb_name_flags | VDEV_NAME_TYPE_ID);3281print_import_config(cb, vname, child[c], depth + 2);3282free(vname);3283}32843285if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,3286&child, &children) == 0) {3287(void) printf(gettext("\tcache\n"));3288for (c = 0; c < children; c++) {3289vname = zpool_vdev_name(g_zfs, NULL, child[c],3290cb->cb_name_flags);3291(void) printf("\t %s\n", vname);3292free(vname);3293}3294}32953296if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,3297&child, &children) == 0) {3298(void) printf(gettext("\tspares\n"));3299for (c = 0; c < children; c++) {3300vname = zpool_vdev_name(g_zfs, NULL, child[c],3301cb->cb_name_flags);3302(void) printf("\t %s\n", vname);3303free(vname);3304}3305}3306}33073308/*3309* Print specialized class vdevs.3310*3311* These are recorded as top level vdevs in the main pool child array3312* but with "is_log" set to 1 or an "alloc_bias" string. We use either3313* print_status_config() or print_import_config() to print the top level3314* class vdevs then any of their children (eg mirrored slogs) are printed3315* recursively - which works because only the top level vdev is marked.3316*/3317static void3318print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,3319const char *class)3320{3321uint_t c, children;3322nvlist_t **child;3323boolean_t printed = B_FALSE;33243325assert(zhp != NULL || !cb->cb_verbose);33263327if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,3328&children) != 0)3329return;33303331for (c = 0; c < children; c++) {3332uint64_t is_log = B_FALSE;3333const char *bias = NULL;3334const char *type = NULL;33353336(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,3337&is_log);33383339if (is_log) {3340bias = (char *)VDEV_ALLOC_CLASS_LOGS;3341} else {3342(void) nvlist_lookup_string(child[c],3343ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);3344(void) nvlist_lookup_string(child[c],3345ZPOOL_CONFIG_TYPE, &type);3346}33473348if (bias == NULL || strcmp(bias, class) != 0)3349continue;3350if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)3351continue;33523353if (!printed) {3354(void) printf("\t%s\t\n", gettext(class));3355printed = B_TRUE;3356}33573358char *name = zpool_vdev_name(g_zfs, zhp, child[c],3359cb->cb_name_flags | VDEV_NAME_TYPE_ID);3360if (cb->cb_print_status)3361print_status_config(zhp, cb, name, child[c], 2,3362B_FALSE, NULL);3363else3364print_import_config(cb, name, child[c], 2);3365free(name);3366}3367}33683369/*3370* Display the status for the given pool.3371*/3372static int3373show_import(nvlist_t *config, boolean_t report_error)3374{3375uint64_t pool_state;3376vdev_stat_t *vs;3377const char *name;3378uint64_t guid;3379uint64_t hostid = 0;3380const char *msgid;3381const char *hostname = "unknown";3382nvlist_t *nvroot, *nvinfo;3383zpool_status_t reason;3384zpool_errata_t errata;3385const char *health;3386uint_t vsc;3387const char *comment;3388const char *indent;3389char buf[2048];3390status_cbdata_t cb = { 0 };33913392verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,3393&name) == 0);3394verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,3395&guid) == 0);3396verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,3397&pool_state) == 0);3398verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,3399&nvroot) == 0);34003401verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,3402(uint64_t **)&vs, &vsc) == 0);3403health = zpool_state_to_name(vs->vs_state, vs->vs_aux);34043405reason = zpool_import_status(config, &msgid, &errata);34063407/*3408* If we're importing using a cachefile, then we won't report any3409* errors unless we are in the scan phase of the import.3410*/3411if (reason != ZPOOL_STATUS_OK && !report_error)3412return (reason);34133414if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) {3415indent = " ";3416} else {3417comment = NULL;3418indent = "";3419}34203421(void) printf(gettext("%s pool: %s\n"), indent, name);3422(void) printf(gettext("%s id: %llu\n"), indent, (u_longlong_t)guid);3423(void) printf(gettext("%s state: %s"), indent, health);3424if (pool_state == POOL_STATE_DESTROYED)3425(void) printf(gettext(" (DESTROYED)"));3426(void) printf("\n");34273428if (reason != ZPOOL_STATUS_OK) {3429(void) printf("%s", indent);3430(void) printf_color(ANSI_BOLD, gettext("status: "));3431}3432switch (reason) {3433case ZPOOL_STATUS_MISSING_DEV_R:3434case ZPOOL_STATUS_MISSING_DEV_NR:3435case ZPOOL_STATUS_BAD_GUID_SUM:3436(void) printf_color(ANSI_YELLOW, gettext("One or more devices "3437"are missing from the system.\n"));3438break;34393440case ZPOOL_STATUS_CORRUPT_LABEL_R:3441case ZPOOL_STATUS_CORRUPT_LABEL_NR:3442(void) printf_color(ANSI_YELLOW, gettext("One or more devices "3443"contains corrupted data.\n"));3444break;34453446case ZPOOL_STATUS_CORRUPT_DATA:3447(void) printf_color(ANSI_YELLOW, gettext("The pool data is "3448"corrupted.\n"));3449break;34503451case ZPOOL_STATUS_OFFLINE_DEV:3452(void) printf_color(ANSI_YELLOW, gettext("One or more devices "3453"are offlined.\n"));3454break;34553456case ZPOOL_STATUS_CORRUPT_POOL:3457(void) printf_color(ANSI_YELLOW, gettext("The pool metadata is "3458"corrupted.\n"));3459break;34603461case ZPOOL_STATUS_VERSION_OLDER:3462(void) printf_color(ANSI_YELLOW, gettext("The pool is "3463"formatted using a legacy on-disk version.\n"));3464break;34653466case ZPOOL_STATUS_VERSION_NEWER:3467(void) printf_color(ANSI_YELLOW, gettext("The pool is "3468"formatted using an incompatible version.\n"));3469break;34703471case ZPOOL_STATUS_FEAT_DISABLED:3472(void) printf_color(ANSI_YELLOW, gettext("Some supported "3473"features are not enabled on the pool.\n"3474"\t%s(Note that they may be intentionally disabled if the\n"3475"\t%s'compatibility' property is set.)\n"), indent, indent);3476break;34773478case ZPOOL_STATUS_COMPATIBILITY_ERR:3479(void) printf_color(ANSI_YELLOW, gettext("Error reading or "3480"parsing the file(s) indicated by the 'compatibility'\n"3481"\t%sproperty.\n"), indent);3482break;34833484case ZPOOL_STATUS_INCOMPATIBLE_FEAT:3485(void) printf_color(ANSI_YELLOW, gettext("One or more features "3486"are enabled on the pool despite not being\n"3487"\t%srequested by the 'compatibility' property.\n"),3488indent);3489break;34903491case ZPOOL_STATUS_UNSUP_FEAT_READ:3492(void) printf_color(ANSI_YELLOW, gettext("The pool uses the "3493"following feature(s) not supported on this system:\n"));3494color_start(ANSI_YELLOW);3495zpool_collect_unsup_feat(config, buf, 2048);3496(void) printf("%s", buf);3497color_end();3498break;34993500case ZPOOL_STATUS_UNSUP_FEAT_WRITE:3501(void) printf_color(ANSI_YELLOW, gettext("The pool can only be "3502"accessed in read-only mode on this system. It\n"3503"\t%scannot be accessed in read-write mode because it uses "3504"the following\n"3505"\t%sfeature(s) not supported on this system:\n"),3506indent, indent);3507color_start(ANSI_YELLOW);3508zpool_collect_unsup_feat(config, buf, 2048);3509(void) printf("%s", buf);3510color_end();3511break;35123513case ZPOOL_STATUS_HOSTID_ACTIVE:3514(void) printf_color(ANSI_YELLOW, gettext("The pool is "3515"currently imported by another system.\n"));3516break;35173518case ZPOOL_STATUS_HOSTID_REQUIRED:3519(void) printf_color(ANSI_YELLOW, gettext("The pool has the "3520"multihost property on. It cannot\n"3521"\t%sbe safely imported when the system hostid is not "3522"set.\n"), indent);3523break;35243525case ZPOOL_STATUS_HOSTID_MISMATCH:3526(void) printf_color(ANSI_YELLOW, gettext("The pool was last "3527"accessed by another system.\n"));3528break;35293530case ZPOOL_STATUS_FAULTED_DEV_R:3531case ZPOOL_STATUS_FAULTED_DEV_NR:3532(void) printf_color(ANSI_YELLOW, gettext("One or more devices "3533"are faulted.\n"));3534break;35353536case ZPOOL_STATUS_BAD_LOG:3537(void) printf_color(ANSI_YELLOW, gettext("An intent log record "3538"cannot be read.\n"));3539break;35403541case ZPOOL_STATUS_RESILVERING:3542case ZPOOL_STATUS_REBUILDING:3543(void) printf_color(ANSI_YELLOW, gettext("One or more devices "3544"were being resilvered.\n"));3545break;35463547case ZPOOL_STATUS_ERRATA:3548(void) printf_color(ANSI_YELLOW,3549gettext("Errata #%d detected.\n"),3550errata);3551break;35523553case ZPOOL_STATUS_NON_NATIVE_ASHIFT:3554(void) printf_color(ANSI_YELLOW, gettext("One or more devices "3555"are configured to use a non-native block size.\n"3556"\t%sExpect reduced performance.\n"), indent);3557break;35583559default:3560/*3561* No other status can be seen when importing pools.3562*/3563assert(reason == ZPOOL_STATUS_OK);3564}35653566/*3567* Print out an action according to the overall state of the pool.3568*/3569if (vs->vs_state != VDEV_STATE_HEALTHY ||3570reason != ZPOOL_STATUS_ERRATA || errata != ZPOOL_ERRATA_NONE) {3571(void) printf("%s", indent);3572(void) printf(gettext("action: "));3573}3574if (vs->vs_state == VDEV_STATE_HEALTHY) {3575if (reason == ZPOOL_STATUS_VERSION_OLDER ||3576reason == ZPOOL_STATUS_FEAT_DISABLED) {3577(void) printf(gettext("The pool can be imported using "3578"its name or numeric identifier, though\n"3579"\t%ssome features will not be available without "3580"an explicit 'zpool upgrade'.\n"), indent);3581} else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {3582(void) printf(gettext("The pool can be imported using "3583"its name or numeric\n"3584"\t%sidentifier, though the file(s) indicated by "3585"its 'compatibility'\n"3586"\t%sproperty cannot be parsed at this time.\n"),3587indent, indent);3588} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {3589(void) printf(gettext("The pool can be imported using "3590"its name or numeric identifier and\n"3591"\t%sthe '-f' flag.\n"), indent);3592} else if (reason == ZPOOL_STATUS_ERRATA) {3593switch (errata) {3594case ZPOOL_ERRATA_ZOL_2094_SCRUB:3595(void) printf(gettext("The pool can be "3596"imported using its name or numeric "3597"identifier,\n"3598"\t%showever there is a compatibility "3599"issue which should be corrected\n"3600"\t%sby running 'zpool scrub'\n"),3601indent, indent);3602break;36033604case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:3605(void) printf(gettext("The pool cannot be "3606"imported with this version of ZFS due to\n"3607"\t%san active asynchronous destroy. "3608"Revert to an earlier version\n"3609"\t%sand allow the destroy to complete "3610"before updating.\n"), indent, indent);3611break;36123613case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:3614(void) printf(gettext("Existing encrypted "3615"datasets contain an on-disk "3616"incompatibility, which\n"3617"\t%sneeds to be corrected. Backup these "3618"datasets to new encrypted datasets\n"3619"\t%sand destroy the old ones.\n"),3620indent, indent);3621break;36223623case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:3624(void) printf(gettext("Existing encrypted "3625"snapshots and bookmarks contain an "3626"on-disk\n"3627"\t%sincompatibility. This may cause "3628"on-disk corruption if they are used\n"3629"\t%swith 'zfs recv'. To correct the "3630"issue, enable the bookmark_v2 feature.\n"3631"\t%sNo additional action is needed if "3632"there are no encrypted snapshots or\n"3633"\t%sbookmarks. If preserving the "3634"encrypted snapshots and bookmarks is\n"3635"\t%srequired, use a non-raw send to "3636"backup and restore them. Alternately,\n"3637"\t%sthey may be removed to resolve the "3638"incompatibility.\n"), indent, indent,3639indent, indent, indent, indent);3640break;3641default:3642/*3643* All errata must contain an action message.3644*/3645assert(errata == ZPOOL_ERRATA_NONE);3646}3647} else {3648(void) printf(gettext("The pool can be imported using "3649"its name or numeric identifier.\n"));3650}3651} else if (vs->vs_state == VDEV_STATE_DEGRADED) {3652(void) printf(gettext("The pool can be imported despite "3653"missing or damaged devices. The\n"3654"\t%sfault tolerance of the pool may be compromised if "3655"imported.\n"), indent);3656} else {3657switch (reason) {3658case ZPOOL_STATUS_VERSION_NEWER:3659(void) printf(gettext("The pool cannot be imported. "3660"Access the pool on a system running newer\n"3661"\t%ssoftware, or recreate the pool from "3662"backup.\n"), indent);3663break;3664case ZPOOL_STATUS_UNSUP_FEAT_READ:3665(void) printf(gettext("The pool cannot be imported. "3666"Access the pool on a system that supports\n"3667"\t%sthe required feature(s), or recreate the pool "3668"from backup.\n"), indent);3669break;3670case ZPOOL_STATUS_UNSUP_FEAT_WRITE:3671(void) printf(gettext("The pool cannot be imported in "3672"read-write mode. Import the pool with\n"3673"\t%s'-o readonly=on', access the pool on a system "3674"that supports the\n"3675"\t%srequired feature(s), or recreate the pool "3676"from backup.\n"), indent, indent);3677break;3678case ZPOOL_STATUS_MISSING_DEV_R:3679case ZPOOL_STATUS_MISSING_DEV_NR:3680case ZPOOL_STATUS_BAD_GUID_SUM:3681(void) printf(gettext("The pool cannot be imported. "3682"Attach the missing\n"3683"\t%sdevices and try again.\n"), indent);3684break;3685case ZPOOL_STATUS_HOSTID_ACTIVE:3686VERIFY0(nvlist_lookup_nvlist(config,3687ZPOOL_CONFIG_LOAD_INFO, &nvinfo));36883689if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))3690hostname = fnvlist_lookup_string(nvinfo,3691ZPOOL_CONFIG_MMP_HOSTNAME);36923693if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))3694hostid = fnvlist_lookup_uint64(nvinfo,3695ZPOOL_CONFIG_MMP_HOSTID);36963697(void) printf(gettext("The pool must be exported from "3698"%s (hostid=%"PRIx64")\n"3699"\t%sbefore it can be safely imported.\n"),3700hostname, hostid, indent);3701break;3702case ZPOOL_STATUS_HOSTID_REQUIRED:3703(void) printf(gettext("Set a unique system hostid with "3704"the zgenhostid(8) command.\n"));3705break;3706default:3707(void) printf(gettext("The pool cannot be imported due "3708"to damaged devices or data.\n"));3709}3710}37113712/* Print the comment attached to the pool. */3713if (comment != NULL)3714(void) printf(gettext("comment: %s\n"), comment);37153716/*3717* If the state is "closed" or "can't open", and the aux state3718* is "corrupt data":3719*/3720if ((vs->vs_state == VDEV_STATE_CLOSED ||3721vs->vs_state == VDEV_STATE_CANT_OPEN) &&3722vs->vs_aux == VDEV_AUX_CORRUPT_DATA) {3723if (pool_state == POOL_STATE_DESTROYED)3724(void) printf(gettext("\t%sThe pool was destroyed, "3725"but can be imported using the '-Df' flags.\n"),3726indent);3727else if (pool_state != POOL_STATE_EXPORTED)3728(void) printf(gettext("\t%sThe pool may be active on "3729"another system, but can be imported using\n"3730"\t%sthe '-f' flag.\n"), indent, indent);3731}37323733if (msgid != NULL) {3734(void) printf(gettext("%s see: "3735"https://openzfs.github.io/openzfs-docs/msg/%s\n"),3736indent, msgid);3737}37383739(void) printf(gettext("%sconfig:\n\n"), indent);37403741cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),3742VDEV_NAME_TYPE_ID);3743if (cb.cb_namewidth < 10)3744cb.cb_namewidth = 10;37453746print_import_config(&cb, name, nvroot, 0);37473748print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);3749print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);3750print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);37513752if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {3753(void) printf(gettext("\n\t%sAdditional devices are known to "3754"be part of this pool, though their\n"3755"\t%sexact configuration cannot be determined.\n"),3756indent, indent);3757}3758return (0);3759}37603761static boolean_t3762zfs_force_import_required(nvlist_t *config)3763{3764uint64_t state;3765uint64_t hostid = 0;3766nvlist_t *nvinfo;37673768state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);3769nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);37703771/*3772* The hostid on LOAD_INFO comes from the MOS label via3773* spa_tryimport(). If its not there then we're likely talking to an3774* older kernel, so use the top one, which will be from the label3775* discovered in zpool_find_import(), or if a cachefile is in use, the3776* local hostid.3777*/3778if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)3779(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,3780&hostid);37813782if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())3783return (B_TRUE);37843785if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {3786mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,3787ZPOOL_CONFIG_MMP_STATE);37883789if (mmp_state != MMP_STATE_INACTIVE)3790return (B_TRUE);3791}37923793return (B_FALSE);3794}37953796/*3797* Perform the import for the given configuration. This passes the heavy3798* lifting off to zpool_import_props(), and then mounts the datasets contained3799* within the pool.3800*/3801static int3802do_import(nvlist_t *config, const char *newname, const char *mntopts,3803nvlist_t *props, int flags, uint_t mntthreads)3804{3805int ret = 0;3806int ms_status = 0;3807zpool_handle_t *zhp;3808const char *name;3809uint64_t version;38103811name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);3812version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);38133814if (!SPA_VERSION_IS_SUPPORTED(version)) {3815(void) fprintf(stderr, gettext("cannot import '%s': pool "3816"is formatted using an unsupported ZFS version\n"), name);3817return (1);3818} else if (zfs_force_import_required(config) &&3819!(flags & ZFS_IMPORT_ANY_HOST)) {3820mmp_state_t mmp_state = MMP_STATE_INACTIVE;3821nvlist_t *nvinfo;38223823nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);3824if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))3825mmp_state = fnvlist_lookup_uint64(nvinfo,3826ZPOOL_CONFIG_MMP_STATE);38273828if (mmp_state == MMP_STATE_ACTIVE) {3829const char *hostname = "<unknown>";3830uint64_t hostid = 0;38313832if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))3833hostname = fnvlist_lookup_string(nvinfo,3834ZPOOL_CONFIG_MMP_HOSTNAME);38353836if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))3837hostid = fnvlist_lookup_uint64(nvinfo,3838ZPOOL_CONFIG_MMP_HOSTID);38393840(void) fprintf(stderr, gettext("cannot import '%s': "3841"pool is imported on %s (hostid: "3842"0x%"PRIx64")\nExport the pool on the other "3843"system, then run 'zpool import'.\n"),3844name, hostname, hostid);3845} else if (mmp_state == MMP_STATE_NO_HOSTID) {3846(void) fprintf(stderr, gettext("Cannot import '%s': "3847"pool has the multihost property on and the\n"3848"system's hostid is not set. Set a unique hostid "3849"with the zgenhostid(8) command.\n"), name);3850} else {3851const char *hostname = "<unknown>";3852time_t timestamp = 0;3853uint64_t hostid = 0;38543855if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))3856hostname = fnvlist_lookup_string(nvinfo,3857ZPOOL_CONFIG_HOSTNAME);3858else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))3859hostname = fnvlist_lookup_string(config,3860ZPOOL_CONFIG_HOSTNAME);38613862if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))3863timestamp = fnvlist_lookup_uint64(config,3864ZPOOL_CONFIG_TIMESTAMP);38653866if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))3867hostid = fnvlist_lookup_uint64(nvinfo,3868ZPOOL_CONFIG_HOSTID);3869else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))3870hostid = fnvlist_lookup_uint64(config,3871ZPOOL_CONFIG_HOSTID);38723873(void) fprintf(stderr, gettext("cannot import '%s': "3874"pool was previously in use from another system.\n"3875"Last accessed by %s (hostid=%"PRIx64") at %s"3876"The pool can be imported, use 'zpool import -f' "3877"to import the pool.\n"), name, hostname,3878hostid, ctime(×tamp));3879}38803881return (1);3882}38833884if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)3885return (1);38863887if (newname != NULL)3888name = newname;38893890if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)3891return (1);38923893/*3894* Loading keys is best effort. We don't want to return immediately3895* if it fails but we do want to give the error to the caller.3896*/3897if (flags & ZFS_IMPORT_LOAD_KEYS &&3898zfs_crypto_attempt_load_keys(g_zfs, name) != 0)3899ret = 1;39003901if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&3902!(flags & ZFS_IMPORT_ONLY)) {3903ms_status = zpool_enable_datasets(zhp, mntopts, 0, mntthreads);3904if (ms_status == EZFS_SHAREFAILED) {3905(void) fprintf(stderr, gettext("Import was "3906"successful, but unable to share some datasets\n"));3907} else if (ms_status == EZFS_MOUNTFAILED) {3908(void) fprintf(stderr, gettext("Import was "3909"successful, but unable to mount some datasets\n"));3910}3911}39123913zpool_close(zhp);3914return (ret);3915}39163917typedef struct import_parameters {3918nvlist_t *ip_config;3919const char *ip_mntopts;3920nvlist_t *ip_props;3921int ip_flags;3922uint_t ip_mntthreads;3923int *ip_err;3924} import_parameters_t;39253926static void3927do_import_task(void *arg)3928{3929import_parameters_t *ip = arg;3930*ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts,3931ip->ip_props, ip->ip_flags, ip->ip_mntthreads);3932free(ip);3933}393439353936static int3937import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,3938char *orig_name, char *new_name, importargs_t *import)3939{3940nvlist_t *config = NULL;3941nvlist_t *found_config = NULL;3942uint64_t pool_state;3943boolean_t pool_specified = (import->poolname != NULL ||3944import->guid != 0);3945uint_t npools = 0;394639473948taskq_t *tq = NULL;3949if (import->do_all) {3950tq = taskq_create("zpool_import_all",39515 * sysconf(_SC_NPROCESSORS_ONLN), minclsyspri, 1, INT_MAX,3952TASKQ_DYNAMIC);3953}39543955/*3956* At this point we have a list of import candidate configs. Even if3957* we were searching by pool name or guid, we still need to3958* post-process the list to deal with pool state and possible3959* duplicate names.3960*/3961int err = 0;3962nvpair_t *elem = NULL;3963boolean_t first = B_TRUE;3964if (!pool_specified && import->do_all) {3965while ((elem = nvlist_next_nvpair(pools, elem)) != NULL)3966npools++;3967}3968while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {39693970verify(nvpair_value_nvlist(elem, &config) == 0);39713972verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,3973&pool_state) == 0);3974if (!import->do_destroyed &&3975pool_state == POOL_STATE_DESTROYED)3976continue;3977if (import->do_destroyed &&3978pool_state != POOL_STATE_DESTROYED)3979continue;39803981verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,3982import->policy) == 0);39833984if (!pool_specified) {3985if (first)3986first = B_FALSE;3987else if (!import->do_all)3988(void) fputc('\n', stdout);39893990if (import->do_all) {3991import_parameters_t *ip = safe_malloc(3992sizeof (import_parameters_t));39933994ip->ip_config = config;3995ip->ip_mntopts = mntopts;3996ip->ip_props = props;3997ip->ip_flags = flags;3998ip->ip_mntthreads = mount_tp_nthr / npools;3999ip->ip_err = &err;40004001(void) taskq_dispatch(tq, do_import_task,4002(void *)ip, TQ_SLEEP);4003} else {4004/*4005* If we're importing from cachefile, then4006* we don't want to report errors until we4007* are in the scan phase of the import. If4008* we get an error, then we return that error4009* to invoke the scan phase.4010*/4011if (import->cachefile && !import->scan)4012err = show_import(config, B_FALSE);4013else4014(void) show_import(config, B_TRUE);4015}4016} else if (import->poolname != NULL) {4017const char *name;40184019/*4020* We are searching for a pool based on name.4021*/4022verify(nvlist_lookup_string(config,4023ZPOOL_CONFIG_POOL_NAME, &name) == 0);40244025if (strcmp(name, import->poolname) == 0) {4026if (found_config != NULL) {4027(void) fprintf(stderr, gettext(4028"cannot import '%s': more than "4029"one matching pool\n"),4030import->poolname);4031(void) fprintf(stderr, gettext(4032"import by numeric ID instead\n"));4033err = B_TRUE;4034}4035found_config = config;4036}4037} else {4038uint64_t guid;40394040/*4041* Search for a pool by guid.4042*/4043verify(nvlist_lookup_uint64(config,4044ZPOOL_CONFIG_POOL_GUID, &guid) == 0);40454046if (guid == import->guid)4047found_config = config;4048}4049}4050if (import->do_all) {4051taskq_wait(tq);4052taskq_destroy(tq);4053}40544055/*4056* If we were searching for a specific pool, verify that we found a4057* pool, and then do the import.4058*/4059if (pool_specified && err == 0) {4060if (found_config == NULL) {4061(void) fprintf(stderr, gettext("cannot import '%s': "4062"no such pool available\n"), orig_name);4063err = B_TRUE;4064} else {4065err |= do_import(found_config, new_name,4066mntopts, props, flags, mount_tp_nthr);4067}4068}40694070/*4071* If we were just looking for pools, report an error if none were4072* found.4073*/4074if (!pool_specified && first)4075(void) fprintf(stderr,4076gettext("no pools available to import\n"));4077return (err);4078}40794080typedef struct target_exists_args {4081const char *poolname;4082uint64_t poolguid;4083} target_exists_args_t;40844085static int4086name_or_guid_exists(zpool_handle_t *zhp, void *data)4087{4088target_exists_args_t *args = data;4089nvlist_t *config = zpool_get_config(zhp, NULL);4090int found = 0;40914092if (config == NULL)4093return (0);40944095if (args->poolname != NULL) {4096const char *pool_name;40974098verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,4099&pool_name) == 0);4100if (strcmp(pool_name, args->poolname) == 0)4101found = 1;4102} else {4103uint64_t pool_guid;41044105verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,4106&pool_guid) == 0);4107if (pool_guid == args->poolguid)4108found = 1;4109}4110zpool_close(zhp);41114112return (found);4113}4114/*4115* zpool checkpoint <pool>4116* checkpoint --discard <pool>4117*4118* -d Discard the checkpoint from a checkpointed4119* --discard pool.4120*4121* -w Wait for discarding a checkpoint to complete.4122* --wait4123*4124* Checkpoints the specified pool, by taking a "snapshot" of its4125* current state. A pool can only have one checkpoint at a time.4126*/4127int4128zpool_do_checkpoint(int argc, char **argv)4129{4130boolean_t discard, wait;4131char *pool;4132zpool_handle_t *zhp;4133int c, err;41344135struct option long_options[] = {4136{"discard", no_argument, NULL, 'd'},4137{"wait", no_argument, NULL, 'w'},4138{0, 0, 0, 0}4139};41404141discard = B_FALSE;4142wait = B_FALSE;4143while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {4144switch (c) {4145case 'd':4146discard = B_TRUE;4147break;4148case 'w':4149wait = B_TRUE;4150break;4151case '?':4152(void) fprintf(stderr, gettext("invalid option '%c'\n"),4153optopt);4154usage(B_FALSE);4155}4156}41574158if (wait && !discard) {4159(void) fprintf(stderr, gettext("--wait only valid when "4160"--discard also specified\n"));4161usage(B_FALSE);4162}41634164argc -= optind;4165argv += optind;41664167if (argc < 1) {4168(void) fprintf(stderr, gettext("missing pool argument\n"));4169usage(B_FALSE);4170}41714172if (argc > 1) {4173(void) fprintf(stderr, gettext("too many arguments\n"));4174usage(B_FALSE);4175}41764177pool = argv[0];41784179if ((zhp = zpool_open(g_zfs, pool)) == NULL) {4180/* As a special case, check for use of '/' in the name */4181if (strchr(pool, '/') != NULL)4182(void) fprintf(stderr, gettext("'zpool checkpoint' "4183"doesn't work on datasets. To save the state "4184"of a dataset from a specific point in time "4185"please use 'zfs snapshot'\n"));4186return (1);4187}41884189if (discard) {4190err = (zpool_discard_checkpoint(zhp) != 0);4191if (err == 0 && wait)4192err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);4193} else {4194err = (zpool_checkpoint(zhp) != 0);4195}41964197zpool_close(zhp);41984199return (err);4200}42014202#define CHECKPOINT_OPT 102442034204/*4205* zpool prefetch [-t <type>] <pool>4206*4207* Prefetchs a particular type of data in the specified pool.4208*/4209int4210zpool_do_prefetch(int argc, char **argv)4211{4212int c;4213char *poolname;4214char *typestr = NULL;4215zpool_prefetch_type_t type;4216zpool_handle_t *zhp;4217int err = 0;42184219while ((c = getopt(argc, argv, "t:")) != -1) {4220switch (c) {4221case 't':4222typestr = optarg;4223break;4224case ':':4225(void) fprintf(stderr, gettext("missing argument for "4226"'%c' option\n"), optopt);4227usage(B_FALSE);4228break;4229case '?':4230(void) fprintf(stderr, gettext("invalid option '%c'\n"),4231optopt);4232usage(B_FALSE);4233}4234}4235argc -= optind;4236argv += optind;42374238if (argc < 1) {4239(void) fprintf(stderr, gettext("missing pool name argument\n"));4240usage(B_FALSE);4241}42424243if (argc > 1) {4244(void) fprintf(stderr, gettext("too many arguments\n"));4245usage(B_FALSE);4246}42474248poolname = argv[0];42494250if ((zhp = zpool_open(g_zfs, poolname)) == NULL)4251return (1);42524253if (typestr == NULL) {4254/* Prefetch all types */4255err = zpool_prefetch(zhp, ZPOOL_PREFETCH_DDT);4256if (err == 0)4257err = zpool_prefetch(zhp, ZPOOL_PREFETCH_BRT);4258} else {4259if (strcmp(typestr, "ddt") == 0) {4260type = ZPOOL_PREFETCH_DDT;4261} else if (strcmp(typestr, "brt") == 0) {4262type = ZPOOL_PREFETCH_BRT;4263} else {4264(void) fprintf(stderr,4265gettext("unsupported prefetch type\n"));4266zpool_close(zhp);4267usage(B_FALSE);4268}4269err = zpool_prefetch(zhp, type);4270}42714272zpool_close(zhp);42734274return (err);4275}42764277/*4278* zpool import [-d dir] [-D]4279* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]4280* [-d dir | -c cachefile | -s] [-f] -a4281* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]4282* [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>4283* [newpool]4284*4285* -c Read pool information from a cachefile instead of searching4286* devices. If importing from a cachefile config fails, then4287* fallback to searching for devices only in the directories that4288* exist in the cachefile.4289*4290* -d Scan in a specific directory, other than /dev/. More than4291* one directory can be specified using multiple '-d' options.4292*4293* -D Scan for previously destroyed pools or import all or only4294* specified destroyed pools.4295*4296* -R Temporarily import the pool, with all mountpoints relative to4297* the given root. The pool will remain exported when the machine4298* is rebooted.4299*4300* -V Import even in the presence of faulted vdevs. This is an4301* intentionally undocumented option for testing purposes, and4302* treats the pool configuration as complete, leaving any bad4303* vdevs in the FAULTED state. In other words, it does verbatim4304* import.4305*4306* -f Force import, even if it appears that the pool is active.4307*4308* -F Attempt rewind if necessary.4309*4310* -n See if rewind would work, but don't actually rewind.4311*4312* -N Import the pool but don't mount datasets.4313*4314* -T Specify a starting txg to use for import. This option is4315* intentionally undocumented option for testing purposes.4316*4317* -a Import all pools found.4318*4319* -l Load encryption keys while importing.4320*4321* -o Set property=value and/or temporary mount options (without '=').4322*4323* -s Scan using the default search path, the libblkid cache will4324* not be consulted.4325*4326* --rewind-to-checkpoint4327* Import the pool and revert back to the checkpoint.4328*4329* The import command scans for pools to import, and import pools based on pool4330* name and GUID. The pool can also be renamed as part of the import process.4331*/4332int4333zpool_do_import(int argc, char **argv)4334{4335char **searchdirs = NULL;4336char *env, *envdup = NULL;4337int nsearch = 0;4338int c;4339int err = 0;4340nvlist_t *pools = NULL;4341boolean_t do_all = B_FALSE;4342boolean_t do_destroyed = B_FALSE;4343char *mntopts = NULL;4344uint64_t searchguid = 0;4345char *searchname = NULL;4346char *propval;4347nvlist_t *policy = NULL;4348nvlist_t *props = NULL;4349int flags = ZFS_IMPORT_NORMAL;4350uint32_t rewind_policy = ZPOOL_NO_REWIND;4351boolean_t dryrun = B_FALSE;4352boolean_t do_rewind = B_FALSE;4353boolean_t xtreme_rewind = B_FALSE;4354boolean_t do_scan = B_FALSE;4355boolean_t pool_exists = B_FALSE;4356uint64_t txg = -1ULL;4357char *cachefile = NULL;4358importargs_t idata = { 0 };4359char *endptr;43604361struct option long_options[] = {4362{"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},4363{0, 0, 0, 0}4364};43654366/* check options */4367while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",4368long_options, NULL)) != -1) {4369switch (c) {4370case 'a':4371do_all = B_TRUE;4372break;4373case 'c':4374cachefile = optarg;4375break;4376case 'd':4377searchdirs = safe_realloc(searchdirs,4378(nsearch + 1) * sizeof (char *));4379searchdirs[nsearch++] = optarg;4380break;4381case 'D':4382do_destroyed = B_TRUE;4383break;4384case 'f':4385flags |= ZFS_IMPORT_ANY_HOST;4386break;4387case 'F':4388do_rewind = B_TRUE;4389break;4390case 'l':4391flags |= ZFS_IMPORT_LOAD_KEYS;4392break;4393case 'm':4394flags |= ZFS_IMPORT_MISSING_LOG;4395break;4396case 'n':4397dryrun = B_TRUE;4398break;4399case 'N':4400flags |= ZFS_IMPORT_ONLY;4401break;4402case 'o':4403if ((propval = strchr(optarg, '=')) != NULL) {4404*propval = '\0';4405propval++;4406if (add_prop_list(optarg, propval,4407&props, B_TRUE))4408goto error;4409} else {4410mntopts = optarg;4411}4412break;4413case 'R':4414if (add_prop_list(zpool_prop_to_name(4415ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))4416goto error;4417if (add_prop_list_default(zpool_prop_to_name(4418ZPOOL_PROP_CACHEFILE), "none", &props))4419goto error;4420break;4421case 's':4422do_scan = B_TRUE;4423break;4424case 't':4425flags |= ZFS_IMPORT_TEMP_NAME;4426if (add_prop_list_default(zpool_prop_to_name(4427ZPOOL_PROP_CACHEFILE), "none", &props))4428goto error;4429break;44304431case 'T':4432errno = 0;4433txg = strtoull(optarg, &endptr, 0);4434if (errno != 0 || *endptr != '\0') {4435(void) fprintf(stderr,4436gettext("invalid txg value\n"));4437usage(B_FALSE);4438}4439rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;4440break;4441case 'V':4442flags |= ZFS_IMPORT_VERBATIM;4443break;4444case 'X':4445xtreme_rewind = B_TRUE;4446break;4447case CHECKPOINT_OPT:4448flags |= ZFS_IMPORT_CHECKPOINT;4449break;4450case ':':4451(void) fprintf(stderr, gettext("missing argument for "4452"'%c' option\n"), optopt);4453usage(B_FALSE);4454break;4455case '?':4456(void) fprintf(stderr, gettext("invalid option '%c'\n"),4457optopt);4458usage(B_FALSE);4459}4460}44614462argc -= optind;4463argv += optind;44644465if (cachefile && nsearch != 0) {4466(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));4467usage(B_FALSE);4468}44694470if (cachefile && do_scan) {4471(void) fprintf(stderr, gettext("-c is incompatible with -s\n"));4472usage(B_FALSE);4473}44744475if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {4476(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));4477usage(B_FALSE);4478}44794480if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {4481(void) fprintf(stderr, gettext("-l is only meaningful during "4482"an import\n"));4483usage(B_FALSE);4484}44854486if ((dryrun || xtreme_rewind) && !do_rewind) {4487(void) fprintf(stderr,4488gettext("-n or -X only meaningful with -F\n"));4489usage(B_FALSE);4490}4491if (dryrun)4492rewind_policy = ZPOOL_TRY_REWIND;4493else if (do_rewind)4494rewind_policy = ZPOOL_DO_REWIND;4495if (xtreme_rewind)4496rewind_policy |= ZPOOL_EXTREME_REWIND;44974498/* In the future, we can capture further policy and include it here */4499if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||4500nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||4501nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,4502rewind_policy) != 0)4503goto error;45044505/* check argument count */4506if (do_all) {4507if (argc != 0) {4508(void) fprintf(stderr, gettext("too many arguments\n"));4509usage(B_FALSE);4510}4511} else {4512if (argc > 2) {4513(void) fprintf(stderr, gettext("too many arguments\n"));4514usage(B_FALSE);4515}4516}45174518/*4519* Check for the effective uid. We do this explicitly here because4520* otherwise any attempt to discover pools will silently fail.4521*/4522if (argc == 0 && geteuid() != 0) {4523(void) fprintf(stderr, gettext("cannot "4524"discover pools: permission denied\n"));45254526free(searchdirs);4527nvlist_free(props);4528nvlist_free(policy);4529return (1);4530}45314532/*4533* Depending on the arguments given, we do one of the following:4534*4535* <none> Iterate through all pools and display information about4536* each one.4537*4538* -a Iterate through all pools and try to import each one.4539*4540* <id> Find the pool that corresponds to the given GUID/pool4541* name and import that one.4542*4543* -D Above options applies only to destroyed pools.4544*/4545if (argc != 0) {4546char *endptr;45474548errno = 0;4549searchguid = strtoull(argv[0], &endptr, 10);4550if (errno != 0 || *endptr != '\0') {4551searchname = argv[0];4552searchguid = 0;4553}45544555/*4556* User specified a name or guid. Ensure it's unique.4557*/4558target_exists_args_t search = {searchname, searchguid};4559pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);4560}45614562/*4563* Check the environment for the preferred search path.4564*/4565if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {4566char *dir, *tmp = NULL;45674568envdup = strdup(env);45694570for (dir = strtok_r(envdup, ":", &tmp);4571dir != NULL;4572dir = strtok_r(NULL, ":", &tmp)) {4573searchdirs = safe_realloc(searchdirs,4574(nsearch + 1) * sizeof (char *));4575searchdirs[nsearch++] = dir;4576}4577}45784579idata.path = searchdirs;4580idata.paths = nsearch;4581idata.poolname = searchname;4582idata.guid = searchguid;4583idata.cachefile = cachefile;4584idata.scan = do_scan;4585idata.policy = policy;4586idata.do_destroyed = do_destroyed;4587idata.do_all = do_all;45884589libpc_handle_t lpch = {4590.lpc_lib_handle = g_zfs,4591.lpc_ops = &libzfs_config_ops,4592.lpc_printerr = B_TRUE4593};4594pools = zpool_search_import(&lpch, &idata);45954596if (pools != NULL && pool_exists &&4597(argc == 1 || strcmp(argv[0], argv[1]) == 0)) {4598(void) fprintf(stderr, gettext("cannot import '%s': "4599"a pool with that name already exists\n"),4600argv[0]);4601(void) fprintf(stderr, gettext("use the form '%s "4602"<pool | id> <newpool>' to give it a new name\n"),4603"zpool import");4604err = 1;4605} else if (pools == NULL && pool_exists) {4606(void) fprintf(stderr, gettext("cannot import '%s': "4607"a pool with that name is already created/imported,\n"),4608argv[0]);4609(void) fprintf(stderr, gettext("and no additional pools "4610"with that name were found\n"));4611err = 1;4612} else if (pools == NULL) {4613if (argc != 0) {4614(void) fprintf(stderr, gettext("cannot import '%s': "4615"no such pool available\n"), argv[0]);4616}4617err = 1;4618}46194620if (err == 1) {4621free(searchdirs);4622free(envdup);4623nvlist_free(policy);4624nvlist_free(pools);4625nvlist_free(props);4626return (1);4627}46284629err = import_pools(pools, props, mntopts, flags,4630argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata);46314632/*4633* If we're using the cachefile and we failed to import, then4634* fallback to scanning the directory for pools that match4635* those in the cachefile.4636*/4637if (err != 0 && cachefile != NULL) {4638(void) printf(gettext("cachefile import failed, retrying\n"));46394640/*4641* We use the scan flag to gather the directories that exist4642* in the cachefile. If we need to fallback to searching for4643* the pool config, we will only search devices in these4644* directories.4645*/4646idata.scan = B_TRUE;4647nvlist_free(pools);4648pools = zpool_search_import(&lpch, &idata);46494650err = import_pools(pools, props, mntopts, flags,4651argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL,4652&idata);4653}46544655error:4656nvlist_free(props);4657nvlist_free(pools);4658nvlist_free(policy);4659free(searchdirs);4660free(envdup);46614662return (err ? 1 : 0);4663}46644665/*4666* zpool sync [-f] [pool] ...4667*4668* -f (undocumented) force uberblock (and config including zpool cache file)4669* update.4670*4671* Sync the specified pool(s).4672* Without arguments "zpool sync" will sync all pools.4673* This command initiates TXG sync(s) and will return after the TXG(s) commit.4674*4675*/4676static int4677zpool_do_sync(int argc, char **argv)4678{4679int ret;4680boolean_t force = B_FALSE;46814682/* check options */4683while ((ret = getopt(argc, argv, "f")) != -1) {4684switch (ret) {4685case 'f':4686force = B_TRUE;4687break;4688case '?':4689(void) fprintf(stderr, gettext("invalid option '%c'\n"),4690optopt);4691usage(B_FALSE);4692}4693}46944695argc -= optind;4696argv += optind;46974698/* if argc == 0 we will execute zpool_sync_one on all pools */4699ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,4700B_FALSE, zpool_sync_one, &force);47014702return (ret);4703}47044705typedef struct iostat_cbdata {4706uint64_t cb_flags;4707int cb_namewidth;4708int cb_iteration;4709boolean_t cb_verbose;4710boolean_t cb_literal;4711boolean_t cb_scripted;4712zpool_list_t *cb_list;4713vdev_cmd_data_list_t *vcdl;4714vdev_cbdata_t cb_vdevs;4715} iostat_cbdata_t;47164717/* iostat labels */4718typedef struct name_and_columns {4719const char *name; /* Column name */4720unsigned int columns; /* Center name to this number of columns */4721} name_and_columns_t;47224723#define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */47244725static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =4726{4727[IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},4728{NULL}},4729[IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},4730{"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},4731{NULL}},4732[IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},4733{"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},4734{"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},4735[IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},4736{"asyncq_wait", 2}, {NULL}},4737[IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},4738{"async_read", 2}, {"async_write", 2}, {"scrub", 2},4739{"trim", 2}, {"rebuild", 2}, {NULL}},4740};47414742/* Shorthand - if "columns" field not set, default to 1 column */4743static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =4744{4745[IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},4746{"write"}, {NULL}},4747[IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},4748{"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},4749{NULL}},4750[IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},4751{"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},4752{"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},4753[IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},4754{"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},4755{NULL}},4756[IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},4757{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},4758{"ind"}, {"agg"}, {NULL}},4759};47604761static const char *histo_to_title[] = {4762[IOS_L_HISTO] = "latency",4763[IOS_RQ_HISTO] = "req_size",4764};47654766/*4767* Return the number of labels in a null-terminated name_and_columns_t4768* array.4769*4770*/4771static unsigned int4772label_array_len(const name_and_columns_t *labels)4773{4774int i = 0;47754776while (labels[i].name)4777i++;47784779return (i);4780}47814782/*4783* Return the number of strings in a null-terminated string array.4784* For example:4785*4786* const char foo[] = {"bar", "baz", NULL}4787*4788* returns 24789*/4790static uint64_t4791str_array_len(const char *array[])4792{4793uint64_t i = 0;4794while (array[i])4795i++;47964797return (i);4798}479948004801/*4802* Return a default column width for default/latency/queue columns. This does4803* not include histograms, which have their columns autosized.4804*/4805static unsigned int4806default_column_width(iostat_cbdata_t *cb, enum iostat_type type)4807{4808unsigned long column_width = 5; /* Normal niceprint */4809static unsigned long widths[] = {4810/*4811* Choose some sane default column sizes for printing the4812* raw numbers.4813*/4814[IOS_DEFAULT] = 15, /* 1PB capacity */4815[IOS_LATENCY] = 10, /* 1B ns = 10sec */4816[IOS_QUEUES] = 6, /* 1M queue entries */4817[IOS_L_HISTO] = 10, /* 1B ns = 10sec */4818[IOS_RQ_HISTO] = 6, /* 1M queue entries */4819};48204821if (cb->cb_literal)4822column_width = widths[type];48234824return (column_width);4825}48264827/*4828* Print the column labels, i.e:4829*4830* capacity operations bandwidth4831* alloc free read write read write ...4832*4833* If force_column_width is set, use it for the column width. If not set, use4834* the default column width.4835*/4836static void4837print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,4838const name_and_columns_t labels[][IOSTAT_MAX_LABELS])4839{4840int i, idx, s;4841int text_start, rw_column_width, spaces_to_end;4842uint64_t flags = cb->cb_flags;4843uint64_t f;4844unsigned int column_width = force_column_width;48454846/* For each bit set in flags */4847for (f = flags; f; f &= ~(1ULL << idx)) {4848idx = lowbit64(f) - 1;4849if (!force_column_width)4850column_width = default_column_width(cb, idx);4851/* Print our top labels centered over "read write" label. */4852for (i = 0; i < label_array_len(labels[idx]); i++) {4853const char *name = labels[idx][i].name;4854/*4855* We treat labels[][].columns == 0 as shorthand4856* for one column. It makes writing out the label4857* tables more concise.4858*/4859unsigned int columns = MAX(1, labels[idx][i].columns);4860unsigned int slen = strlen(name);48614862rw_column_width = (column_width * columns) +4863(2 * (columns - 1));48644865text_start = (int)((rw_column_width) / columns -4866slen / columns);4867if (text_start < 0)4868text_start = 0;48694870printf(" "); /* Two spaces between columns */48714872/* Space from beginning of column to label */4873for (s = 0; s < text_start; s++)4874printf(" ");48754876printf("%s", name);48774878/* Print space after label to end of column */4879spaces_to_end = rw_column_width - text_start - slen;4880if (spaces_to_end < 0)4881spaces_to_end = 0;48824883for (s = 0; s < spaces_to_end; s++)4884printf(" ");4885}4886}4887}488848894890/*4891* print_cmd_columns - Print custom column titles from -c4892*4893* If the user specified the "zpool status|iostat -c" then print their custom4894* column titles in the header. For example, print_cmd_columns() would print4895* the " col1 col2" part of this:4896*4897* $ zpool iostat -vc 'echo col1=val1; echo col2=val2'4898* ...4899* capacity operations bandwidth4900* pool alloc free read write read write col1 col24901* ---------- ----- ----- ----- ----- ----- ----- ---- ----4902* mypool 269K 1008M 0 0 107 9464903* mirror 269K 1008M 0 0 107 9464904* sdb - - 0 0 102 473 val1 val24905* sdc - - 0 0 5 473 val1 val24906* ---------- ----- ----- ----- ----- ----- ----- ---- ----4907*/4908static void4909print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)4910{4911int i, j;4912vdev_cmd_data_t *data = &vcdl->data[0];49134914if (vcdl->count == 0 || data == NULL)4915return;49164917/*4918* Each vdev cmd should have the same column names unless the user did4919* something weird with their cmd. Just take the column names from the4920* first vdev and assume it works for all of them.4921*/4922for (i = 0; i < vcdl->uniq_cols_cnt; i++) {4923printf(" ");4924if (use_dashes) {4925for (j = 0; j < vcdl->uniq_cols_width[i]; j++)4926printf("-");4927} else {4928(void) printf_color(ANSI_BOLD, "%*s",4929vcdl->uniq_cols_width[i],4930vcdl->uniq_cols[i]);4931}4932}4933}493449354936/*4937* Utility function to print out a line of dashes like:4938*4939* -------------------------------- ----- ----- ----- ----- -----4940*4941* ...or a dashed named-row line like:4942*4943* logs - - - - -4944*4945* @cb: iostat data4946*4947* @force_column_width If non-zero, use the value as the column width.4948* Otherwise use the default column widths.4949*4950* @name: Print a dashed named-row line starting4951* with @name. Otherwise, print a regular4952* dashed line.4953*/4954static void4955print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,4956const char *name)4957{4958int i;4959unsigned int namewidth;4960uint64_t flags = cb->cb_flags;4961uint64_t f;4962int idx;4963const name_and_columns_t *labels;4964const char *title;496549664967if (cb->cb_flags & IOS_ANYHISTO_M) {4968title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];4969} else if (cb->cb_vdevs.cb_names_count) {4970title = "vdev";4971} else {4972title = "pool";4973}49744975namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),4976name ? strlen(name) : 0);497749784979if (name) {4980printf("%-*s", namewidth, name);4981} else {4982for (i = 0; i < namewidth; i++)4983(void) printf("-");4984}49854986/* For each bit in flags */4987for (f = flags; f; f &= ~(1ULL << idx)) {4988unsigned int column_width;4989idx = lowbit64(f) - 1;4990if (force_column_width)4991column_width = force_column_width;4992else4993column_width = default_column_width(cb, idx);49944995labels = iostat_bottom_labels[idx];4996for (i = 0; i < label_array_len(labels); i++) {4997if (name)4998printf(" %*s-", column_width - 1, " ");4999else5000printf(" %.*s", column_width,5001"--------------------");5002}5003}5004}500550065007static void5008print_iostat_separator_impl(iostat_cbdata_t *cb,5009unsigned int force_column_width)5010{5011print_iostat_dashes(cb, force_column_width, NULL);5012}50135014static void5015print_iostat_separator(iostat_cbdata_t *cb)5016{5017print_iostat_separator_impl(cb, 0);5018}50195020static void5021print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,5022const char *histo_vdev_name)5023{5024unsigned int namewidth;5025const char *title;50265027color_start(ANSI_BOLD);50285029if (cb->cb_flags & IOS_ANYHISTO_M) {5030title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];5031} else if (cb->cb_vdevs.cb_names_count) {5032title = "vdev";5033} else {5034title = "pool";5035}50365037namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),5038histo_vdev_name ? strlen(histo_vdev_name) : 0);50395040if (histo_vdev_name)5041printf("%-*s", namewidth, histo_vdev_name);5042else5043printf("%*s", namewidth, "");504450455046print_iostat_labels(cb, force_column_width, iostat_top_labels);5047printf("\n");50485049printf("%-*s", namewidth, title);50505051print_iostat_labels(cb, force_column_width, iostat_bottom_labels);5052if (cb->vcdl != NULL)5053print_cmd_columns(cb->vcdl, 0);50545055printf("\n");50565057print_iostat_separator_impl(cb, force_column_width);50585059if (cb->vcdl != NULL)5060print_cmd_columns(cb->vcdl, 1);50615062color_end();50635064printf("\n");5065}50665067static void5068print_iostat_header(iostat_cbdata_t *cb)5069{5070print_iostat_header_impl(cb, 0, NULL);5071}50725073/*5074* Prints a size string (i.e. 120M) with the suffix ("M") colored5075* by order of magnitude. Uses column_size to add padding.5076*/5077static void5078print_stat_color(const char *statbuf, unsigned int column_size)5079{5080(void) fputs(" ", stdout);5081size_t len = strlen(statbuf);5082while (len < column_size) {5083(void) fputc(' ', stdout);5084column_size--;5085}5086if (*statbuf == '0') {5087color_start(ANSI_GRAY);5088(void) fputc('0', stdout);5089} else {5090for (; *statbuf; statbuf++) {5091if (*statbuf == 'K') color_start(ANSI_GREEN);5092else if (*statbuf == 'M') color_start(ANSI_YELLOW);5093else if (*statbuf == 'G') color_start(ANSI_RED);5094else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);5095else if (*statbuf == 'P') color_start(ANSI_MAGENTA);5096else if (*statbuf == 'E') color_start(ANSI_CYAN);5097(void) fputc(*statbuf, stdout);5098if (--column_size <= 0)5099break;5100}5101}5102color_end();5103}51045105/*5106* Display a single statistic.5107*/5108static void5109print_one_stat(uint64_t value, enum zfs_nicenum_format format,5110unsigned int column_size, boolean_t scripted)5111{5112char buf[64];51135114zfs_nicenum_format(value, buf, sizeof (buf), format);51155116if (scripted)5117printf("\t%s", buf);5118else5119print_stat_color(buf, column_size);5120}51215122/*5123* Calculate the default vdev stats5124*5125* Subtract oldvs from newvs, apply a scaling factor, and save the resulting5126* stats into calcvs.5127*/5128static void5129calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,5130vdev_stat_t *calcvs)5131{5132int i;51335134memcpy(calcvs, newvs, sizeof (*calcvs));5135for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)5136calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);51375138for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)5139calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);5140}51415142/*5143* Internal representation of the extended iostats data.5144*5145* The extended iostat stats are exported in nvlists as either uint64_t arrays5146* or single uint64_t's. We make both look like arrays to make them easier5147* to process. In order to make single uint64_t's look like arrays, we set5148* __data to the stat data, and then set *data = &__data with count = 1. Then,5149* we can just use *data and count.5150*/5151struct stat_array {5152uint64_t *data;5153uint_t count; /* Number of entries in data[] */5154uint64_t __data; /* Only used when data is a single uint64_t */5155};51565157static uint64_t5158stat_histo_max(struct stat_array *nva, unsigned int len)5159{5160uint64_t max = 0;5161int i;5162for (i = 0; i < len; i++)5163max = MAX(max, array64_max(nva[i].data, nva[i].count));51645165return (max);5166}51675168/*5169* Helper function to lookup a uint64_t array or uint64_t value and store its5170* data as a stat_array. If the nvpair is a single uint64_t value, then we make5171* it look like a one element array to make it easier to process.5172*/5173static int5174nvpair64_to_stat_array(nvlist_t *nvl, const char *name,5175struct stat_array *nva)5176{5177nvpair_t *tmp;5178int ret;51795180verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);5181switch (nvpair_type(tmp)) {5182case DATA_TYPE_UINT64_ARRAY:5183ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);5184break;5185case DATA_TYPE_UINT64:5186ret = nvpair_value_uint64(tmp, &nva->__data);5187nva->data = &nva->__data;5188nva->count = 1;5189break;5190default:5191/* Not a uint64_t */5192ret = EINVAL;5193break;5194}51955196return (ret);5197}51985199/*5200* Given a list of nvlist names, look up the extended stats in newnv and oldnv,5201* subtract them, and return the results in a newly allocated stat_array.5202* You must free the returned array after you are done with it with5203* free_calc_stats().5204*5205* Additionally, you can set "oldnv" to NULL if you simply want the newnv5206* values.5207*/5208static struct stat_array *5209calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,5210nvlist_t *newnv)5211{5212nvlist_t *oldnvx = NULL, *newnvx;5213struct stat_array *oldnva, *newnva, *calcnva;5214int i, j;5215unsigned int alloc_size = (sizeof (struct stat_array)) * len;52165217/* Extract our extended stats nvlist from the main list */5218verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,5219&newnvx) == 0);5220if (oldnv) {5221verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,5222&oldnvx) == 0);5223}52245225newnva = safe_malloc(alloc_size);5226oldnva = safe_malloc(alloc_size);5227calcnva = safe_malloc(alloc_size);52285229for (j = 0; j < len; j++) {5230verify(nvpair64_to_stat_array(newnvx, names[j],5231&newnva[j]) == 0);5232calcnva[j].count = newnva[j].count;5233alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);5234calcnva[j].data = safe_malloc(alloc_size);5235memcpy(calcnva[j].data, newnva[j].data, alloc_size);52365237if (oldnvx) {5238verify(nvpair64_to_stat_array(oldnvx, names[j],5239&oldnva[j]) == 0);5240for (i = 0; i < oldnva[j].count; i++)5241calcnva[j].data[i] -= oldnva[j].data[i];5242}5243}5244free(newnva);5245free(oldnva);5246return (calcnva);5247}52485249static void5250free_calc_stats(struct stat_array *nva, unsigned int len)5251{5252int i;5253for (i = 0; i < len; i++)5254free(nva[i].data);52555256free(nva);5257}52585259static void5260print_iostat_histo(struct stat_array *nva, unsigned int len,5261iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,5262double scale)5263{5264int i, j;5265char buf[6];5266uint64_t val;5267enum zfs_nicenum_format format;5268unsigned int buckets;5269unsigned int start_bucket;52705271if (cb->cb_literal)5272format = ZFS_NICENUM_RAW;5273else5274format = ZFS_NICENUM_1024;52755276/* All these histos are the same size, so just use nva[0].count */5277buckets = nva[0].count;52785279if (cb->cb_flags & IOS_RQ_HISTO_M) {5280/* Start at 512 - req size should never be lower than this */5281start_bucket = 9;5282} else {5283start_bucket = 0;5284}52855286for (j = start_bucket; j < buckets; j++) {5287/* Print histogram bucket label */5288if (cb->cb_flags & IOS_L_HISTO_M) {5289/* Ending range of this bucket */5290val = (1UL << (j + 1)) - 1;5291zfs_nicetime(val, buf, sizeof (buf));5292} else {5293/* Request size (starting range of bucket) */5294val = (1UL << j);5295zfs_nicenum(val, buf, sizeof (buf));5296}52975298if (cb->cb_scripted)5299printf("%llu", (u_longlong_t)val);5300else5301printf("%-*s", namewidth, buf);53025303/* Print the values on the line */5304for (i = 0; i < len; i++) {5305print_one_stat(nva[i].data[j] * scale, format,5306column_width, cb->cb_scripted);5307}5308printf("\n");5309}5310}53115312static void5313print_solid_separator(unsigned int length)5314{5315while (length--)5316printf("-");5317printf("\n");5318}53195320static void5321print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,5322nvlist_t *newnv, double scale, const char *name)5323{5324unsigned int column_width;5325unsigned int namewidth;5326unsigned int entire_width;5327enum iostat_type type;5328struct stat_array *nva;5329const char **names;5330unsigned int names_len;53315332/* What type of histo are we? */5333type = IOS_HISTO_IDX(cb->cb_flags);53345335/* Get NULL-terminated array of nvlist names for our histo */5336names = vsx_type_to_nvlist[type];5337names_len = str_array_len(names); /* num of names */53385339nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);53405341if (cb->cb_literal) {5342column_width = MAX(5,5343(unsigned int) log10(stat_histo_max(nva, names_len)) + 1);5344} else {5345column_width = 5;5346}53475348namewidth = MAX(cb->cb_namewidth,5349strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));53505351/*5352* Calculate the entire line width of what we're printing. The5353* +2 is for the two spaces between columns:5354*/5355/* read write */5356/* ----- ----- */5357/* |___| <---------- column_width */5358/* */5359/* |__________| <--- entire_width */5360/* */5361entire_width = namewidth + (column_width + 2) *5362label_array_len(iostat_bottom_labels[type]);53635364if (cb->cb_scripted)5365printf("%s\n", name);5366else5367print_iostat_header_impl(cb, column_width, name);53685369print_iostat_histo(nva, names_len, cb, column_width,5370namewidth, scale);53715372free_calc_stats(nva, names_len);5373if (!cb->cb_scripted)5374print_solid_separator(entire_width);5375}53765377/*5378* Calculate the average latency of a power-of-two latency histogram5379*/5380static uint64_t5381single_histo_average(uint64_t *histo, unsigned int buckets)5382{5383int i;5384uint64_t count = 0, total = 0;53855386for (i = 0; i < buckets; i++) {5387/*5388* Our buckets are power-of-two latency ranges. Use the5389* midpoint latency of each bucket to calculate the average.5390* For example:5391*5392* Bucket Midpoint5393* 8ns-15ns: 12ns5394* 16ns-31ns: 24ns5395* ...5396*/5397if (histo[i] != 0) {5398total += histo[i] * (((1UL << i) + ((1UL << i)/2)));5399count += histo[i];5400}5401}54025403/* Prevent divide by zero */5404return (count == 0 ? 0 : total / count);5405}54065407static void5408print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)5409{5410const char *names[] = {5411ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,5412ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,5413ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,5414ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,5415ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,5416ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,5417ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,5418ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,5419ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,5420ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,5421ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,5422ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,5423ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,5424ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,5425};54265427struct stat_array *nva;54285429unsigned int column_width = default_column_width(cb, IOS_QUEUES);5430enum zfs_nicenum_format format;54315432nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);54335434if (cb->cb_literal)5435format = ZFS_NICENUM_RAW;5436else5437format = ZFS_NICENUM_1024;54385439for (int i = 0; i < ARRAY_SIZE(names); i++) {5440uint64_t val = nva[i].data[0];5441print_one_stat(val, format, column_width, cb->cb_scripted);5442}54435444free_calc_stats(nva, ARRAY_SIZE(names));5445}54465447static void5448print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,5449nvlist_t *newnv)5450{5451int i;5452uint64_t val;5453const char *names[] = {5454ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,5455ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,5456ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,5457ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,5458ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,5459ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,5460ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,5461ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,5462ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,5463ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,5464ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,5465};5466struct stat_array *nva;54675468unsigned int column_width = default_column_width(cb, IOS_LATENCY);5469enum zfs_nicenum_format format;54705471nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);54725473if (cb->cb_literal)5474format = ZFS_NICENUM_RAWTIME;5475else5476format = ZFS_NICENUM_TIME;54775478/* Print our avg latencies on the line */5479for (i = 0; i < ARRAY_SIZE(names); i++) {5480/* Compute average latency for a latency histo */5481val = single_histo_average(nva[i].data, nva[i].count);5482print_one_stat(val, format, column_width, cb->cb_scripted);5483}5484free_calc_stats(nva, ARRAY_SIZE(names));5485}54865487/*5488* Print default statistics (capacity/operations/bandwidth)5489*/5490static void5491print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)5492{5493unsigned int column_width = default_column_width(cb, IOS_DEFAULT);5494enum zfs_nicenum_format format;5495char na; /* char to print for "not applicable" values */54965497if (cb->cb_literal) {5498format = ZFS_NICENUM_RAW;5499na = '0';5500} else {5501format = ZFS_NICENUM_1024;5502na = '-';5503}55045505/* only toplevel vdevs have capacity stats */5506if (vs->vs_space == 0) {5507if (cb->cb_scripted)5508printf("\t%c\t%c", na, na);5509else5510printf(" %*c %*c", column_width, na, column_width,5511na);5512} else {5513print_one_stat(vs->vs_alloc, format, column_width,5514cb->cb_scripted);5515print_one_stat(vs->vs_space - vs->vs_alloc, format,5516column_width, cb->cb_scripted);5517}55185519print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),5520format, column_width, cb->cb_scripted);5521print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),5522format, column_width, cb->cb_scripted);5523print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),5524format, column_width, cb->cb_scripted);5525print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),5526format, column_width, cb->cb_scripted);5527}55285529static const char *const class_name[] = {5530VDEV_ALLOC_BIAS_DEDUP,5531VDEV_ALLOC_BIAS_SPECIAL,5532VDEV_ALLOC_CLASS_LOGS5533};55345535/*5536* Print out all the statistics for the given vdev. This can either be the5537* toplevel configuration, or called recursively. If 'name' is NULL, then this5538* is a verbose output, and we don't want to display the toplevel pool stats.5539*5540* Returns the number of stat lines printed.5541*/5542static unsigned int5543print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,5544nvlist_t *newnv, iostat_cbdata_t *cb, int depth)5545{5546nvlist_t **oldchild, **newchild;5547uint_t c, children, oldchildren;5548vdev_stat_t *oldvs, *newvs, *calcvs;5549vdev_stat_t zerovs = { 0 };5550char *vname;5551int i;5552int ret = 0;5553uint64_t tdelta;5554double scale;55555556if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)5557return (ret);55585559calcvs = safe_malloc(sizeof (*calcvs));55605561if (oldnv != NULL) {5562verify(nvlist_lookup_uint64_array(oldnv,5563ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);5564} else {5565oldvs = &zerovs;5566}55675568/* Do we only want to see a specific vdev? */5569for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {5570/* Yes we do. Is this the vdev? */5571if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {5572/*5573* This is our vdev. Since it is the only vdev we5574* will be displaying, make depth = 0 so that it5575* doesn't get indented.5576*/5577depth = 0;5578break;5579}5580}55815582if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {5583/* Couldn't match the name */5584goto children;5585}558655875588verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,5589(uint64_t **)&newvs, &c) == 0);55905591/*5592* Print the vdev name unless it's is a histogram. Histograms5593* display the vdev name in the header itself.5594*/5595if (!(cb->cb_flags & IOS_ANYHISTO_M)) {5596if (cb->cb_scripted) {5597printf("%s", name);5598} else {5599if (strlen(name) + depth > cb->cb_namewidth)5600(void) printf("%*s%s", depth, "", name);5601else5602(void) printf("%*s%s%*s", depth, "", name,5603(int)(cb->cb_namewidth - strlen(name) -5604depth), "");5605}5606}56075608/* Calculate our scaling factor */5609tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;5610if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {5611/*5612* If we specify printing histograms with no time interval, then5613* print the histogram numbers over the entire lifetime of the5614* vdev.5615*/5616scale = 1;5617} else {5618if (tdelta == 0)5619scale = 1.0;5620else5621scale = (double)NANOSEC / tdelta;5622}56235624if (cb->cb_flags & IOS_DEFAULT_M) {5625calc_default_iostats(oldvs, newvs, calcvs);5626print_iostat_default(calcvs, cb, scale);5627}5628if (cb->cb_flags & IOS_LATENCY_M)5629print_iostat_latency(cb, oldnv, newnv);5630if (cb->cb_flags & IOS_QUEUES_M)5631print_iostat_queues(cb, newnv);5632if (cb->cb_flags & IOS_ANYHISTO_M) {5633printf("\n");5634print_iostat_histos(cb, oldnv, newnv, scale, name);5635}56365637if (cb->vcdl != NULL) {5638const char *path;5639if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,5640&path) == 0) {5641printf(" ");5642zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);5643}5644}56455646if (!(cb->cb_flags & IOS_ANYHISTO_M))5647printf("\n");56485649ret++;56505651children:56525653free(calcvs);56545655if (!cb->cb_verbose)5656return (ret);56575658if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,5659&newchild, &children) != 0)5660return (ret);56615662if (oldnv) {5663if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,5664&oldchild, &oldchildren) != 0)5665return (ret);56665667children = MIN(oldchildren, children);5668}56695670/*5671* print normal top-level devices5672*/5673for (c = 0; c < children; c++) {5674uint64_t ishole = B_FALSE, islog = B_FALSE;56755676(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,5677&ishole);56785679(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,5680&islog);56815682if (ishole || islog)5683continue;56845685if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))5686continue;56875688vname = zpool_vdev_name(g_zfs, zhp, newchild[c],5689cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);5690ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,5691newchild[c], cb, depth + 2);5692free(vname);5693}56945695/*5696* print all other top-level devices5697*/5698for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {5699boolean_t printed = B_FALSE;57005701for (c = 0; c < children; c++) {5702uint64_t islog = B_FALSE;5703const char *bias = NULL;5704const char *type = NULL;57055706(void) nvlist_lookup_uint64(newchild[c],5707ZPOOL_CONFIG_IS_LOG, &islog);5708if (islog) {5709bias = VDEV_ALLOC_CLASS_LOGS;5710} else {5711(void) nvlist_lookup_string(newchild[c],5712ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);5713(void) nvlist_lookup_string(newchild[c],5714ZPOOL_CONFIG_TYPE, &type);5715}5716if (bias == NULL || strcmp(bias, class_name[n]) != 0)5717continue;5718if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)5719continue;57205721if (!printed) {5722if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&5723!cb->cb_scripted &&5724!cb->cb_vdevs.cb_names) {5725print_iostat_dashes(cb, 0,5726class_name[n]);5727}5728printf("\n");5729printed = B_TRUE;5730}57315732vname = zpool_vdev_name(g_zfs, zhp, newchild[c],5733cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);5734ret += print_vdev_stats(zhp, vname, oldnv ?5735oldchild[c] : NULL, newchild[c], cb, depth + 2);5736free(vname);5737}5738}57395740/*5741* Include level 2 ARC devices in iostat output5742*/5743if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,5744&newchild, &children) != 0)5745return (ret);57465747if (oldnv) {5748if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,5749&oldchild, &oldchildren) != 0)5750return (ret);57515752children = MIN(oldchildren, children);5753}57545755if (children > 0) {5756if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&5757!cb->cb_vdevs.cb_names) {5758print_iostat_dashes(cb, 0, "cache");5759}5760printf("\n");57615762for (c = 0; c < children; c++) {5763vname = zpool_vdev_name(g_zfs, zhp, newchild[c],5764cb->cb_vdevs.cb_name_flags);5765ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]5766: NULL, newchild[c], cb, depth + 2);5767free(vname);5768}5769}57705771return (ret);5772}57735774/*5775* Callback to print out the iostats for the given pool.5776*/5777static int5778print_iostat(zpool_handle_t *zhp, void *data)5779{5780iostat_cbdata_t *cb = data;5781nvlist_t *oldconfig, *newconfig;5782nvlist_t *oldnvroot, *newnvroot;5783int ret;57845785newconfig = zpool_get_config(zhp, &oldconfig);57865787if (cb->cb_iteration == 1)5788oldconfig = NULL;57895790verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,5791&newnvroot) == 0);57925793if (oldconfig == NULL)5794oldnvroot = NULL;5795else5796verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,5797&oldnvroot) == 0);57985799ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,5800cb, 0);5801if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&5802!cb->cb_scripted && cb->cb_verbose &&5803!cb->cb_vdevs.cb_names_count) {5804print_iostat_separator(cb);5805if (cb->vcdl != NULL) {5806print_cmd_columns(cb->vcdl, 1);5807}5808printf("\n");5809}58105811return (ret);5812}58135814static int5815get_columns(void)5816{5817struct winsize ws;5818int columns = 80;5819int error;58205821if (isatty(STDOUT_FILENO)) {5822error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);5823if (error == 0)5824columns = ws.ws_col;5825} else {5826columns = 999;5827}58285829return (columns);5830}58315832/*5833* Return the required length of the pool/vdev name column. The minimum5834* allowed width and output formatting flags must be provided.5835*/5836static int5837get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)5838{5839nvlist_t *config, *nvroot;5840int width = min_width;58415842if ((config = zpool_get_config(zhp, NULL)) != NULL) {5843verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,5844&nvroot) == 0);5845size_t poolname_len = strlen(zpool_get_name(zhp));5846if (verbose == B_FALSE) {5847width = MAX(poolname_len, min_width);5848} else {5849width = MAX(poolname_len,5850max_width(zhp, nvroot, 0, min_width, flags));5851}5852}58535854return (width);5855}58565857/*5858* Parse the input string, get the 'interval' and 'count' value if there is one.5859*/5860static void5861get_interval_count(int *argcp, char **argv, float *iv,5862unsigned long *cnt)5863{5864float interval = 0;5865unsigned long count = 0;5866int argc = *argcp;58675868/*5869* Determine if the last argument is an integer or a pool name5870*/5871if (argc > 0 && zfs_isnumber(argv[argc - 1])) {5872char *end;58735874errno = 0;5875interval = strtof(argv[argc - 1], &end);58765877if (*end == '\0' && errno == 0) {5878if (interval == 0) {5879(void) fprintf(stderr, gettext(5880"interval cannot be zero\n"));5881usage(B_FALSE);5882}5883/*5884* Ignore the last parameter5885*/5886argc--;5887} else {5888/*5889* If this is not a valid number, just plow on. The5890* user will get a more informative error message later5891* on.5892*/5893interval = 0;5894}5895}58965897/*5898* If the last argument is also an integer, then we have both a count5899* and an interval.5900*/5901if (argc > 0 && zfs_isnumber(argv[argc - 1])) {5902char *end;59035904errno = 0;5905count = interval;5906interval = strtof(argv[argc - 1], &end);59075908if (*end == '\0' && errno == 0) {5909if (interval == 0) {5910(void) fprintf(stderr, gettext(5911"interval cannot be zero\n"));5912usage(B_FALSE);5913}59145915/*5916* Ignore the last parameter5917*/5918argc--;5919} else {5920interval = 0;5921}5922}59235924*iv = interval;5925*cnt = count;5926*argcp = argc;5927}59285929static void5930get_timestamp_arg(char c)5931{5932if (c == 'u')5933timestamp_fmt = UDATE;5934else if (c == 'd')5935timestamp_fmt = DDATE;5936else5937usage(B_FALSE);5938}59395940/*5941* Return stat flags that are supported by all pools by both the module and5942* zpool iostat. "*data" should be initialized to all 0xFFs before running.5943* It will get ANDed down until only the flags that are supported on all pools5944* remain.5945*/5946static int5947get_stat_flags_cb(zpool_handle_t *zhp, void *data)5948{5949uint64_t *mask = data;5950nvlist_t *config, *nvroot, *nvx;5951uint64_t flags = 0;5952int i, j;59535954config = zpool_get_config(zhp, NULL);5955verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,5956&nvroot) == 0);59575958/* Default stats are always supported, but for completeness.. */5959if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))5960flags |= IOS_DEFAULT_M;59615962/* Get our extended stats nvlist from the main list */5963if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,5964&nvx) != 0) {5965/*5966* No extended stats; they're probably running an older5967* module. No big deal, we support that too.5968*/5969goto end;5970}59715972/* For each extended stat, make sure all its nvpairs are supported */5973for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {5974if (!vsx_type_to_nvlist[j][0])5975continue;59765977/* Start off by assuming the flag is supported, then check */5978flags |= (1ULL << j);5979for (i = 0; vsx_type_to_nvlist[j][i]; i++) {5980if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {5981/* flag isn't supported */5982flags = flags & ~(1ULL << j);5983break;5984}5985}5986}5987end:5988*mask = *mask & flags;5989return (0);5990}59915992/*5993* Return a bitmask of stats that are supported on all pools by both the module5994* and zpool iostat.5995*/5996static uint64_t5997get_stat_flags(zpool_list_t *list)5998{5999uint64_t mask = -1;60006001/*6002* get_stat_flags_cb() will lop off bits from "mask" until only the6003* flags that are supported on all pools remain.6004*/6005(void) pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);6006return (mask);6007}60086009/*6010* Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.6011*/6012static int6013is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)6014{6015uint64_t guid;6016vdev_cbdata_t *cb = cb_data;6017zpool_handle_t *zhp = zhp_data;60186019if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)6020return (0);60216022return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));6023}60246025/*6026* Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.6027*/6028static int6029is_vdev(zpool_handle_t *zhp, void *cb_data)6030{6031return (for_each_vdev(zhp, is_vdev_cb, cb_data));6032}60336034/*6035* Check if vdevs are in a pool6036*6037* Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise6038* return 0. If pool_name is NULL, then search all pools.6039*/6040static int6041are_vdevs_in_pool(int argc, char **argv, char *pool_name,6042vdev_cbdata_t *cb)6043{6044char **tmp_name;6045int ret = 0;6046int i;6047int pool_count = 0;60486049if ((argc == 0) || !*argv)6050return (0);60516052if (pool_name)6053pool_count = 1;60546055/* Temporarily hijack cb_names for a second... */6056tmp_name = cb->cb_names;60576058/* Go though our list of prospective vdev names */6059for (i = 0; i < argc; i++) {6060cb->cb_names = argv + i;60616062/* Is this name a vdev in our pools? */6063ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,6064ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);6065if (!ret) {6066/* No match */6067break;6068}6069}60706071cb->cb_names = tmp_name;60726073return (ret);6074}60756076static int6077is_pool_cb(zpool_handle_t *zhp, void *data)6078{6079char *name = data;6080if (strcmp(name, zpool_get_name(zhp)) == 0)6081return (1);60826083return (0);6084}60856086/*6087* Do we have a pool named *name? If so, return 1, otherwise 0.6088*/6089static int6090is_pool(char *name)6091{6092return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,6093is_pool_cb, name));6094}60956096/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */6097static int6098are_all_pools(int argc, char **argv)6099{6100if ((argc == 0) || !*argv)6101return (0);61026103while (--argc >= 0)6104if (!is_pool(argv[argc]))6105return (0);61066107return (1);6108}61096110/*6111* Helper function to print out vdev/pool names we can't resolve. Used for an6112* error message.6113*/6114static void6115error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,6116vdev_cbdata_t *cb)6117{6118int i;6119char *name;6120char *str;6121for (i = 0; i < argc; i++) {6122name = argv[i];61236124if (is_pool(name))6125str = gettext("pool");6126else if (are_vdevs_in_pool(1, &name, pool_name, cb))6127str = gettext("vdev in this pool");6128else if (are_vdevs_in_pool(1, &name, NULL, cb))6129str = gettext("vdev in another pool");6130else6131str = gettext("unknown");61326133fprintf(stderr, "\t%s (%s)\n", name, str);6134}6135}61366137/*6138* Same as get_interval_count(), but with additional checks to not misinterpret6139* guids as interval/count values. Assumes VDEV_NAME_GUID is set in6140* cb.cb_vdevs.cb_name_flags.6141*/6142static void6143get_interval_count_filter_guids(int *argc, char **argv, float *interval,6144unsigned long *count, iostat_cbdata_t *cb)6145{6146int argc_for_interval = 0;61476148/* Is the last arg an interval value? Or a guid? */6149if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,6150&cb->cb_vdevs)) {6151/*6152* The last arg is not a guid, so it's probably an6153* interval value.6154*/6155argc_for_interval++;61566157if (*argc >= 2 &&6158!are_vdevs_in_pool(1, &argv[*argc - 2], NULL,6159&cb->cb_vdevs)) {6160/*6161* The 2nd to last arg is not a guid, so it's probably6162* an interval value.6163*/6164argc_for_interval++;6165}6166}61676168/* Point to our list of possible intervals */6169char **tmpargv = &argv[*argc - argc_for_interval];61706171*argc = *argc - argc_for_interval;6172get_interval_count(&argc_for_interval, tmpargv,6173interval, count);6174}61756176/*6177* Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or6178* if we were unable to determine its size.6179*/6180static int6181terminal_height(void)6182{6183struct winsize win;61846185if (isatty(STDOUT_FILENO) == 0)6186return (-1);61876188if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)6189return (win.ws_row);61906191return (-1);6192}61936194/*6195* Run one of the zpool status/iostat -c scripts with the help (-h) option and6196* print the result.6197*6198* name: Short name of the script ('iostat').6199* path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat');6200*/6201static void6202print_zpool_script_help(char *name, char *path)6203{6204char *argv[] = {path, (char *)"-h", NULL};6205char **lines = NULL;6206int lines_cnt = 0;6207int rc;62086209rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,6210&lines_cnt);6211if (rc != 0 || lines == NULL || lines_cnt <= 0) {6212if (lines != NULL)6213libzfs_free_str_array(lines, lines_cnt);6214return;6215}62166217for (int i = 0; i < lines_cnt; i++)6218if (!is_blank_str(lines[i]))6219printf(" %-14s %s\n", name, lines[i]);62206221libzfs_free_str_array(lines, lines_cnt);6222}62236224/*6225* Go though the zpool status/iostat -c scripts in the user's path, run their6226* help option (-h), and print out the results.6227*/6228static void6229print_zpool_dir_scripts(char *dirpath)6230{6231DIR *dir;6232struct dirent *ent;6233char fullpath[MAXPATHLEN];6234struct stat dir_stat;62356236if ((dir = opendir(dirpath)) != NULL) {6237/* print all the files and directories within directory */6238while ((ent = readdir(dir)) != NULL) {6239if (snprintf(fullpath, sizeof (fullpath), "%s/%s",6240dirpath, ent->d_name) >= sizeof (fullpath)) {6241(void) fprintf(stderr,6242gettext("internal error: "6243"ZPOOL_SCRIPTS_PATH too large.\n"));6244exit(1);6245}62466247/* Print the scripts */6248if (stat(fullpath, &dir_stat) == 0)6249if (dir_stat.st_mode & S_IXUSR &&6250S_ISREG(dir_stat.st_mode))6251print_zpool_script_help(ent->d_name,6252fullpath);6253}6254(void) closedir(dir);6255}6256}62576258/*6259* Print out help text for all zpool status/iostat -c scripts.6260*/6261static void6262print_zpool_script_list(const char *subcommand)6263{6264char *dir, *sp, *tmp;62656266printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);62676268sp = zpool_get_cmd_search_path();6269if (sp == NULL)6270return;62716272for (dir = strtok_r(sp, ":", &tmp);6273dir != NULL;6274dir = strtok_r(NULL, ":", &tmp))6275print_zpool_dir_scripts(dir);62766277free(sp);6278}62796280/*6281* Set the minimum pool/vdev name column width. The width must be at least 10,6282* but may be as large as the column width - 42 so it still fits on one line.6283* NOTE: 42 is the width of the default capacity/operations/bandwidth output6284*/6285static int6286get_namewidth_iostat(zpool_handle_t *zhp, void *data)6287{6288iostat_cbdata_t *cb = data;6289int width, available_width;62906291/*6292* get_namewidth() returns the maximum width of any name in that column6293* for any pool/vdev/device line that will be output.6294*/6295width = get_namewidth(zhp, cb->cb_namewidth,6296cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);62976298/*6299* The width we are calculating is the width of the header and also the6300* padding width for names that are less than maximum width. The stats6301* take up 42 characters, so the width available for names is:6302*/6303available_width = get_columns() - 42;63046305/*6306* If the maximum width fits on a screen, then great! Make everything6307* line up by justifying all lines to the same width. If that max6308* width is larger than what's available, the name plus stats won't fit6309* on one line, and justifying to that width would cause every line to6310* wrap on the screen. We only want lines with long names to wrap.6311* Limit the padding to what won't wrap.6312*/6313if (width > available_width)6314width = available_width;63156316/*6317* And regardless of whatever the screen width is (get_columns can6318* return 0 if the width is not known or less than 42 for a narrow6319* terminal) have the width be a minimum of 10.6320*/6321if (width < 10)6322width = 10;63236324/* Save the calculated width */6325cb->cb_namewidth = width;63266327return (0);6328}63296330/*6331* zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n name]6332* [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]6333* [interval [count]]6334*6335* -c CMD For each vdev, run command CMD6336* -g Display guid for individual vdev name.6337* -L Follow links when resolving vdev path name.6338* -P Display full path for vdev name.6339* -v Display statistics for individual vdevs6340* -h Display help6341* -p Display values in parsable (exact) format.6342* -H Scripted mode. Don't display headers, and separate properties6343* by a single tab.6344* -l Display average latency6345* -q Display queue depths6346* -w Display latency histograms6347* -r Display request size histogram6348* -T Display a timestamp in date(1) or Unix format6349* -n Only print headers once6350*6351* This command can be tricky because we want to be able to deal with pool6352* creation/destruction as well as vdev configuration changes. The bulk of this6353* processing is handled by the pool_list_* routines in zpool_iter.c. We rely6354* on pool_list_refresh() to detect the addition and removal of pools.6355* Configuration changes are all handled within libzfs.6356*/6357int6358zpool_do_iostat(int argc, char **argv)6359{6360int c;6361int ret;6362float interval = 0;6363unsigned long count = 0;6364zpool_list_t *list;6365boolean_t verbose = B_FALSE;6366boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;6367boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;6368boolean_t omit_since_boot = B_FALSE;6369boolean_t guid = B_FALSE;6370boolean_t follow_links = B_FALSE;6371boolean_t full_name = B_FALSE;6372boolean_t headers_once = B_FALSE;6373iostat_cbdata_t cb = { 0 };6374char *cmd = NULL;63756376/* Used for printing error message */6377const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',6378[IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};63796380uint64_t unsupported_flags;63816382/* check options */6383while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {6384switch (c) {6385case 'c':6386if (cmd != NULL) {6387fprintf(stderr,6388gettext("Can't set -c flag twice\n"));6389exit(1);6390}63916392if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&6393!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {6394fprintf(stderr, gettext(6395"Can't run -c, disabled by "6396"ZPOOL_SCRIPTS_ENABLED.\n"));6397exit(1);6398}63996400if ((getuid() <= 0 || geteuid() <= 0) &&6401!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {6402fprintf(stderr, gettext(6403"Can't run -c with root privileges "6404"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));6405exit(1);6406}6407cmd = optarg;6408verbose = B_TRUE;6409break;6410case 'g':6411guid = B_TRUE;6412break;6413case 'L':6414follow_links = B_TRUE;6415break;6416case 'P':6417full_name = B_TRUE;6418break;6419case 'T':6420get_timestamp_arg(*optarg);6421break;6422case 'v':6423verbose = B_TRUE;6424break;6425case 'p':6426parsable = B_TRUE;6427break;6428case 'l':6429latency = B_TRUE;6430break;6431case 'q':6432queues = B_TRUE;6433break;6434case 'H':6435scripted = B_TRUE;6436break;6437case 'w':6438l_histo = B_TRUE;6439break;6440case 'r':6441rq_histo = B_TRUE;6442break;6443case 'y':6444omit_since_boot = B_TRUE;6445break;6446case 'n':6447headers_once = B_TRUE;6448break;6449case 'h':6450usage(B_FALSE);6451break;6452case '?':6453if (optopt == 'c') {6454print_zpool_script_list("iostat");6455exit(0);6456} else {6457fprintf(stderr,6458gettext("invalid option '%c'\n"), optopt);6459}6460usage(B_FALSE);6461}6462}64636464argc -= optind;6465argv += optind;64666467cb.cb_literal = parsable;6468cb.cb_scripted = scripted;64696470if (guid)6471cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;6472if (follow_links)6473cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;6474if (full_name)6475cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;6476cb.cb_iteration = 0;6477cb.cb_namewidth = 0;6478cb.cb_verbose = verbose;64796480/* Get our interval and count values (if any) */6481if (guid) {6482get_interval_count_filter_guids(&argc, argv, &interval,6483&count, &cb);6484} else {6485get_interval_count(&argc, argv, &interval, &count);6486}64876488if (argc == 0) {6489/* No args, so just print the defaults. */6490} else if (are_all_pools(argc, argv)) {6491/* All the args are pool names */6492} else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {6493/* All the args are vdevs */6494cb.cb_vdevs.cb_names = argv;6495cb.cb_vdevs.cb_names_count = argc;6496argc = 0; /* No pools to process */6497} else if (are_all_pools(1, argv)) {6498/* The first arg is a pool name */6499if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],6500&cb.cb_vdevs)) {6501/* ...and the rest are vdev names */6502cb.cb_vdevs.cb_names = argv + 1;6503cb.cb_vdevs.cb_names_count = argc - 1;6504argc = 1; /* One pool to process */6505} else {6506fprintf(stderr, gettext("Expected either a list of "));6507fprintf(stderr, gettext("pools, or list of vdevs in"));6508fprintf(stderr, " \"%s\", ", argv[0]);6509fprintf(stderr, gettext("but got:\n"));6510error_list_unresolved_vdevs(argc - 1, argv + 1,6511argv[0], &cb.cb_vdevs);6512fprintf(stderr, "\n");6513usage(B_FALSE);6514}6515} else {6516/*6517* The args don't make sense. The first arg isn't a pool name,6518* nor are all the args vdevs.6519*/6520fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));6521fprintf(stderr, "\n");6522return (1);6523}65246525if (cb.cb_vdevs.cb_names_count != 0) {6526/*6527* If user specified vdevs, it implies verbose.6528*/6529cb.cb_verbose = B_TRUE;6530}65316532/*6533* Construct the list of all interesting pools.6534*/6535ret = 0;6536if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,6537&ret)) == NULL)6538return (1);65396540if (pool_list_count(list) == 0 && argc != 0) {6541pool_list_free(list);6542return (1);6543}65446545if (pool_list_count(list) == 0 && interval == 0) {6546pool_list_free(list);6547(void) fprintf(stderr, gettext("no pools available\n"));6548return (1);6549}65506551if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {6552pool_list_free(list);6553(void) fprintf(stderr,6554gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));6555usage(B_FALSE);6556}65576558if (l_histo && rq_histo) {6559pool_list_free(list);6560(void) fprintf(stderr,6561gettext("Only one of [-r|-w] can be passed at a time\n"));6562usage(B_FALSE);6563}65646565/*6566* Enter the main iostat loop.6567*/6568cb.cb_list = list;65696570if (l_histo) {6571/*6572* Histograms tables look out of place when you try to display6573* them with the other stats, so make a rule that you can only6574* print histograms by themselves.6575*/6576cb.cb_flags = IOS_L_HISTO_M;6577} else if (rq_histo) {6578cb.cb_flags = IOS_RQ_HISTO_M;6579} else {6580cb.cb_flags = IOS_DEFAULT_M;6581if (latency)6582cb.cb_flags |= IOS_LATENCY_M;6583if (queues)6584cb.cb_flags |= IOS_QUEUES_M;6585}65866587/*6588* See if the module supports all the stats we want to display.6589*/6590unsupported_flags = cb.cb_flags & ~get_stat_flags(list);6591if (unsupported_flags) {6592uint64_t f;6593int idx;6594fprintf(stderr,6595gettext("The loaded zfs module doesn't support:"));65966597/* for each bit set in unsupported_flags */6598for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {6599idx = lowbit64(f) - 1;6600fprintf(stderr, " -%c", flag_to_arg[idx]);6601}66026603fprintf(stderr, ". Try running a newer module.\n");6604pool_list_free(list);66056606return (1);6607}66086609int last_npools = 0;6610for (;;) {6611/*6612* Refresh all pools in list, adding or removing pools as6613* necessary.6614*/6615int npools = pool_list_refresh(list);6616if (npools == 0) {6617(void) fprintf(stderr, gettext("no pools available\n"));6618} else {6619/*6620* If the list of pools has changed since last time6621* around, reset the iteration count to force the6622* header to be redisplayed.6623*/6624if (last_npools != npools)6625cb.cb_iteration = 0;66266627/*6628* If this is the first iteration and -y was supplied6629* we skip any printing.6630*/6631boolean_t skip = (omit_since_boot &&6632cb.cb_iteration == 0);66336634/*6635* Iterate over all pools to determine the maximum width6636* for the pool / device name column across all pools.6637*/6638cb.cb_namewidth = 0;6639(void) pool_list_iter(list, B_FALSE,6640get_namewidth_iostat, &cb);66416642if (timestamp_fmt != NODATE)6643print_timestamp(timestamp_fmt);66446645if (cmd != NULL && cb.cb_verbose &&6646!(cb.cb_flags & IOS_ANYHISTO_M)) {6647cb.vcdl = all_pools_for_each_vdev_run(argc,6648argv, cmd, g_zfs, cb.cb_vdevs.cb_names,6649cb.cb_vdevs.cb_names_count,6650cb.cb_vdevs.cb_name_flags);6651} else {6652cb.vcdl = NULL;6653}665466556656/*6657* Check terminal size so we can print headers6658* even when terminal window has its height6659* changed.6660*/6661int winheight = terminal_height();6662/*6663* Are we connected to TTY? If not, headers_once6664* should be true, to avoid breaking scripts.6665*/6666if (winheight < 0)6667headers_once = B_TRUE;66686669/*6670* If it's the first time and we're not skipping it,6671* or either skip or verbose mode, print the header.6672*6673* The histogram code explicitly prints its header on6674* every vdev, so skip this for histograms.6675*/6676if (((++cb.cb_iteration == 1 && !skip) ||6677(skip != verbose) ||6678(!headers_once &&6679(cb.cb_iteration % winheight) == 0)) &&6680(!(cb.cb_flags & IOS_ANYHISTO_M)) &&6681!cb.cb_scripted)6682print_iostat_header(&cb);66836684if (skip) {6685(void) fflush(stdout);6686(void) fsleep(interval);6687last_npools = npools;6688continue;6689}66906691(void) pool_list_iter(list, B_FALSE, print_iostat, &cb);66926693/*6694* If there's more than one pool, and we're not in6695* verbose mode (which prints a separator for us),6696* then print a separator.6697*6698* In addition, if we're printing specific vdevs then6699* we also want an ending separator.6700*/6701if (((npools > 1 && !verbose &&6702!(cb.cb_flags & IOS_ANYHISTO_M)) ||6703(!(cb.cb_flags & IOS_ANYHISTO_M) &&6704cb.cb_vdevs.cb_names_count)) &&6705!cb.cb_scripted) {6706print_iostat_separator(&cb);6707if (cb.vcdl != NULL)6708print_cmd_columns(cb.vcdl, 1);6709printf("\n");6710}67116712if (cb.vcdl != NULL)6713free_vdev_cmd_data_list(cb.vcdl);67146715}67166717if (interval == 0)6718break;67196720if (count != 0 && --count == 0)6721break;67226723(void) fflush(stdout);6724(void) fsleep(interval);67256726last_npools = npools;6727}67286729pool_list_free(list);67306731return (ret);6732}67336734typedef struct list_cbdata {6735boolean_t cb_verbose;6736int cb_name_flags;6737int cb_namewidth;6738boolean_t cb_json;6739boolean_t cb_scripted;6740zprop_list_t *cb_proplist;6741boolean_t cb_literal;6742nvlist_t *cb_jsobj;6743boolean_t cb_json_as_int;6744boolean_t cb_json_pool_key_guid;6745} list_cbdata_t;674667476748/*6749* Given a list of columns to display, print an appropriate line. If6750* `vdev_name` is not NULL, we print `vdev_name` followed by a line of dashes.6751* If `vdev_name` is NULL, we print a line of the headers.6752*/6753static void6754print_line(list_cbdata_t *cb, const char *vdev_name)6755{6756zprop_list_t *pl = cb->cb_proplist;6757char headerbuf[ZPOOL_MAXPROPLEN];6758const char *header;6759boolean_t first = B_TRUE;6760boolean_t right_justify;6761size_t width = 0;67626763boolean_t print_header = (vdev_name == NULL);67646765for (; pl != NULL; pl = pl->pl_next) {6766width = pl->pl_width;6767if (first && cb->cb_verbose) {6768/*6769* Reset the width to accommodate the verbose listing6770* of devices.6771*/6772width = cb->cb_namewidth;6773}67746775if (!first)6776(void) fputs(" ", stdout);67776778if (print_header) {6779right_justify = B_FALSE;6780if (pl->pl_prop != ZPROP_USERPROP) {6781header = zpool_prop_column_name(pl->pl_prop);6782right_justify = zpool_prop_align_right(6783pl->pl_prop);6784} else {6785int i;67866787for (i = 0; pl->pl_user_prop[i] != '\0'; i++)6788headerbuf[i] = toupper(6789pl->pl_user_prop[i]);6790headerbuf[i] = '\0';6791header = headerbuf;6792}67936794}6795/*6796* If `print_header` is false, we want to print a line of6797* dashes.6798*/6799else {6800if (first) {6801header = vdev_name;6802right_justify = B_FALSE;6803} else {6804header = "-";6805right_justify = B_TRUE;6806}6807}68086809if (pl->pl_next == NULL && !right_justify)6810(void) fputs(header, stdout);6811else if (right_justify)6812(void) printf("%*s", (int)width, header);6813else6814(void) printf("%-*s", (int)width, header);68156816if (first)6817first = B_FALSE;6818}68196820(void) fputc('\n', stdout);6821}68226823/*6824* Given a pool and a list of properties, print out all the properties according6825* to the described layout. Used by zpool_do_list().6826*/6827static void6828collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb)6829{6830zprop_list_t *pl = cb->cb_proplist;6831boolean_t first = B_TRUE;6832char property[ZPOOL_MAXPROPLEN];6833const char *propstr;6834boolean_t right_justify;6835size_t width;6836zprop_source_t sourcetype = ZPROP_SRC_NONE;6837nvlist_t *item, *d, *props;6838item = d = props = NULL;68396840if (cb->cb_json) {6841item = fnvlist_alloc();6842props = fnvlist_alloc();6843d = fnvlist_lookup_nvlist(cb->cb_jsobj, "pools");6844if (d == NULL) {6845fprintf(stderr, "pools obj not found.\n");6846exit(1);6847}6848fill_pool_info(item, zhp, B_TRUE, cb->cb_json_as_int);6849}68506851for (; pl != NULL; pl = pl->pl_next) {68526853width = pl->pl_width;6854if (first && cb->cb_verbose) {6855/*6856* Reset the width to accommodate the verbose listing6857* of devices.6858*/6859width = cb->cb_namewidth;6860}68616862if (!cb->cb_json && !first) {6863if (cb->cb_scripted)6864(void) fputc('\t', stdout);6865else6866(void) fputs(" ", stdout);6867} else {6868first = B_FALSE;6869}68706871right_justify = B_FALSE;6872if (pl->pl_prop != ZPROP_USERPROP) {6873if (zpool_get_prop(zhp, pl->pl_prop, property,6874sizeof (property), &sourcetype,6875cb->cb_literal) != 0)6876propstr = "-";6877else6878propstr = property;68796880right_justify = zpool_prop_align_right(pl->pl_prop);6881} else if ((zpool_prop_feature(pl->pl_user_prop) ||6882zpool_prop_unsupported(pl->pl_user_prop)) &&6883zpool_prop_get_feature(zhp, pl->pl_user_prop, property,6884sizeof (property)) == 0) {6885propstr = property;6886sourcetype = ZPROP_SRC_LOCAL;6887} else if (zfs_prop_user(pl->pl_user_prop) &&6888zpool_get_userprop(zhp, pl->pl_user_prop, property,6889sizeof (property), &sourcetype) == 0) {6890propstr = property;6891} else {6892propstr = "-";6893}68946895if (cb->cb_json) {6896if (pl->pl_prop == ZPOOL_PROP_NAME)6897continue;6898const char *prop_name;6899if (pl->pl_prop != ZPROP_USERPROP)6900prop_name = zpool_prop_to_name(pl->pl_prop);6901else6902prop_name = pl->pl_user_prop;6903(void) zprop_nvlist_one_property(6904prop_name, propstr,6905sourcetype, NULL, NULL, props, cb->cb_json_as_int);6906} else {6907/*6908* If this is being called in scripted mode, or if this6909* is the last column and it is left-justified, don't6910* include a width format specifier.6911*/6912if (cb->cb_scripted || (pl->pl_next == NULL &&6913!right_justify))6914(void) fputs(propstr, stdout);6915else if (right_justify)6916(void) printf("%*s", (int)width, propstr);6917else6918(void) printf("%-*s", (int)width, propstr);6919}6920}69216922if (cb->cb_json) {6923fnvlist_add_nvlist(item, "properties", props);6924if (cb->cb_json_pool_key_guid) {6925char pool_guid[256];6926uint64_t guid = fnvlist_lookup_uint64(6927zpool_get_config(zhp, NULL),6928ZPOOL_CONFIG_POOL_GUID);6929(void) snprintf(pool_guid, 256, "%llu",6930(u_longlong_t)guid);6931fnvlist_add_nvlist(d, pool_guid, item);6932} else {6933fnvlist_add_nvlist(d, zpool_get_name(zhp),6934item);6935}6936fnvlist_free(props);6937fnvlist_free(item);6938} else6939(void) fputc('\n', stdout);6940}69416942static void6943collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str,6944boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format,6945boolean_t json, nvlist_t *nvl, boolean_t as_int)6946{6947char propval[64];6948boolean_t fixed;6949size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);69506951switch (prop) {6952case ZPOOL_PROP_SIZE:6953case ZPOOL_PROP_EXPANDSZ:6954case ZPOOL_PROP_CHECKPOINT:6955case ZPOOL_PROP_DEDUPRATIO:6956case ZPOOL_PROP_DEDUPCACHED:6957if (value == 0)6958(void) strlcpy(propval, "-", sizeof (propval));6959else6960zfs_nicenum_format(value, propval, sizeof (propval),6961format);6962break;6963case ZPOOL_PROP_FRAGMENTATION:6964if (value == ZFS_FRAG_INVALID) {6965(void) strlcpy(propval, "-", sizeof (propval));6966} else if (format == ZFS_NICENUM_RAW) {6967(void) snprintf(propval, sizeof (propval), "%llu",6968(unsigned long long)value);6969} else {6970(void) snprintf(propval, sizeof (propval), "%llu%%",6971(unsigned long long)value);6972}6973break;6974case ZPOOL_PROP_CAPACITY:6975/* capacity value is in parts-per-10,000 (aka permyriad) */6976if (format == ZFS_NICENUM_RAW)6977(void) snprintf(propval, sizeof (propval), "%llu",6978(unsigned long long)value / 100);6979else6980(void) snprintf(propval, sizeof (propval),6981value < 1000 ? "%1.2f%%" : value < 10000 ?6982"%2.1f%%" : "%3.0f%%", value / 100.0);6983break;6984case ZPOOL_PROP_HEALTH:6985width = 8;6986(void) strlcpy(propval, str, sizeof (propval));6987break;6988default:6989zfs_nicenum_format(value, propval, sizeof (propval), format);6990}69916992if (!valid)6993(void) strlcpy(propval, "-", sizeof (propval));69946995if (json) {6996(void) zprop_nvlist_one_property(zpool_prop_to_name(prop),6997propval, ZPROP_SRC_NONE, NULL, NULL, nvl, as_int);6998} else {6999if (scripted)7000(void) printf("\t%s", propval);7001else7002(void) printf(" %*s", (int)width, propval);7003}7004}70057006/*7007* print static default line per vdev7008*/7009static void7010collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,7011list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item)7012{7013nvlist_t **child;7014vdev_stat_t *vs;7015uint_t c, children = 0;7016char *vname;7017boolean_t scripted = cb->cb_scripted;7018uint64_t islog = B_FALSE;7019nvlist_t *props, *ent, *ch, *obj, *l2c, *sp;7020props = ent = ch = obj = sp = l2c = NULL;70217022verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,7023(uint64_t **)&vs, &c) == 0);70247025if (name != NULL) {7026boolean_t toplevel = (vs->vs_space != 0);7027uint64_t cap;7028enum zfs_nicenum_format format;7029const char *state;70307031if (cb->cb_literal)7032format = ZFS_NICENUM_RAW;7033else7034format = ZFS_NICENUM_1024;70357036if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)7037return;70387039if (cb->cb_json) {7040props = fnvlist_alloc();7041ent = fnvlist_alloc();7042fill_vdev_info(ent, zhp, (char *)name, B_FALSE,7043cb->cb_json_as_int);7044} else {7045if (scripted)7046(void) printf("\t%s", name);7047else if (strlen(name) + depth > cb->cb_namewidth)7048(void) printf("%*s%s", depth, "", name);7049else7050(void) printf("%*s%s%*s", depth, "", name,7051(int)(cb->cb_namewidth - strlen(name) -7052depth), "");7053}70547055/*7056* Print the properties for the individual vdevs. Some7057* properties are only applicable to toplevel vdevs. The7058* 'toplevel' boolean value is passed to the print_one_column()7059* to indicate that the value is valid.7060*/7061for (zprop_list_t *pl = cb->cb_proplist; pl != NULL;7062pl = pl->pl_next) {7063switch (pl->pl_prop) {7064case ZPOOL_PROP_SIZE:7065if (VDEV_STAT_VALID(vs_pspace, c) &&7066vs->vs_pspace) {7067collect_vdev_prop(7068ZPOOL_PROP_SIZE, vs->vs_pspace,7069NULL, scripted, B_TRUE, format,7070cb->cb_json, props,7071cb->cb_json_as_int);7072} else {7073collect_vdev_prop(7074ZPOOL_PROP_SIZE, vs->vs_space, NULL,7075scripted, toplevel, format,7076cb->cb_json, props,7077cb->cb_json_as_int);7078}7079break;7080case ZPOOL_PROP_ALLOCATED:7081collect_vdev_prop(ZPOOL_PROP_ALLOCATED,7082vs->vs_alloc, NULL, scripted, toplevel,7083format, cb->cb_json, props,7084cb->cb_json_as_int);7085break;70867087case ZPOOL_PROP_FREE:7088collect_vdev_prop(ZPOOL_PROP_FREE,7089vs->vs_space - vs->vs_alloc, NULL, scripted,7090toplevel, format, cb->cb_json, props,7091cb->cb_json_as_int);7092break;70937094case ZPOOL_PROP_CHECKPOINT:7095collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,7096vs->vs_checkpoint_space, NULL, scripted,7097toplevel, format, cb->cb_json, props,7098cb->cb_json_as_int);7099break;71007101case ZPOOL_PROP_EXPANDSZ:7102collect_vdev_prop(ZPOOL_PROP_EXPANDSZ,7103vs->vs_esize, NULL, scripted, B_TRUE,7104format, cb->cb_json, props,7105cb->cb_json_as_int);7106break;71077108case ZPOOL_PROP_FRAGMENTATION:7109collect_vdev_prop(7110ZPOOL_PROP_FRAGMENTATION,7111vs->vs_fragmentation, NULL, scripted,7112(vs->vs_fragmentation != ZFS_FRAG_INVALID &&7113toplevel),7114format, cb->cb_json, props,7115cb->cb_json_as_int);7116break;71177118case ZPOOL_PROP_CAPACITY:7119cap = (vs->vs_space == 0) ?71200 : (vs->vs_alloc * 10000 / vs->vs_space);7121collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap,7122NULL, scripted, toplevel, format,7123cb->cb_json, props, cb->cb_json_as_int);7124break;71257126case ZPOOL_PROP_HEALTH:7127state = zpool_state_to_name(vs->vs_state,7128vs->vs_aux);7129if (isspare) {7130if (vs->vs_aux == VDEV_AUX_SPARED)7131state = "INUSE";7132else if (vs->vs_state ==7133VDEV_STATE_HEALTHY)7134state = "AVAIL";7135}7136collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state,7137scripted, B_TRUE, format, cb->cb_json,7138props, cb->cb_json_as_int);7139break;71407141case ZPOOL_PROP_NAME:7142break;71437144default:7145collect_vdev_prop(pl->pl_prop, 0,7146NULL, scripted, B_FALSE, format,7147cb->cb_json, props, cb->cb_json_as_int);71487149}715071517152}71537154if (cb->cb_json) {7155fnvlist_add_nvlist(ent, "properties", props);7156fnvlist_free(props);7157} else7158(void) fputc('\n', stdout);7159}71607161if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,7162&child, &children) != 0) {7163if (cb->cb_json) {7164fnvlist_add_nvlist(item, name, ent);7165fnvlist_free(ent);7166}7167return;7168}71697170if (cb->cb_json) {7171ch = fnvlist_alloc();7172}71737174/* list the normal vdevs first */7175for (c = 0; c < children; c++) {7176uint64_t ishole = B_FALSE;71777178if (nvlist_lookup_uint64(child[c],7179ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)7180continue;71817182if (nvlist_lookup_uint64(child[c],7183ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)7184continue;71857186if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))7187continue;71887189vname = zpool_vdev_name(g_zfs, zhp, child[c],7190cb->cb_name_flags | VDEV_NAME_TYPE_ID);71917192if (name == NULL || cb->cb_json != B_TRUE)7193collect_list_stats(zhp, vname, child[c], cb, depth + 2,7194B_FALSE, item);7195else if (cb->cb_json) {7196collect_list_stats(zhp, vname, child[c], cb, depth + 2,7197B_FALSE, ch);7198}7199free(vname);7200}72017202if (cb->cb_json) {7203if (!nvlist_empty(ch))7204fnvlist_add_nvlist(ent, "vdevs", ch);7205fnvlist_free(ch);7206}72077208/* list the classes: 'logs', 'dedup', and 'special' */7209for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {7210boolean_t printed = B_FALSE;7211if (cb->cb_json)7212obj = fnvlist_alloc();7213for (c = 0; c < children; c++) {7214const char *bias = NULL;7215const char *type = NULL;72167217if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,7218&islog) == 0 && islog) {7219bias = VDEV_ALLOC_CLASS_LOGS;7220} else {7221(void) nvlist_lookup_string(child[c],7222ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);7223(void) nvlist_lookup_string(child[c],7224ZPOOL_CONFIG_TYPE, &type);7225}7226if (bias == NULL || strcmp(bias, class_name[n]) != 0)7227continue;7228if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)7229continue;72307231if (!printed && !cb->cb_json) {7232print_line(cb, class_name[n]);7233printed = B_TRUE;7234}7235vname = zpool_vdev_name(g_zfs, zhp, child[c],7236cb->cb_name_flags | VDEV_NAME_TYPE_ID);7237collect_list_stats(zhp, vname, child[c], cb, depth + 2,7238B_FALSE, obj);7239free(vname);7240}7241if (cb->cb_json) {7242if (!nvlist_empty(obj))7243fnvlist_add_nvlist(item, class_name[n], obj);7244fnvlist_free(obj);7245}7246}72477248if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,7249&child, &children) == 0 && children > 0) {7250if (cb->cb_json) {7251l2c = fnvlist_alloc();7252} else {7253print_line(cb, "cache");7254}7255for (c = 0; c < children; c++) {7256vname = zpool_vdev_name(g_zfs, zhp, child[c],7257cb->cb_name_flags);7258collect_list_stats(zhp, vname, child[c], cb, depth + 2,7259B_FALSE, l2c);7260free(vname);7261}7262if (cb->cb_json) {7263if (!nvlist_empty(l2c))7264fnvlist_add_nvlist(item, "l2cache", l2c);7265fnvlist_free(l2c);7266}7267}72687269if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,7270&children) == 0 && children > 0) {7271if (cb->cb_json) {7272sp = fnvlist_alloc();7273} else {7274print_line(cb, "spare");7275}7276for (c = 0; c < children; c++) {7277vname = zpool_vdev_name(g_zfs, zhp, child[c],7278cb->cb_name_flags);7279collect_list_stats(zhp, vname, child[c], cb, depth + 2,7280B_TRUE, sp);7281free(vname);7282}7283if (cb->cb_json) {7284if (!nvlist_empty(sp))7285fnvlist_add_nvlist(item, "spares", sp);7286fnvlist_free(sp);7287}7288}72897290if (name != NULL && cb->cb_json) {7291fnvlist_add_nvlist(item, name, ent);7292fnvlist_free(ent);7293}7294}72957296/*7297* Generic callback function to list a pool.7298*/7299static int7300list_callback(zpool_handle_t *zhp, void *data)7301{7302nvlist_t *p, *d, *nvdevs;7303uint64_t guid;7304char pool_guid[256];7305const char *pool_name = zpool_get_name(zhp);7306list_cbdata_t *cbp = data;7307p = d = nvdevs = NULL;73087309collect_pool(zhp, cbp);73107311if (cbp->cb_verbose) {7312nvlist_t *config, *nvroot;7313config = zpool_get_config(zhp, NULL);7314verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,7315&nvroot) == 0);7316if (cbp->cb_json) {7317d = fnvlist_lookup_nvlist(cbp->cb_jsobj,7318"pools");7319if (cbp->cb_json_pool_key_guid) {7320guid = fnvlist_lookup_uint64(config,7321ZPOOL_CONFIG_POOL_GUID);7322(void) snprintf(pool_guid, 256, "%llu",7323(u_longlong_t)guid);7324p = fnvlist_lookup_nvlist(d, pool_guid);7325} else {7326p = fnvlist_lookup_nvlist(d, pool_name);7327}7328nvdevs = fnvlist_alloc();7329}7330collect_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE, nvdevs);7331if (cbp->cb_json) {7332fnvlist_add_nvlist(p, "vdevs", nvdevs);7333if (cbp->cb_json_pool_key_guid)7334fnvlist_add_nvlist(d, pool_guid, p);7335else7336fnvlist_add_nvlist(d, pool_name, p);7337fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);7338fnvlist_free(nvdevs);7339}7340}73417342return (0);7343}73447345/*7346* Set the minimum pool/vdev name column width. The width must be at least 9,7347* but may be as large as needed.7348*/7349static int7350get_namewidth_list(zpool_handle_t *zhp, void *data)7351{7352list_cbdata_t *cb = data;7353int width;73547355width = get_namewidth(zhp, cb->cb_namewidth,7356cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);73577358if (width < 9)7359width = 9;73607361cb->cb_namewidth = width;73627363return (0);7364}73657366/*7367* zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]7368*7369* -g Display guid for individual vdev name.7370* -H Scripted mode. Don't display headers, and separate properties7371* by a single tab.7372* -L Follow links when resolving vdev path name.7373* -o List of properties to display. Defaults to7374* "name,size,allocated,free,expandsize,fragmentation,capacity,"7375* "dedupratio,health,altroot"7376* -p Display values in parsable (exact) format.7377* -P Display full path for vdev name.7378* -T Display a timestamp in date(1) or Unix format7379* -j Display the output in JSON format7380* --json-int Display the numbers as integer instead of strings.7381* --json-pool-key-guid Set pool GUID as key for pool objects.7382*7383* List all pools in the system, whether or not they're healthy. Output space7384* statistics for each one, as well as health status summary.7385*/7386int7387zpool_do_list(int argc, char **argv)7388{7389int c;7390int ret = 0;7391list_cbdata_t cb = { 0 };7392static char default_props[] =7393"name,size,allocated,free,checkpoint,expandsize,fragmentation,"7394"capacity,dedupratio,health,altroot";7395char *props = default_props;7396float interval = 0;7397unsigned long count = 0;7398zpool_list_t *list;7399boolean_t first = B_TRUE;7400nvlist_t *data = NULL;7401current_prop_type = ZFS_TYPE_POOL;74027403struct option long_options[] = {7404{"json", no_argument, NULL, 'j'},7405{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},7406{"json-pool-key-guid", no_argument, NULL,7407ZPOOL_OPTION_POOL_KEY_GUID},7408{0, 0, 0, 0}7409};74107411/* check options */7412while ((c = getopt_long(argc, argv, ":gjHLo:pPT:v", long_options,7413NULL)) != -1) {7414switch (c) {7415case 'g':7416cb.cb_name_flags |= VDEV_NAME_GUID;7417break;7418case 'H':7419cb.cb_scripted = B_TRUE;7420break;7421case 'L':7422cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;7423break;7424case 'o':7425props = optarg;7426break;7427case 'P':7428cb.cb_name_flags |= VDEV_NAME_PATH;7429break;7430case 'p':7431cb.cb_literal = B_TRUE;7432break;7433case 'j':7434cb.cb_json = B_TRUE;7435break;7436case ZPOOL_OPTION_JSON_NUMS_AS_INT:7437cb.cb_json_as_int = B_TRUE;7438cb.cb_literal = B_TRUE;7439break;7440case ZPOOL_OPTION_POOL_KEY_GUID:7441cb.cb_json_pool_key_guid = B_TRUE;7442break;7443case 'T':7444get_timestamp_arg(*optarg);7445break;7446case 'v':7447cb.cb_verbose = B_TRUE;7448cb.cb_namewidth = 8; /* 8 until precalc is avail */7449break;7450case ':':7451(void) fprintf(stderr, gettext("missing argument for "7452"'%c' option\n"), optopt);7453usage(B_FALSE);7454break;7455case '?':7456(void) fprintf(stderr, gettext("invalid option '%c'\n"),7457optopt);7458usage(B_FALSE);7459}7460}74617462argc -= optind;7463argv += optind;74647465if (!cb.cb_json && cb.cb_json_as_int) {7466(void) fprintf(stderr, gettext("'--json-int' only works with"7467" '-j' option\n"));7468usage(B_FALSE);7469}74707471if (!cb.cb_json && cb.cb_json_pool_key_guid) {7472(void) fprintf(stderr, gettext("'json-pool-key-guid' only"7473" works with '-j' option\n"));7474usage(B_FALSE);7475}74767477get_interval_count(&argc, argv, &interval, &count);74787479if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)7480usage(B_FALSE);74817482for (;;) {7483if ((list = pool_list_get(argc, argv, &cb.cb_proplist,7484ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)7485return (1);74867487if (pool_list_count(list) == 0)7488break;74897490if (cb.cb_json) {7491cb.cb_jsobj = zpool_json_schema(0, 1);7492data = fnvlist_alloc();7493fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);7494fnvlist_free(data);7495}74967497cb.cb_namewidth = 0;7498(void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);74997500if (timestamp_fmt != NODATE) {7501if (cb.cb_json) {7502if (cb.cb_json_as_int) {7503fnvlist_add_uint64(cb.cb_jsobj, "time",7504time(NULL));7505} else {7506char ts[128];7507get_timestamp(timestamp_fmt, ts, 128);7508fnvlist_add_string(cb.cb_jsobj, "time",7509ts);7510}7511} else7512print_timestamp(timestamp_fmt);7513}75147515if (!cb.cb_scripted && (first || cb.cb_verbose) &&7516!cb.cb_json) {7517print_line(&cb, NULL);7518first = B_FALSE;7519}7520ret = pool_list_iter(list, B_TRUE, list_callback, &cb);75217522if (ret == 0 && cb.cb_json)7523zcmd_print_json(cb.cb_jsobj);7524else if (ret != 0 && cb.cb_json)7525nvlist_free(cb.cb_jsobj);75267527if (interval == 0)7528break;75297530if (count != 0 && --count == 0)7531break;75327533pool_list_free(list);75347535(void) fflush(stdout);7536(void) fsleep(interval);7537}75387539if (argc == 0 && !cb.cb_scripted && !cb.cb_json &&7540pool_list_count(list) == 0) {7541(void) printf(gettext("no pools available\n"));7542ret = 0;7543}75447545pool_list_free(list);7546zprop_free_list(cb.cb_proplist);7547return (ret);7548}75497550static int7551zpool_do_attach_or_replace(int argc, char **argv, int replacing)7552{7553boolean_t force = B_FALSE;7554boolean_t rebuild = B_FALSE;7555boolean_t wait = B_FALSE;7556int c;7557nvlist_t *nvroot;7558char *poolname, *old_disk, *new_disk;7559zpool_handle_t *zhp;7560nvlist_t *props = NULL;7561char *propval;7562int ret;75637564/* check options */7565while ((c = getopt(argc, argv, "fo:sw")) != -1) {7566switch (c) {7567case 'f':7568force = B_TRUE;7569break;7570case 'o':7571if ((propval = strchr(optarg, '=')) == NULL) {7572(void) fprintf(stderr, gettext("missing "7573"'=' for -o option\n"));7574usage(B_FALSE);7575}7576*propval = '\0';7577propval++;75787579if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||7580(add_prop_list(optarg, propval, &props, B_TRUE)))7581usage(B_FALSE);7582break;7583case 's':7584rebuild = B_TRUE;7585break;7586case 'w':7587wait = B_TRUE;7588break;7589case '?':7590(void) fprintf(stderr, gettext("invalid option '%c'\n"),7591optopt);7592usage(B_FALSE);7593}7594}75957596argc -= optind;7597argv += optind;75987599/* get pool name and check number of arguments */7600if (argc < 1) {7601(void) fprintf(stderr, gettext("missing pool name argument\n"));7602usage(B_FALSE);7603}76047605poolname = argv[0];76067607if (argc < 2) {7608(void) fprintf(stderr,7609gettext("missing <device> specification\n"));7610usage(B_FALSE);7611}76127613old_disk = argv[1];76147615if (argc < 3) {7616if (!replacing) {7617(void) fprintf(stderr,7618gettext("missing <new_device> specification\n"));7619usage(B_FALSE);7620}7621new_disk = old_disk;7622argc -= 1;7623argv += 1;7624} else {7625new_disk = argv[2];7626argc -= 2;7627argv += 2;7628}76297630if (argc > 1) {7631(void) fprintf(stderr, gettext("too many arguments\n"));7632usage(B_FALSE);7633}76347635if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {7636nvlist_free(props);7637return (1);7638}76397640if (zpool_get_config(zhp, NULL) == NULL) {7641(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),7642poolname);7643zpool_close(zhp);7644nvlist_free(props);7645return (1);7646}76477648/* unless manually specified use "ashift" pool property (if set) */7649if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {7650int intval;7651zprop_source_t src;7652char strval[ZPOOL_MAXPROPLEN];76537654intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);7655if (src != ZPROP_SRC_DEFAULT) {7656(void) sprintf(strval, "%" PRId32, intval);7657verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,7658&props, B_TRUE) == 0);7659}7660}76617662nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,7663argc, argv);7664if (nvroot == NULL) {7665zpool_close(zhp);7666nvlist_free(props);7667return (1);7668}76697670ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,7671rebuild);76727673if (ret == 0 && wait) {7674zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;7675char raidz_prefix[] = "raidz";7676if (replacing) {7677activity = ZPOOL_WAIT_REPLACE;7678} else if (strncmp(old_disk,7679raidz_prefix, strlen(raidz_prefix)) == 0) {7680activity = ZPOOL_WAIT_RAIDZ_EXPAND;7681}7682ret = zpool_wait(zhp, activity);7683}76847685nvlist_free(props);7686nvlist_free(nvroot);7687zpool_close(zhp);76887689return (ret);7690}76917692/*7693* zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>7694*7695* -f Force attach, even if <new_device> appears to be in use.7696* -s Use sequential instead of healing reconstruction for resilver.7697* -o Set property=value.7698* -w Wait for replacing to complete before returning7699*7700* Replace <device> with <new_device>.7701*/7702int7703zpool_do_replace(int argc, char **argv)7704{7705return (zpool_do_attach_or_replace(argc, argv, B_TRUE));7706}77077708/*7709* zpool attach [-fsw] [-o property=value] <pool> <vdev> <new_device>7710*7711* -f Force attach, even if <new_device> appears to be in use.7712* -s Use sequential instead of healing reconstruction for resilver.7713* -o Set property=value.7714* -w Wait for resilvering (mirror) or expansion (raidz) to complete7715* before returning.7716*7717* Attach <new_device> to a <vdev>, where the vdev can be of type7718* device, mirror or raidz. If <vdev> is not part of a mirror, then <vdev> will7719* be transformed into a mirror of <vdev> and <new_device>. When a mirror7720* is involved, <new_device> will begin life with a DTL of [0, now], and will7721* immediately begin to resilver itself. For the raidz case, a expansion will7722* commence and reflow the raidz data across all the disks including the7723* <new_device>.7724*/7725int7726zpool_do_attach(int argc, char **argv)7727{7728return (zpool_do_attach_or_replace(argc, argv, B_FALSE));7729}77307731/*7732* zpool detach [-f] <pool> <device>7733*7734* -f Force detach of <device>, even if DTLs argue against it7735* (not supported yet)7736*7737* Detach a device from a mirror. The operation will be refused if <device>7738* is the last device in the mirror, or if the DTLs indicate that this device7739* has the only valid copy of some data.7740*/7741int7742zpool_do_detach(int argc, char **argv)7743{7744int c;7745char *poolname, *path;7746zpool_handle_t *zhp;7747int ret;77487749/* check options */7750while ((c = getopt(argc, argv, "")) != -1) {7751switch (c) {7752case '?':7753(void) fprintf(stderr, gettext("invalid option '%c'\n"),7754optopt);7755usage(B_FALSE);7756}7757}77587759argc -= optind;7760argv += optind;77617762/* get pool name and check number of arguments */7763if (argc < 1) {7764(void) fprintf(stderr, gettext("missing pool name argument\n"));7765usage(B_FALSE);7766}77677768if (argc < 2) {7769(void) fprintf(stderr,7770gettext("missing <device> specification\n"));7771usage(B_FALSE);7772}77737774poolname = argv[0];7775path = argv[1];77767777if ((zhp = zpool_open(g_zfs, poolname)) == NULL)7778return (1);77797780ret = zpool_vdev_detach(zhp, path);77817782zpool_close(zhp);77837784return (ret);7785}77867787/*7788* zpool split [-gLnP] [-o prop=val] ...7789* [-o mntopt] ...7790* [-R altroot] <pool> <newpool> [<device> ...]7791*7792* -g Display guid for individual vdev name.7793* -L Follow links when resolving vdev path name.7794* -n Do not split the pool, but display the resulting layout if7795* it were to be split.7796* -o Set property=value, or set mount options.7797* -P Display full path for vdev name.7798* -R Mount the split-off pool under an alternate root.7799* -l Load encryption keys while importing.7800*7801* Splits the named pool and gives it the new pool name. Devices to be split7802* off may be listed, provided that no more than one device is specified7803* per top-level vdev mirror. The newly split pool is left in an exported7804* state unless -R is specified.7805*7806* Restrictions: the top-level of the pool pool must only be made up of7807* mirrors; all devices in the pool must be healthy; no device may be7808* undergoing a resilvering operation.7809*/7810int7811zpool_do_split(int argc, char **argv)7812{7813char *srcpool, *newpool, *propval;7814char *mntopts = NULL;7815splitflags_t flags;7816int c, ret = 0;7817int ms_status = 0;7818boolean_t loadkeys = B_FALSE;7819zpool_handle_t *zhp;7820nvlist_t *config, *props = NULL;78217822flags.dryrun = B_FALSE;7823flags.import = B_FALSE;7824flags.name_flags = 0;78257826/* check options */7827while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {7828switch (c) {7829case 'g':7830flags.name_flags |= VDEV_NAME_GUID;7831break;7832case 'L':7833flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;7834break;7835case 'R':7836flags.import = B_TRUE;7837if (add_prop_list(7838zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,7839&props, B_TRUE) != 0) {7840nvlist_free(props);7841usage(B_FALSE);7842}7843break;7844case 'l':7845loadkeys = B_TRUE;7846break;7847case 'n':7848flags.dryrun = B_TRUE;7849break;7850case 'o':7851if ((propval = strchr(optarg, '=')) != NULL) {7852*propval = '\0';7853propval++;7854if (add_prop_list(optarg, propval,7855&props, B_TRUE) != 0) {7856nvlist_free(props);7857usage(B_FALSE);7858}7859} else {7860mntopts = optarg;7861}7862break;7863case 'P':7864flags.name_flags |= VDEV_NAME_PATH;7865break;7866case ':':7867(void) fprintf(stderr, gettext("missing argument for "7868"'%c' option\n"), optopt);7869usage(B_FALSE);7870break;7871case '?':7872(void) fprintf(stderr, gettext("invalid option '%c'\n"),7873optopt);7874usage(B_FALSE);7875break;7876}7877}78787879if (!flags.import && mntopts != NULL) {7880(void) fprintf(stderr, gettext("setting mntopts is only "7881"valid when importing the pool\n"));7882usage(B_FALSE);7883}78847885if (!flags.import && loadkeys) {7886(void) fprintf(stderr, gettext("loading keys is only "7887"valid when importing the pool\n"));7888usage(B_FALSE);7889}78907891argc -= optind;7892argv += optind;78937894if (argc < 1) {7895(void) fprintf(stderr, gettext("Missing pool name\n"));7896usage(B_FALSE);7897}7898if (argc < 2) {7899(void) fprintf(stderr, gettext("Missing new pool name\n"));7900usage(B_FALSE);7901}79027903srcpool = argv[0];7904newpool = argv[1];79057906argc -= 2;7907argv += 2;79087909if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {7910nvlist_free(props);7911return (1);7912}79137914config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);7915if (config == NULL) {7916ret = 1;7917} else {7918if (flags.dryrun) {7919(void) printf(gettext("would create '%s' with the "7920"following layout:\n\n"), newpool);7921print_vdev_tree(NULL, newpool, config, 0, "",7922flags.name_flags);7923print_vdev_tree(NULL, "dedup", config, 0,7924VDEV_ALLOC_BIAS_DEDUP, 0);7925print_vdev_tree(NULL, "special", config, 0,7926VDEV_ALLOC_BIAS_SPECIAL, 0);7927}7928}79297930zpool_close(zhp);79317932if (ret != 0 || flags.dryrun || !flags.import) {7933nvlist_free(config);7934nvlist_free(props);7935return (ret);7936}79377938/*7939* The split was successful. Now we need to open the new7940* pool and import it.7941*/7942if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {7943nvlist_free(config);7944nvlist_free(props);7945return (1);7946}79477948if (loadkeys) {7949ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);7950if (ret != 0)7951ret = 1;7952}79537954if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {7955ms_status = zpool_enable_datasets(zhp, mntopts, 0,7956mount_tp_nthr);7957if (ms_status == EZFS_SHAREFAILED) {7958(void) fprintf(stderr, gettext("Split was successful, "7959"datasets are mounted but sharing of some datasets "7960"has failed\n"));7961} else if (ms_status == EZFS_MOUNTFAILED) {7962(void) fprintf(stderr, gettext("Split was successful"7963", but some datasets could not be mounted\n"));7964(void) fprintf(stderr, gettext("Try doing '%s' with a "7965"different altroot\n"), "zpool import");7966}7967}7968zpool_close(zhp);7969nvlist_free(config);7970nvlist_free(props);79717972return (ret);7973}797479757976/*7977* zpool online [--power] <pool> <device> ...7978*7979* --power: Power on the enclosure slot to the drive (if possible)7980*/7981int7982zpool_do_online(int argc, char **argv)7983{7984int c, i;7985char *poolname;7986zpool_handle_t *zhp;7987int ret = 0;7988vdev_state_t newstate;7989int flags = 0;7990boolean_t is_power_on = B_FALSE;7991struct option long_options[] = {7992{"power", no_argument, NULL, ZPOOL_OPTION_POWER},7993{0, 0, 0, 0}7994};79957996/* check options */7997while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {7998switch (c) {7999case 'e':8000flags |= ZFS_ONLINE_EXPAND;8001break;8002case ZPOOL_OPTION_POWER:8003is_power_on = B_TRUE;8004break;8005case '?':8006(void) fprintf(stderr, gettext("invalid option '%c'\n"),8007optopt);8008usage(B_FALSE);8009}8010}80118012if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))8013is_power_on = B_TRUE;80148015argc -= optind;8016argv += optind;80178018/* get pool name and check number of arguments */8019if (argc < 1) {8020(void) fprintf(stderr, gettext("missing pool name\n"));8021usage(B_FALSE);8022}8023if (argc < 2) {8024(void) fprintf(stderr, gettext("missing device name\n"));8025usage(B_FALSE);8026}80278028poolname = argv[0];80298030if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {8031(void) fprintf(stderr, gettext("failed to open pool "8032"\"%s\""), poolname);8033return (1);8034}80358036for (i = 1; i < argc; i++) {8037vdev_state_t oldstate;8038boolean_t avail_spare, l2cache;8039int rc;80408041if (is_power_on) {8042rc = zpool_power_on_and_disk_wait(zhp, argv[i]);8043if (rc == ENOTSUP) {8044(void) fprintf(stderr,8045gettext("Power control not supported\n"));8046}8047if (rc != 0)8048return (rc);8049}80508051nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,8052&l2cache, NULL);8053if (tgt == NULL) {8054ret = 1;8055(void) fprintf(stderr, gettext("couldn't find device "8056"\"%s\" in pool \"%s\"\n"), argv[i], poolname);8057continue;8058}8059uint_t vsc;8060oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,8061ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;8062if ((rc = zpool_vdev_online(zhp, argv[i], flags,8063&newstate)) == 0) {8064if (newstate != VDEV_STATE_HEALTHY) {8065(void) printf(gettext("warning: device '%s' "8066"onlined, but remains in faulted state\n"),8067argv[i]);8068if (newstate == VDEV_STATE_FAULTED)8069(void) printf(gettext("use 'zpool "8070"clear' to restore a faulted "8071"device\n"));8072else8073(void) printf(gettext("use 'zpool "8074"replace' to replace devices "8075"that are no longer present\n"));8076if ((flags & ZFS_ONLINE_EXPAND)) {8077(void) printf(gettext("%s: failed "8078"to expand usable space on "8079"unhealthy device '%s'\n"),8080(oldstate >= VDEV_STATE_DEGRADED ?8081"error" : "warning"), argv[i]);8082if (oldstate >= VDEV_STATE_DEGRADED) {8083ret = 1;8084break;8085}8086}8087}8088} else {8089(void) fprintf(stderr, gettext("Failed to online "8090"\"%s\" in pool \"%s\": %d\n"),8091argv[i], poolname, rc);8092ret = 1;8093}8094}80958096zpool_close(zhp);80978098return (ret);8099}81008101/*8102* zpool offline [-ft]|[--power] <pool> <device> ...8103*8104*8105* -f Force the device into a faulted state.8106*8107* -t Only take the device off-line temporarily. The offline/faulted8108* state will not be persistent across reboots.8109*8110* --power Power off the enclosure slot to the drive (if possible)8111*/8112int8113zpool_do_offline(int argc, char **argv)8114{8115int c, i;8116char *poolname;8117zpool_handle_t *zhp;8118int ret = 0;8119boolean_t istmp = B_FALSE;8120boolean_t fault = B_FALSE;8121boolean_t is_power_off = B_FALSE;81228123struct option long_options[] = {8124{"power", no_argument, NULL, ZPOOL_OPTION_POWER},8125{0, 0, 0, 0}8126};81278128/* check options */8129while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {8130switch (c) {8131case 'f':8132fault = B_TRUE;8133break;8134case 't':8135istmp = B_TRUE;8136break;8137case ZPOOL_OPTION_POWER:8138is_power_off = B_TRUE;8139break;8140case '?':8141(void) fprintf(stderr, gettext("invalid option '%c'\n"),8142optopt);8143usage(B_FALSE);8144}8145}81468147if (is_power_off && fault) {8148(void) fprintf(stderr,8149gettext("-0 and -f cannot be used together\n"));8150usage(B_FALSE);8151}81528153if (is_power_off && istmp) {8154(void) fprintf(stderr,8155gettext("-0 and -t cannot be used together\n"));8156usage(B_FALSE);8157}81588159argc -= optind;8160argv += optind;81618162/* get pool name and check number of arguments */8163if (argc < 1) {8164(void) fprintf(stderr, gettext("missing pool name\n"));8165usage(B_FALSE);8166}8167if (argc < 2) {8168(void) fprintf(stderr, gettext("missing device name\n"));8169usage(B_FALSE);8170}81718172poolname = argv[0];81738174if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {8175(void) fprintf(stderr, gettext("failed to open pool "8176"\"%s\""), poolname);8177return (1);8178}81798180for (i = 1; i < argc; i++) {8181uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);8182if (is_power_off) {8183/*8184* Note: we have to power off first, then set REMOVED,8185* or else zpool_vdev_set_removed_state() returns8186* EAGAIN.8187*/8188ret = zpool_power_off(zhp, argv[i]);8189if (ret != 0) {8190(void) fprintf(stderr, "%s %s %d\n",8191gettext("unable to power off slot for"),8192argv[i], ret);8193}8194(void) zpool_vdev_set_removed_state(zhp, guid,8195VDEV_AUX_NONE);81968197} else if (fault) {8198vdev_aux_t aux;8199if (istmp == B_FALSE) {8200/* Force the fault to persist across imports */8201aux = VDEV_AUX_EXTERNAL_PERSIST;8202} else {8203aux = VDEV_AUX_EXTERNAL;8204}82058206if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)8207ret = 1;8208} else {8209if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)8210ret = 1;8211}8212}82138214zpool_close(zhp);82158216return (ret);8217}82188219/*8220* zpool clear [-nF]|[--power] <pool> [device]8221*8222* Clear all errors associated with a pool or a particular device.8223*/8224int8225zpool_do_clear(int argc, char **argv)8226{8227int c;8228int ret = 0;8229boolean_t dryrun = B_FALSE;8230boolean_t do_rewind = B_FALSE;8231boolean_t xtreme_rewind = B_FALSE;8232boolean_t is_power_on = B_FALSE;8233uint32_t rewind_policy = ZPOOL_NO_REWIND;8234nvlist_t *policy = NULL;8235zpool_handle_t *zhp;8236char *pool, *device;82378238struct option long_options[] = {8239{"power", no_argument, NULL, ZPOOL_OPTION_POWER},8240{0, 0, 0, 0}8241};82428243/* check options */8244while ((c = getopt_long(argc, argv, "FnX", long_options,8245NULL)) != -1) {8246switch (c) {8247case 'F':8248do_rewind = B_TRUE;8249break;8250case 'n':8251dryrun = B_TRUE;8252break;8253case 'X':8254xtreme_rewind = B_TRUE;8255break;8256case ZPOOL_OPTION_POWER:8257is_power_on = B_TRUE;8258break;8259case '?':8260(void) fprintf(stderr, gettext("invalid option '%c'\n"),8261optopt);8262usage(B_FALSE);8263}8264}82658266if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))8267is_power_on = B_TRUE;82688269argc -= optind;8270argv += optind;82718272if (argc < 1) {8273(void) fprintf(stderr, gettext("missing pool name\n"));8274usage(B_FALSE);8275}82768277if (argc > 2) {8278(void) fprintf(stderr, gettext("too many arguments\n"));8279usage(B_FALSE);8280}82818282if ((dryrun || xtreme_rewind) && !do_rewind) {8283(void) fprintf(stderr,8284gettext("-n or -X only meaningful with -F\n"));8285usage(B_FALSE);8286}8287if (dryrun)8288rewind_policy = ZPOOL_TRY_REWIND;8289else if (do_rewind)8290rewind_policy = ZPOOL_DO_REWIND;8291if (xtreme_rewind)8292rewind_policy |= ZPOOL_EXTREME_REWIND;82938294/* In future, further rewind policy choices can be passed along here */8295if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||8296nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,8297rewind_policy) != 0) {8298return (1);8299}83008301pool = argv[0];8302device = argc == 2 ? argv[1] : NULL;83038304if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {8305nvlist_free(policy);8306return (1);8307}83088309if (is_power_on) {8310if (device == NULL) {8311(void) zpool_power_on_pool_and_wait_for_devices(zhp);8312} else {8313(void) zpool_power_on_and_disk_wait(zhp, device);8314}8315}83168317if (zpool_clear(zhp, device, policy) != 0)8318ret = 1;83198320zpool_close(zhp);83218322nvlist_free(policy);83238324return (ret);8325}83268327/*8328* zpool reguid [-g <guid>] <pool>8329*/8330int8331zpool_do_reguid(int argc, char **argv)8332{8333uint64_t guid;8334uint64_t *guidp = NULL;8335int c;8336char *endptr;8337char *poolname;8338zpool_handle_t *zhp;8339int ret = 0;83408341/* check options */8342while ((c = getopt(argc, argv, "g:")) != -1) {8343switch (c) {8344case 'g':8345errno = 0;8346guid = strtoull(optarg, &endptr, 10);8347if (errno != 0 || *endptr != '\0') {8348(void) fprintf(stderr,8349gettext("invalid GUID: %s\n"), optarg);8350usage(B_FALSE);8351}8352guidp = &guid;8353break;8354case '?':8355(void) fprintf(stderr, gettext("invalid option '%c'\n"),8356optopt);8357usage(B_FALSE);8358}8359}83608361argc -= optind;8362argv += optind;83638364/* get pool name and check number of arguments */8365if (argc < 1) {8366(void) fprintf(stderr, gettext("missing pool name\n"));8367usage(B_FALSE);8368}83698370if (argc > 1) {8371(void) fprintf(stderr, gettext("too many arguments\n"));8372usage(B_FALSE);8373}83748375poolname = argv[0];8376if ((zhp = zpool_open(g_zfs, poolname)) == NULL)8377return (1);83788379ret = zpool_set_guid(zhp, guidp);83808381zpool_close(zhp);8382return (ret);8383}838483858386/*8387* zpool reopen <pool>8388*8389* Reopen the pool so that the kernel can update the sizes of all vdevs.8390*/8391int8392zpool_do_reopen(int argc, char **argv)8393{8394int c;8395int ret = 0;8396boolean_t scrub_restart = B_TRUE;83978398/* check options */8399while ((c = getopt(argc, argv, "n")) != -1) {8400switch (c) {8401case 'n':8402scrub_restart = B_FALSE;8403break;8404case '?':8405(void) fprintf(stderr, gettext("invalid option '%c'\n"),8406optopt);8407usage(B_FALSE);8408}8409}84108411argc -= optind;8412argv += optind;84138414/* if argc == 0 we will execute zpool_reopen_one on all pools */8415ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,8416B_FALSE, zpool_reopen_one, &scrub_restart);84178418return (ret);8419}84208421typedef struct scrub_cbdata {8422int cb_type;8423pool_scrub_cmd_t cb_scrub_cmd;8424time_t cb_date_start;8425time_t cb_date_end;8426} scrub_cbdata_t;84278428static boolean_t8429zpool_has_checkpoint(zpool_handle_t *zhp)8430{8431nvlist_t *config, *nvroot;84328433config = zpool_get_config(zhp, NULL);84348435if (config != NULL) {8436pool_checkpoint_stat_t *pcs = NULL;8437uint_t c;84388439nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);8440(void) nvlist_lookup_uint64_array(nvroot,8441ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);84428443if (pcs == NULL || pcs->pcs_state == CS_NONE)8444return (B_FALSE);84458446assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||8447pcs->pcs_state == CS_CHECKPOINT_DISCARDING);8448return (B_TRUE);8449}84508451return (B_FALSE);8452}84538454static int8455scrub_callback(zpool_handle_t *zhp, void *data)8456{8457scrub_cbdata_t *cb = data;8458int err;84598460/*8461* Ignore faulted pools.8462*/8463if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {8464(void) fprintf(stderr, gettext("cannot scan '%s': pool is "8465"currently unavailable\n"), zpool_get_name(zhp));8466return (1);8467}84688469err = zpool_scan_range(zhp, cb->cb_type, cb->cb_scrub_cmd,8470cb->cb_date_start, cb->cb_date_end);8471if (err == 0 && zpool_has_checkpoint(zhp) &&8472cb->cb_type == POOL_SCAN_SCRUB) {8473(void) printf(gettext("warning: will not scrub state that "8474"belongs to the checkpoint of pool '%s'\n"),8475zpool_get_name(zhp));8476}84778478return (err != 0);8479}84808481static int8482wait_callback(zpool_handle_t *zhp, void *data)8483{8484zpool_wait_activity_t *act = data;8485return (zpool_wait(zhp, *act));8486}84878488static time_t8489date_string_to_sec(const char *timestr, boolean_t rounding)8490{8491struct tm tm = {0};8492int adjustment = rounding ? 1 : 0;84938494/* Allow mktime to determine timezone. */8495tm.tm_isdst = -1;84968497if (strptime(timestr, "%Y-%m-%d %H:%M", &tm) == NULL) {8498if (strptime(timestr, "%Y-%m-%d", &tm) == NULL) {8499fprintf(stderr, gettext("Failed to parse the date.\n"));8500usage(B_FALSE);8501}8502adjustment *= 24 * 60 * 60;8503} else {8504adjustment *= 60;8505}85068507return (mktime(&tm) + adjustment);8508}85098510/*8511* zpool scrub [-e | -s | -p | -C | -E | -S] [-w] [-a | <pool> ...]8512*8513* -a Scrub all pools.8514* -e Only scrub blocks in the error log.8515* -E End date of scrub.8516* -S Start date of scrub.8517* -s Stop. Stops any in-progress scrub.8518* -p Pause. Pause in-progress scrub.8519* -w Wait. Blocks until scrub has completed.8520* -C Scrub from last saved txg.8521*/8522int8523zpool_do_scrub(int argc, char **argv)8524{8525int c;8526scrub_cbdata_t cb;8527boolean_t wait = B_FALSE;8528int error;85298530cb.cb_type = POOL_SCAN_SCRUB;8531cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;8532cb.cb_date_start = cb.cb_date_end = 0;85338534boolean_t is_error_scrub = B_FALSE;8535boolean_t is_pause = B_FALSE;8536boolean_t is_stop = B_FALSE;8537boolean_t is_txg_continue = B_FALSE;8538boolean_t scrub_all = B_FALSE;85398540/* check options */8541while ((c = getopt(argc, argv, "aspweCE:S:")) != -1) {8542switch (c) {8543case 'a':8544scrub_all = B_TRUE;8545break;8546case 'e':8547is_error_scrub = B_TRUE;8548break;8549case 'E':8550/*8551* Round the date. It's better to scrub more data than8552* less. This also makes the date inclusive.8553*/8554cb.cb_date_end = date_string_to_sec(optarg, B_TRUE);8555break;8556case 's':8557is_stop = B_TRUE;8558break;8559case 'S':8560cb.cb_date_start = date_string_to_sec(optarg, B_FALSE);8561break;8562case 'p':8563is_pause = B_TRUE;8564break;8565case 'w':8566wait = B_TRUE;8567break;8568case 'C':8569is_txg_continue = B_TRUE;8570break;8571case '?':8572(void) fprintf(stderr, gettext("invalid option '%c'\n"),8573optopt);8574usage(B_FALSE);8575}8576}85778578if (is_pause && is_stop) {8579(void) fprintf(stderr, gettext("invalid option "8580"combination: -s and -p are mutually exclusive\n"));8581usage(B_FALSE);8582} else if (is_pause && is_txg_continue) {8583(void) fprintf(stderr, gettext("invalid option "8584"combination: -p and -C are mutually exclusive\n"));8585usage(B_FALSE);8586} else if (is_stop && is_txg_continue) {8587(void) fprintf(stderr, gettext("invalid option "8588"combination: -s and -C are mutually exclusive\n"));8589usage(B_FALSE);8590} else if (is_error_scrub && is_txg_continue) {8591(void) fprintf(stderr, gettext("invalid option "8592"combination: -e and -C are mutually exclusive\n"));8593usage(B_FALSE);8594} else {8595if (is_error_scrub)8596cb.cb_type = POOL_SCAN_ERRORSCRUB;85978598if (is_pause) {8599cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;8600} else if (is_stop) {8601cb.cb_type = POOL_SCAN_NONE;8602} else if (is_txg_continue) {8603cb.cb_scrub_cmd = POOL_SCRUB_FROM_LAST_TXG;8604} else {8605cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;8606}8607}86088609if ((cb.cb_date_start != 0 || cb.cb_date_end != 0) &&8610cb.cb_scrub_cmd != POOL_SCRUB_NORMAL) {8611(void) fprintf(stderr, gettext("invalid option combination: "8612"start/end date is available only with normal scrub\n"));8613usage(B_FALSE);8614}8615if (cb.cb_date_start != 0 && cb.cb_date_end != 0 &&8616cb.cb_date_start > cb.cb_date_end) {8617(void) fprintf(stderr, gettext("invalid arguments: "8618"end date has to be later than start date\n"));8619usage(B_FALSE);8620}86218622if (wait && (cb.cb_type == POOL_SCAN_NONE ||8623cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {8624(void) fprintf(stderr, gettext("invalid option combination: "8625"-w cannot be used with -p or -s\n"));8626usage(B_FALSE);8627}86288629argc -= optind;8630argv += optind;86318632if (argc < 1 && !scrub_all) {8633(void) fprintf(stderr, gettext("missing pool name argument\n"));8634usage(B_FALSE);8635}86368637error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,8638B_FALSE, scrub_callback, &cb);86398640if (wait && !error) {8641zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;8642error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,8643B_FALSE, wait_callback, &act);8644}86458646return (error);8647}86488649/*8650* zpool resilver <pool> ...8651*8652* Restarts any in-progress resilver8653*/8654int8655zpool_do_resilver(int argc, char **argv)8656{8657int c;8658scrub_cbdata_t cb;86598660cb.cb_type = POOL_SCAN_RESILVER;8661cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;8662cb.cb_date_start = cb.cb_date_end = 0;86638664/* check options */8665while ((c = getopt(argc, argv, "")) != -1) {8666switch (c) {8667case '?':8668(void) fprintf(stderr, gettext("invalid option '%c'\n"),8669optopt);8670usage(B_FALSE);8671}8672}86738674argc -= optind;8675argv += optind;86768677if (argc < 1) {8678(void) fprintf(stderr, gettext("missing pool name argument\n"));8679usage(B_FALSE);8680}86818682return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,8683B_FALSE, scrub_callback, &cb));8684}86858686/*8687* zpool trim [-d] [-r <rate>] [-c | -s] <-a | pool> [<device> ...]8688*8689* -a Trim all pools.8690* -c Cancel. Ends any in-progress trim.8691* -d Secure trim. Requires kernel and device support.8692* -r <rate> Sets the TRIM rate in bytes (per second). Supports8693* adding a multiplier suffix such as 'k' or 'm'.8694* -s Suspend. TRIM can then be restarted with no flags.8695* -w Wait. Blocks until trimming has completed.8696*/8697int8698zpool_do_trim(int argc, char **argv)8699{8700struct option long_options[] = {8701{"cancel", no_argument, NULL, 'c'},8702{"secure", no_argument, NULL, 'd'},8703{"rate", required_argument, NULL, 'r'},8704{"suspend", no_argument, NULL, 's'},8705{"wait", no_argument, NULL, 'w'},8706{"all", no_argument, NULL, 'a'},8707{0, 0, 0, 0}8708};87098710pool_trim_func_t cmd_type = POOL_TRIM_START;8711uint64_t rate = 0;8712boolean_t secure = B_FALSE;8713boolean_t wait = B_FALSE;8714boolean_t trimall = B_FALSE;8715int error;87168717int c;8718while ((c = getopt_long(argc, argv, "acdr:sw", long_options, NULL))8719!= -1) {8720switch (c) {8721case 'a':8722trimall = B_TRUE;8723break;8724case 'c':8725if (cmd_type != POOL_TRIM_START &&8726cmd_type != POOL_TRIM_CANCEL) {8727(void) fprintf(stderr, gettext("-c cannot be "8728"combined with other options\n"));8729usage(B_FALSE);8730}8731cmd_type = POOL_TRIM_CANCEL;8732break;8733case 'd':8734if (cmd_type != POOL_TRIM_START) {8735(void) fprintf(stderr, gettext("-d cannot be "8736"combined with the -c or -s options\n"));8737usage(B_FALSE);8738}8739secure = B_TRUE;8740break;8741case 'r':8742if (cmd_type != POOL_TRIM_START) {8743(void) fprintf(stderr, gettext("-r cannot be "8744"combined with the -c or -s options\n"));8745usage(B_FALSE);8746}8747if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {8748(void) fprintf(stderr, "%s: %s\n",8749gettext("invalid value for rate"),8750libzfs_error_description(g_zfs));8751usage(B_FALSE);8752}8753break;8754case 's':8755if (cmd_type != POOL_TRIM_START &&8756cmd_type != POOL_TRIM_SUSPEND) {8757(void) fprintf(stderr, gettext("-s cannot be "8758"combined with other options\n"));8759usage(B_FALSE);8760}8761cmd_type = POOL_TRIM_SUSPEND;8762break;8763case 'w':8764wait = B_TRUE;8765break;8766case '?':8767if (optopt != 0) {8768(void) fprintf(stderr,8769gettext("invalid option '%c'\n"), optopt);8770} else {8771(void) fprintf(stderr,8772gettext("invalid option '%s'\n"),8773argv[optind - 1]);8774}8775usage(B_FALSE);8776}8777}87788779argc -= optind;8780argv += optind;87818782trimflags_t trim_flags = {8783.secure = secure,8784.rate = rate,8785.wait = wait,8786};87878788trim_cbdata_t cbdata = {8789.trim_flags = trim_flags,8790.cmd_type = cmd_type8791};87928793if (argc < 1 && !trimall) {8794(void) fprintf(stderr, gettext("missing pool name argument\n"));8795usage(B_FALSE);8796}87978798if (wait && (cmd_type != POOL_TRIM_START)) {8799(void) fprintf(stderr, gettext("-w cannot be used with -c or "8800"-s options\n"));8801usage(B_FALSE);8802}88038804if (trimall && argc > 0) {8805(void) fprintf(stderr, gettext("-a cannot be combined with "8806"individual zpools or vdevs\n"));8807usage(B_FALSE);8808}88098810if (argc == 0 && trimall) {8811cbdata.trim_flags.fullpool = B_TRUE;8812/* Trim each pool recursively */8813error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,8814B_FALSE, zpool_trim_one, &cbdata);8815} else if (argc == 1) {8816char *poolname = argv[0];8817zpool_handle_t *zhp = zpool_open(g_zfs, poolname);8818if (zhp == NULL)8819return (-1);8820/* no individual leaf vdevs specified, so add them all */8821error = zpool_trim_one(zhp, &cbdata);8822zpool_close(zhp);8823} else {8824char *poolname = argv[0];8825zpool_handle_t *zhp = zpool_open(g_zfs, poolname);8826if (zhp == NULL)8827return (-1);8828/* leaf vdevs specified, trim only those */8829cbdata.trim_flags.fullpool = B_FALSE;8830nvlist_t *vdevs = fnvlist_alloc();8831for (int i = 1; i < argc; i++) {8832fnvlist_add_boolean(vdevs, argv[i]);8833}8834error = zpool_trim(zhp, cbdata.cmd_type, vdevs,8835&cbdata.trim_flags);8836fnvlist_free(vdevs);8837zpool_close(zhp);8838}88398840return (error);8841}88428843/*8844* Converts a total number of seconds to a human readable string broken8845* down in to days/hours/minutes/seconds.8846*/8847static void8848secs_to_dhms(uint64_t total, char *buf)8849{8850uint64_t days = total / 60 / 60 / 24;8851uint64_t hours = (total / 60 / 60) % 24;8852uint64_t mins = (total / 60) % 60;8853uint64_t secs = (total % 60);88548855if (days > 0) {8856(void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",8857(u_longlong_t)days, (u_longlong_t)hours,8858(u_longlong_t)mins, (u_longlong_t)secs);8859} else {8860(void) sprintf(buf, "%02llu:%02llu:%02llu",8861(u_longlong_t)hours, (u_longlong_t)mins,8862(u_longlong_t)secs);8863}8864}88658866/*8867* Print out detailed error scrub status.8868*/8869static void8870print_err_scrub_status(pool_scan_stat_t *ps)8871{8872time_t start, end, pause;8873uint64_t total_secs_left;8874uint64_t secs_left, mins_left, hours_left, days_left;8875uint64_t examined, to_be_examined;88768877if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {8878return;8879}88808881(void) printf(gettext(" scrub: "));88828883start = ps->pss_error_scrub_start;8884end = ps->pss_error_scrub_end;8885pause = ps->pss_pass_error_scrub_pause;8886examined = ps->pss_error_scrub_examined;8887to_be_examined = ps->pss_error_scrub_to_be_examined;88888889assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);88908891if (ps->pss_error_scrub_state == DSS_FINISHED) {8892total_secs_left = end - start;8893days_left = total_secs_left / 60 / 60 / 24;8894hours_left = (total_secs_left / 60 / 60) % 24;8895mins_left = (total_secs_left / 60) % 60;8896secs_left = (total_secs_left % 60);88978898(void) printf(gettext("scrubbed %llu error blocks in %llu days "8899"%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,8900(u_longlong_t)days_left, (u_longlong_t)hours_left,8901(u_longlong_t)mins_left, (u_longlong_t)secs_left,8902ctime(&end));89038904return;8905} else if (ps->pss_error_scrub_state == DSS_CANCELED) {8906(void) printf(gettext("error scrub canceled on %s"),8907ctime(&end));8908return;8909}8910assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);89118912/* Error scrub is in progress. */8913if (pause == 0) {8914(void) printf(gettext("error scrub in progress since %s"),8915ctime(&start));8916} else {8917(void) printf(gettext("error scrub paused since %s"),8918ctime(&pause));8919(void) printf(gettext("\terror scrub started on %s"),8920ctime(&start));8921}89228923double fraction_done = (double)examined / (to_be_examined + examined);8924(void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"8925" blocks"), 100 * fraction_done, (u_longlong_t)examined);89268927(void) printf("\n");8928}89298930/*8931* Print out detailed scrub status.8932*/8933static void8934print_scan_scrub_resilver_status(pool_scan_stat_t *ps)8935{8936time_t start, end, pause;8937uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;8938uint64_t elapsed, scan_rate, issue_rate;8939double fraction_done;8940char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];8941char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];89428943printf(" ");8944(void) printf_color(ANSI_BOLD, gettext("scan:"));8945printf(" ");89468947/* If there's never been a scan, there's not much to say. */8948if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||8949ps->pss_func >= POOL_SCAN_FUNCS) {8950(void) printf(gettext("none requested\n"));8951return;8952}89538954start = ps->pss_start_time;8955end = ps->pss_end_time;8956pause = ps->pss_pass_scrub_pause;89578958zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));89598960int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;8961int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;8962assert(is_resilver || is_scrub);89638964/* Scan is finished or canceled. */8965if (ps->pss_state == DSS_FINISHED) {8966secs_to_dhms(end - start, time_buf);89678968if (is_scrub) {8969(void) printf(gettext("scrub repaired %s "8970"in %s with %llu errors on %s"), processed_buf,8971time_buf, (u_longlong_t)ps->pss_errors,8972ctime(&end));8973} else if (is_resilver) {8974(void) printf(gettext("resilvered %s "8975"in %s with %llu errors on %s"), processed_buf,8976time_buf, (u_longlong_t)ps->pss_errors,8977ctime(&end));8978}8979return;8980} else if (ps->pss_state == DSS_CANCELED) {8981if (is_scrub) {8982(void) printf(gettext("scrub canceled on %s"),8983ctime(&end));8984} else if (is_resilver) {8985(void) printf(gettext("resilver canceled on %s"),8986ctime(&end));8987}8988return;8989}89908991assert(ps->pss_state == DSS_SCANNING);89928993/* Scan is in progress. Resilvers can't be paused. */8994if (is_scrub) {8995if (pause == 0) {8996(void) printf(gettext("scrub in progress since %s"),8997ctime(&start));8998} else {8999(void) printf(gettext("scrub paused since %s"),9000ctime(&pause));9001(void) printf(gettext("\tscrub started on %s"),9002ctime(&start));9003}9004} else if (is_resilver) {9005(void) printf(gettext("resilver in progress since %s"),9006ctime(&start));9007}90089009scanned = ps->pss_examined;9010pass_scanned = ps->pss_pass_exam;9011issued = ps->pss_issued;9012pass_issued = ps->pss_pass_issued;9013total_s = ps->pss_to_examine;9014total_i = ps->pss_to_examine - ps->pss_skipped;90159016/* we are only done with a block once we have issued the IO for it */9017fraction_done = (double)issued / total_i;90189019/* elapsed time for this pass, rounding up to 1 if it's 0 */9020elapsed = time(NULL) - ps->pss_pass_start;9021elapsed -= ps->pss_pass_scrub_spent_paused;9022elapsed = (elapsed != 0) ? elapsed : 1;90239024scan_rate = pass_scanned / elapsed;9025issue_rate = pass_issued / elapsed;90269027/* format all of the numbers we will be reporting */9028zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));9029zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));9030zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));9031zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));90329033/* do not print estimated time if we have a paused scrub */9034(void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);9035if (pause == 0 && scan_rate > 0) {9036zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));9037(void) printf(gettext(" at %s/s"), srate_buf);9038}9039(void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);9040if (pause == 0 && issue_rate > 0) {9041zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));9042(void) printf(gettext(" at %s/s"), irate_buf);9043}9044(void) printf(gettext("\n"));90459046if (is_resilver) {9047(void) printf(gettext("\t%s resilvered, %.2f%% done"),9048processed_buf, 100 * fraction_done);9049} else if (is_scrub) {9050(void) printf(gettext("\t%s repaired, %.2f%% done"),9051processed_buf, 100 * fraction_done);9052}90539054if (pause == 0) {9055/*9056* Only provide an estimate iff:9057* 1) we haven't yet issued all we expected, and9058* 2) the issue rate exceeds 10 MB/s, and9059* 3) it's either:9060* a) a resilver which has started repairs, or9061* b) a scrub which has entered the issue phase.9062*/9063if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&9064((is_resilver && ps->pss_processed > 0) ||9065(is_scrub && issued > 0))) {9066secs_to_dhms((total_i - issued) / issue_rate, time_buf);9067(void) printf(gettext(", %s to go\n"), time_buf);9068} else {9069(void) printf(gettext(", no estimated "9070"completion time\n"));9071}9072} else {9073(void) printf(gettext("\n"));9074}9075}90769077static void9078print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)9079{9080if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)9081return;90829083printf(" ");9084(void) printf_color(ANSI_BOLD, gettext("scan:"));9085printf(" ");90869087uint64_t bytes_scanned = vrs->vrs_bytes_scanned;9088uint64_t bytes_issued = vrs->vrs_bytes_issued;9089uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;9090uint64_t bytes_est_s = vrs->vrs_bytes_est;9091uint64_t bytes_est_i = vrs->vrs_bytes_est;9092if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)9093bytes_est_i -= vrs->vrs_pass_bytes_skipped;9094uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /9095(vrs->vrs_pass_time_ms + 1)) * 1000;9096uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /9097(vrs->vrs_pass_time_ms + 1)) * 1000;9098double scan_pct = MIN((double)bytes_scanned * 100 /9099(bytes_est_s + 1), 100);91009101/* Format all of the numbers we will be reporting */9102char bytes_scanned_buf[7], bytes_issued_buf[7];9103char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];9104char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];9105zfs_nicebytes(bytes_scanned, bytes_scanned_buf,9106sizeof (bytes_scanned_buf));9107zfs_nicebytes(bytes_issued, bytes_issued_buf,9108sizeof (bytes_issued_buf));9109zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,9110sizeof (bytes_rebuilt_buf));9111zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));9112zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));91139114time_t start = vrs->vrs_start_time;9115time_t end = vrs->vrs_end_time;91169117/* Rebuild is finished or canceled. */9118if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {9119secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);9120(void) printf(gettext("resilvered (%s) %s in %s "9121"with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,9122time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));9123return;9124} else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {9125(void) printf(gettext("resilver (%s) canceled on %s"),9126vdev_name, ctime(&end));9127return;9128} else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {9129(void) printf(gettext("resilver (%s) in progress since %s"),9130vdev_name, ctime(&start));9131}91329133assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);91349135(void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,9136bytes_est_s_buf);9137if (scan_rate > 0) {9138zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));9139(void) printf(gettext(" at %s/s"), scan_rate_buf);9140}9141(void) printf(gettext(", %s / %s issued"), bytes_issued_buf,9142bytes_est_i_buf);9143if (issue_rate > 0) {9144zfs_nicebytes(issue_rate, issue_rate_buf,9145sizeof (issue_rate_buf));9146(void) printf(gettext(" at %s/s"), issue_rate_buf);9147}9148(void) printf(gettext("\n"));91499150(void) printf(gettext("\t%s resilvered, %.2f%% done"),9151bytes_rebuilt_buf, scan_pct);91529153if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {9154if (bytes_est_s >= bytes_scanned &&9155scan_rate >= 10 * 1024 * 1024) {9156secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,9157time_buf);9158(void) printf(gettext(", %s to go\n"), time_buf);9159} else {9160(void) printf(gettext(", no estimated "9161"completion time\n"));9162}9163} else {9164(void) printf(gettext("\n"));9165}9166}91679168/*9169* Print rebuild status for top-level vdevs.9170*/9171static void9172print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)9173{9174nvlist_t **child;9175uint_t children;91769177if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,9178&child, &children) != 0)9179children = 0;91809181for (uint_t c = 0; c < children; c++) {9182vdev_rebuild_stat_t *vrs;9183uint_t i;91849185if (nvlist_lookup_uint64_array(child[c],9186ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {9187char *name = zpool_vdev_name(g_zfs, zhp,9188child[c], VDEV_NAME_TYPE_ID);9189print_rebuild_status_impl(vrs, i, name);9190free(name);9191}9192}9193}91949195/*9196* As we don't scrub checkpointed blocks, we want to warn the user that we9197* skipped scanning some blocks if a checkpoint exists or existed at any9198* time during the scan. If a sequential instead of healing reconstruction9199* was performed then the blocks were reconstructed. However, their checksums9200* have not been verified so we still print the warning.9201*/9202static void9203print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)9204{9205if (ps == NULL || pcs == NULL)9206return;92079208if (pcs->pcs_state == CS_NONE ||9209pcs->pcs_state == CS_CHECKPOINT_DISCARDING)9210return;92119212assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);92139214if (ps->pss_state == DSS_NONE)9215return;92169217if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&9218ps->pss_end_time < pcs->pcs_start_time)9219return;92209221if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {9222(void) printf(gettext(" scan warning: skipped blocks "9223"that are only referenced by the checkpoint.\n"));9224} else {9225assert(ps->pss_state == DSS_SCANNING);9226(void) printf(gettext(" scan warning: skipping blocks "9227"that are only referenced by the checkpoint.\n"));9228}9229}92309231/*9232* Returns B_TRUE if there is an active rebuild in progress. Otherwise,9233* B_FALSE is returned and 'rebuild_end_time' is set to the end time for9234* the last completed (or cancelled) rebuild.9235*/9236static boolean_t9237check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)9238{9239nvlist_t **child;9240uint_t children;9241boolean_t rebuilding = B_FALSE;9242uint64_t end_time = 0;92439244if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,9245&child, &children) != 0)9246children = 0;92479248for (uint_t c = 0; c < children; c++) {9249vdev_rebuild_stat_t *vrs;9250uint_t i;92519252if (nvlist_lookup_uint64_array(child[c],9253ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {92549255if (vrs->vrs_end_time > end_time)9256end_time = vrs->vrs_end_time;92579258if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {9259rebuilding = B_TRUE;9260end_time = 0;9261break;9262}9263}9264}92659266if (rebuild_end_time != NULL)9267*rebuild_end_time = end_time;92689269return (rebuilding);9270}92719272static void9273vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,9274int depth, boolean_t isspare, char *parent, nvlist_t *item)9275{9276nvlist_t *vds, **child, *ch = NULL;9277uint_t vsc, children;9278vdev_stat_t *vs;9279char *vname;9280uint64_t notpresent;9281const char *type, *path;92829283if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,9284&child, &children) != 0)9285children = 0;9286verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,9287(uint64_t **)&vs, &vsc) == 0);9288verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);9289if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)9290return;92919292if (cb->cb_print_unhealthy && depth > 0 &&9293for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {9294return;9295}9296vname = zpool_vdev_name(g_zfs, zhp, nv,9297cb->cb_name_flags | VDEV_NAME_TYPE_ID);9298vds = fnvlist_alloc();9299fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int);9300if (cb->cb_flat_vdevs && parent != NULL) {9301fnvlist_add_string(vds, "parent", parent);9302}93039304if (isspare) {9305if (vs->vs_aux == VDEV_AUX_SPARED) {9306fnvlist_add_string(vds, "state", "INUSE");9307used_by_other(zhp, nv, vds);9308} else if (vs->vs_state == VDEV_STATE_HEALTHY)9309fnvlist_add_string(vds, "state", "AVAIL");9310} else {9311if (vs->vs_alloc) {9312nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc,9313cb->cb_literal, cb->cb_json_as_int,9314ZFS_NICENUM_BYTES);9315}9316if (vs->vs_space) {9317nice_num_str_nvlist(vds, "total_space", vs->vs_space,9318cb->cb_literal, cb->cb_json_as_int,9319ZFS_NICENUM_BYTES);9320}9321if (vs->vs_dspace) {9322nice_num_str_nvlist(vds, "def_space", vs->vs_dspace,9323cb->cb_literal, cb->cb_json_as_int,9324ZFS_NICENUM_BYTES);9325}9326if (vs->vs_rsize) {9327nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize,9328cb->cb_literal, cb->cb_json_as_int,9329ZFS_NICENUM_BYTES);9330}9331if (vs->vs_esize) {9332nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize,9333cb->cb_literal, cb->cb_json_as_int,9334ZFS_NICENUM_BYTES);9335}9336if (vs->vs_self_healed) {9337nice_num_str_nvlist(vds, "self_healed",9338vs->vs_self_healed, cb->cb_literal,9339cb->cb_json_as_int, ZFS_NICENUM_BYTES);9340}9341if (vs->vs_pspace) {9342nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace,9343cb->cb_literal, cb->cb_json_as_int,9344ZFS_NICENUM_BYTES);9345}9346nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors,9347cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);9348nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors,9349cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);9350nice_num_str_nvlist(vds, "checksum_errors",9351vs->vs_checksum_errors, cb->cb_literal,9352cb->cb_json_as_int, ZFS_NICENUM_1024);9353if (vs->vs_scan_processed) {9354nice_num_str_nvlist(vds, "scan_processed",9355vs->vs_scan_processed, cb->cb_literal,9356cb->cb_json_as_int, ZFS_NICENUM_BYTES);9357}9358if (vs->vs_checkpoint_space) {9359nice_num_str_nvlist(vds, "checkpoint_space",9360vs->vs_checkpoint_space, cb->cb_literal,9361cb->cb_json_as_int, ZFS_NICENUM_BYTES);9362}9363if (vs->vs_resilver_deferred) {9364nice_num_str_nvlist(vds, "resilver_deferred",9365vs->vs_resilver_deferred, B_TRUE,9366cb->cb_json_as_int, ZFS_NICENUM_1024);9367}9368if (children == 0) {9369nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios,9370cb->cb_literal, cb->cb_json_as_int,9371ZFS_NICENUM_1024);9372}9373if (cb->cb_print_power) {9374if (children == 0) {9375/* Only leaf vdevs have physical slots */9376switch (zpool_power_current_state(zhp, (char *)9377fnvlist_lookup_string(nv,9378ZPOOL_CONFIG_PATH))) {9379case 0:9380fnvlist_add_string(vds, "power_state",9381"off");9382break;9383case 1:9384fnvlist_add_string(vds, "power_state",9385"on");9386break;9387default:9388fnvlist_add_string(vds, "power_state",9389"-");9390}9391} else {9392fnvlist_add_string(vds, "power_state", "-");9393}9394}9395}93969397if (cb->cb_print_dio_verify) {9398nice_num_str_nvlist(vds, "dio_verify_errors",9399vs->vs_dio_verify_errors, cb->cb_literal,9400cb->cb_json_as_int, ZFS_NICENUM_1024);9401}94029403if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,9404¬present) == 0) {9405nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,94061, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9407fnvlist_add_string(vds, "was",9408fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH));9409} else if (vs->vs_aux != VDEV_AUX_NONE) {9410fnvlist_add_string(vds, "aux", vdev_aux_str[vs->vs_aux]);9411} else if (children == 0 && !isspare &&9412getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&9413VDEV_STAT_VALID(vs_physical_ashift, vsc) &&9414vs->vs_configured_ashift < vs->vs_physical_ashift) {9415nice_num_str_nvlist(vds, "configured_ashift",9416vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int,9417ZFS_NICENUM_1024);9418nice_num_str_nvlist(vds, "physical_ashift",9419vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int,9420ZFS_NICENUM_1024);9421}9422if (vs->vs_scan_removing != 0) {9423nice_num_str_nvlist(vds, "removing", vs->vs_scan_removing,9424B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);9425} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {9426nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc,9427B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);9428}94299430if (cb->vcdl != NULL) {9431if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {9432zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp),9433path, vds);9434}9435}94369437if (children == 0) {9438if (cb->cb_print_vdev_init) {9439if (vs->vs_initialize_state != 0) {9440uint64_t st = vs->vs_initialize_state;9441fnvlist_add_string(vds, "init_state",9442vdev_init_state_str[st]);9443nice_num_str_nvlist(vds, "initialized",9444vs->vs_initialize_bytes_done,9445cb->cb_literal, cb->cb_json_as_int,9446ZFS_NICENUM_BYTES);9447nice_num_str_nvlist(vds, "to_initialize",9448vs->vs_initialize_bytes_est,9449cb->cb_literal, cb->cb_json_as_int,9450ZFS_NICENUM_BYTES);9451nice_num_str_nvlist(vds, "init_time",9452vs->vs_initialize_action_time,9453cb->cb_literal, cb->cb_json_as_int,9454ZFS_NICE_TIMESTAMP);9455nice_num_str_nvlist(vds, "init_errors",9456vs->vs_initialize_errors,9457cb->cb_literal, cb->cb_json_as_int,9458ZFS_NICENUM_1024);9459} else {9460fnvlist_add_string(vds, "init_state",9461"UNINITIALIZED");9462}9463}9464if (cb->cb_print_vdev_trim) {9465if (vs->vs_trim_notsup == 0) {9466if (vs->vs_trim_state != 0) {9467uint64_t st = vs->vs_trim_state;9468fnvlist_add_string(vds, "trim_state",9469vdev_trim_state_str[st]);9470nice_num_str_nvlist(vds, "trimmed",9471vs->vs_trim_bytes_done,9472cb->cb_literal, cb->cb_json_as_int,9473ZFS_NICENUM_BYTES);9474nice_num_str_nvlist(vds, "to_trim",9475vs->vs_trim_bytes_est,9476cb->cb_literal, cb->cb_json_as_int,9477ZFS_NICENUM_BYTES);9478nice_num_str_nvlist(vds, "trim_time",9479vs->vs_trim_action_time,9480cb->cb_literal, cb->cb_json_as_int,9481ZFS_NICE_TIMESTAMP);9482nice_num_str_nvlist(vds, "trim_errors",9483vs->vs_trim_errors,9484cb->cb_literal, cb->cb_json_as_int,9485ZFS_NICENUM_1024);9486} else9487fnvlist_add_string(vds, "trim_state",9488"UNTRIMMED");9489}9490nice_num_str_nvlist(vds, "trim_notsup",9491vs->vs_trim_notsup, B_TRUE,9492cb->cb_json_as_int, ZFS_NICENUM_1024);9493}9494} else {9495ch = fnvlist_alloc();9496}94979498if (cb->cb_flat_vdevs && children == 0) {9499fnvlist_add_nvlist(item, vname, vds);9500}95019502for (int c = 0; c < children; c++) {9503uint64_t islog = B_FALSE, ishole = B_FALSE;9504(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,9505&islog);9506(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,9507&ishole);9508if (islog || ishole)9509continue;9510if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))9511continue;9512if (cb->cb_flat_vdevs) {9513vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,9514vname, item);9515}9516vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,9517vname, ch);9518}95199520if (ch != NULL) {9521if (!nvlist_empty(ch))9522fnvlist_add_nvlist(vds, "vdevs", ch);9523fnvlist_free(ch);9524}9525fnvlist_add_nvlist(item, vname, vds);9526fnvlist_free(vds);9527free(vname);9528}95299530static void9531class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,9532const char *class, nvlist_t *item)9533{9534uint_t c, children;9535nvlist_t **child;9536nvlist_t *class_obj = NULL;95379538if (!cb->cb_flat_vdevs)9539class_obj = fnvlist_alloc();95409541assert(zhp != NULL || !cb->cb_verbose);95429543if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,9544&children) != 0)9545return;95469547for (c = 0; c < children; c++) {9548uint64_t is_log = B_FALSE;9549const char *bias = NULL;9550const char *type = NULL;9551char *name = zpool_vdev_name(g_zfs, zhp, child[c],9552cb->cb_name_flags | VDEV_NAME_TYPE_ID);95539554(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,9555&is_log);95569557if (is_log) {9558bias = (char *)VDEV_ALLOC_CLASS_LOGS;9559} else {9560(void) nvlist_lookup_string(child[c],9561ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);9562(void) nvlist_lookup_string(child[c],9563ZPOOL_CONFIG_TYPE, &type);9564}95659566if (bias == NULL || strcmp(bias, class) != 0)9567continue;9568if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)9569continue;95709571if (cb->cb_flat_vdevs) {9572vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,9573NULL, item);9574} else {9575vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,9576NULL, class_obj);9577}9578free(name);9579}9580if (!cb->cb_flat_vdevs) {9581if (!nvlist_empty(class_obj))9582fnvlist_add_nvlist(item, class, class_obj);9583fnvlist_free(class_obj);9584}9585}95869587static void9588l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,9589nvlist_t *item)9590{9591nvlist_t *l2c = NULL, **l2cache;9592uint_t nl2cache;9593if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,9594&l2cache, &nl2cache) == 0) {9595if (nl2cache == 0)9596return;9597if (!cb->cb_flat_vdevs)9598l2c = fnvlist_alloc();9599for (int i = 0; i < nl2cache; i++) {9600if (cb->cb_flat_vdevs) {9601vdev_stats_nvlist(zhp, cb, l2cache[i], 2,9602B_FALSE, NULL, item);9603} else {9604vdev_stats_nvlist(zhp, cb, l2cache[i], 2,9605B_FALSE, NULL, l2c);9606}9607}9608}9609if (!cb->cb_flat_vdevs) {9610if (!nvlist_empty(l2c))9611fnvlist_add_nvlist(item, "l2cache", l2c);9612fnvlist_free(l2c);9613}9614}96159616static void9617spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,9618nvlist_t *item)9619{9620nvlist_t *sp = NULL, **spares;9621uint_t nspares;9622if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,9623&spares, &nspares) == 0) {9624if (nspares == 0)9625return;9626if (!cb->cb_flat_vdevs)9627sp = fnvlist_alloc();9628for (int i = 0; i < nspares; i++) {9629if (cb->cb_flat_vdevs) {9630vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,9631NULL, item);9632} else {9633vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,9634NULL, sp);9635}9636}9637}9638if (!cb->cb_flat_vdevs) {9639if (!nvlist_empty(sp))9640fnvlist_add_nvlist(item, "spares", sp);9641fnvlist_free(sp);9642}9643}96449645static void9646errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)9647{9648uint64_t nerr;9649nvlist_t *config = zpool_get_config(zhp, NULL);9650if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,9651&nerr) == 0) {9652nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr,9653cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);9654if (nerr != 0 && cb->cb_verbose) {9655nvlist_t *nverrlist = NULL;9656if (zpool_get_errlog(zhp, &nverrlist) == 0) {9657int i = 0;9658int count = 0;9659size_t len = MAXPATHLEN * 2;9660nvpair_t *elem = NULL;96619662for (nvpair_t *pair =9663nvlist_next_nvpair(nverrlist, NULL);9664pair != NULL;9665pair = nvlist_next_nvpair(nverrlist, pair))9666count++;9667char **errl = (char **)malloc(9668count * sizeof (char *));96699670while ((elem = nvlist_next_nvpair(nverrlist,9671elem)) != NULL) {9672nvlist_t *nv;9673uint64_t dsobj, obj;96749675verify(nvpair_value_nvlist(elem,9676&nv) == 0);9677verify(nvlist_lookup_uint64(nv,9678ZPOOL_ERR_DATASET, &dsobj) == 0);9679verify(nvlist_lookup_uint64(nv,9680ZPOOL_ERR_OBJECT, &obj) == 0);9681errl[i] = safe_malloc(len);9682zpool_obj_to_path(zhp, dsobj, obj,9683errl[i++], len);9684}9685nvlist_free(nverrlist);9686fnvlist_add_string_array(item, "errlist",9687(const char **)errl, count);9688for (int i = 0; i < count; ++i)9689free(errl[i]);9690free(errl);9691} else9692fnvlist_add_string(item, "errlist",9693strerror(errno));9694}9695}9696}96979698static void9699ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item)9700{9701nice_num_str_nvlist(item, "blocks", dds->dds_blocks,9702cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);9703nice_num_str_nvlist(item, "logical_size", dds->dds_lsize,9704cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9705nice_num_str_nvlist(item, "physical_size", dds->dds_psize,9706cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9707nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize,9708cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9709nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks,9710cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);9711nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize,9712cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9713nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize,9714cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9715nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize,9716cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9717}97189719static void9720dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)9721{9722nvlist_t *config;9723if (cb->cb_dedup_stats) {9724ddt_histogram_t *ddh;9725ddt_stat_t *dds;9726ddt_object_t *ddo;9727nvlist_t *ddt_stat, *ddt_obj, *dedup;9728uint_t c;9729uint64_t cspace_prop;97309731config = zpool_get_config(zhp, NULL);9732if (nvlist_lookup_uint64_array(config,9733ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0)9734return;97359736dedup = fnvlist_alloc();9737ddt_obj = fnvlist_alloc();9738nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count,9739cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);9740if (ddo->ddo_count == 0) {9741fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,9742ddt_obj);9743fnvlist_add_nvlist(item, "dedup_stats", dedup);9744fnvlist_free(ddt_obj);9745fnvlist_free(dedup);9746return;9747} else {9748nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace,9749cb->cb_literal, cb->cb_json_as_int,9750ZFS_NICENUM_1024);9751nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace,9752cb->cb_literal, cb->cb_json_as_int,9753ZFS_NICENUM_1024);9754/*9755* Squash cached size into in-core size to handle race.9756* Only include cached size if it is available.9757*/9758cspace_prop = zpool_get_prop_int(zhp,9759ZPOOL_PROP_DEDUPCACHED, NULL);9760cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);9761nice_num_str_nvlist(dedup, "cspace", cspace_prop,9762cb->cb_literal, cb->cb_json_as_int,9763ZFS_NICENUM_1024);9764}97659766ddt_stat = fnvlist_alloc();9767if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,9768(uint64_t **)&dds, &c) == 0) {9769nvlist_t *total = fnvlist_alloc();9770if (dds->dds_blocks == 0)9771fnvlist_add_string(total, "blocks", "0");9772else9773ddt_stats_nvlist(dds, cb, total);9774fnvlist_add_nvlist(ddt_stat, "total", total);9775fnvlist_free(total);9776}9777if (nvlist_lookup_uint64_array(config,9778ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) {9779nvlist_t *hist = fnvlist_alloc();9780nvlist_t *entry = NULL;9781char buf[16];9782for (int h = 0; h < 64; h++) {9783if (ddh->ddh_stat[h].dds_blocks != 0) {9784entry = fnvlist_alloc();9785ddt_stats_nvlist(&ddh->ddh_stat[h], cb,9786entry);9787(void) snprintf(buf, 16, "%d", h);9788fnvlist_add_nvlist(hist, buf, entry);9789fnvlist_free(entry);9790}9791}9792if (!nvlist_empty(hist))9793fnvlist_add_nvlist(ddt_stat, "histogram", hist);9794fnvlist_free(hist);9795}97969797if (!nvlist_empty(ddt_obj)) {9798fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,9799ddt_obj);9800}9801fnvlist_free(ddt_obj);9802if (!nvlist_empty(ddt_stat)) {9803fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS,9804ddt_stat);9805}9806fnvlist_free(ddt_stat);9807if (!nvlist_empty(dedup))9808fnvlist_add_nvlist(item, "dedup_stats", dedup);9809fnvlist_free(dedup);9810}9811}98129813static void9814raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,9815nvlist_t *nvroot, nvlist_t *item)9816{9817uint_t c;9818pool_raidz_expand_stat_t *pres = NULL;9819if (nvlist_lookup_uint64_array(nvroot,9820ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) {9821nvlist_t **child;9822uint_t children;9823nvlist_t *nv = fnvlist_alloc();9824verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,9825&child, &children) == 0);9826assert(pres->pres_expanding_vdev < children);9827char *name =9828zpool_vdev_name(g_zfs, zhp,9829child[pres->pres_expanding_vdev], 0);9830fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int);9831fnvlist_add_string(nv, "state",9832pool_scan_state_str[pres->pres_state]);9833nice_num_str_nvlist(nv, "expanding_vdev",9834pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int,9835ZFS_NICENUM_1024);9836nice_num_str_nvlist(nv, "start_time", pres->pres_start_time,9837cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);9838nice_num_str_nvlist(nv, "end_time", pres->pres_end_time,9839cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);9840nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow,9841cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9842nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed,9843cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9844nice_num_str_nvlist(nv, "waiting_for_resilver",9845pres->pres_waiting_for_resilver, B_TRUE,9846cb->cb_json_as_int, ZFS_NICENUM_1024);9847fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv);9848fnvlist_free(nv);9849free(name);9850}9851}98529853static void9854checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb,9855nvlist_t *item)9856{9857uint_t c;9858pool_checkpoint_stat_t *pcs = NULL;9859if (nvlist_lookup_uint64_array(nvroot,9860ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) {9861nvlist_t *nv = fnvlist_alloc();9862fnvlist_add_string(nv, "state",9863checkpoint_state_str[pcs->pcs_state]);9864nice_num_str_nvlist(nv, "start_time",9865pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int,9866ZFS_NICE_TIMESTAMP);9867nice_num_str_nvlist(nv, "space",9868pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int,9869ZFS_NICENUM_BYTES);9870fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv);9871fnvlist_free(nv);9872}9873}98749875static void9876removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,9877nvlist_t *nvroot, nvlist_t *item)9878{9879uint_t c;9880pool_removal_stat_t *prs = NULL;9881if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS,9882(uint64_t **)&prs, &c) == 0) {9883if (prs->prs_state != DSS_NONE) {9884nvlist_t **child;9885uint_t children;9886verify(nvlist_lookup_nvlist_array(nvroot,9887ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);9888assert(prs->prs_removing_vdev < children);9889char *vdev_name = zpool_vdev_name(g_zfs, zhp,9890child[prs->prs_removing_vdev], B_TRUE);9891nvlist_t *nv = fnvlist_alloc();9892fill_vdev_info(nv, zhp, vdev_name, B_FALSE,9893cb->cb_json_as_int);9894fnvlist_add_string(nv, "state",9895pool_scan_state_str[prs->prs_state]);9896nice_num_str_nvlist(nv, "removing_vdev",9897prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int,9898ZFS_NICENUM_1024);9899nice_num_str_nvlist(nv, "start_time",9900prs->prs_start_time, cb->cb_literal,9901cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);9902nice_num_str_nvlist(nv, "end_time", prs->prs_end_time,9903cb->cb_literal, cb->cb_json_as_int,9904ZFS_NICE_TIMESTAMP);9905nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy,9906cb->cb_literal, cb->cb_json_as_int,9907ZFS_NICENUM_BYTES);9908nice_num_str_nvlist(nv, "copied", prs->prs_copied,9909cb->cb_literal, cb->cb_json_as_int,9910ZFS_NICENUM_BYTES);9911nice_num_str_nvlist(nv, "mapping_memory",9912prs->prs_mapping_memory, cb->cb_literal,9913cb->cb_json_as_int, ZFS_NICENUM_BYTES);9914fnvlist_add_nvlist(item,9915ZPOOL_CONFIG_REMOVAL_STATS, nv);9916fnvlist_free(nv);9917free(vdev_name);9918}9919}9920}99219922static void9923scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,9924nvlist_t *nvroot, nvlist_t *item)9925{9926pool_scan_stat_t *ps = NULL;9927uint_t c;9928nvlist_t *scan = fnvlist_alloc();9929nvlist_t **child;9930uint_t children;99319932if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,9933(uint64_t **)&ps, &c) == 0) {9934fnvlist_add_string(scan, "function",9935pool_scan_func_str[ps->pss_func]);9936fnvlist_add_string(scan, "state",9937pool_scan_state_str[ps->pss_state]);9938nice_num_str_nvlist(scan, "start_time", ps->pss_start_time,9939cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);9940nice_num_str_nvlist(scan, "end_time", ps->pss_end_time,9941cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);9942nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine,9943cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9944nice_num_str_nvlist(scan, "examined", ps->pss_examined,9945cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9946nice_num_str_nvlist(scan, "skipped", ps->pss_skipped,9947cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9948nice_num_str_nvlist(scan, "processed", ps->pss_processed,9949cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9950nice_num_str_nvlist(scan, "errors", ps->pss_errors,9951cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);9952nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam,9953cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9954nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start,9955B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);9956nice_num_str_nvlist(scan, "scrub_pause",9957ps->pss_pass_scrub_pause, cb->cb_literal,9958cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);9959nice_num_str_nvlist(scan, "scrub_spent_paused",9960ps->pss_pass_scrub_spent_paused,9961B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);9962nice_num_str_nvlist(scan, "issued_bytes_per_scan",9963ps->pss_pass_issued, cb->cb_literal,9964cb->cb_json_as_int, ZFS_NICENUM_BYTES);9965nice_num_str_nvlist(scan, "issued", ps->pss_issued,9966cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);9967if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&9968ps->pss_error_scrub_start > ps->pss_start_time) {9969fnvlist_add_string(scan, "err_scrub_func",9970pool_scan_func_str[ps->pss_error_scrub_func]);9971fnvlist_add_string(scan, "err_scrub_state",9972pool_scan_state_str[ps->pss_error_scrub_state]);9973nice_num_str_nvlist(scan, "err_scrub_start_time",9974ps->pss_error_scrub_start,9975cb->cb_literal, cb->cb_json_as_int,9976ZFS_NICE_TIMESTAMP);9977nice_num_str_nvlist(scan, "err_scrub_end_time",9978ps->pss_error_scrub_end,9979cb->cb_literal, cb->cb_json_as_int,9980ZFS_NICE_TIMESTAMP);9981nice_num_str_nvlist(scan, "err_scrub_examined",9982ps->pss_error_scrub_examined,9983cb->cb_literal, cb->cb_json_as_int,9984ZFS_NICENUM_1024);9985nice_num_str_nvlist(scan, "err_scrub_to_examine",9986ps->pss_error_scrub_to_be_examined,9987cb->cb_literal, cb->cb_json_as_int,9988ZFS_NICENUM_1024);9989nice_num_str_nvlist(scan, "err_scrub_pause",9990ps->pss_pass_error_scrub_pause,9991B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);9992}9993}99949995if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,9996&child, &children) == 0) {9997vdev_rebuild_stat_t *vrs;9998uint_t i;9999char *name;10000nvlist_t *nv;10001nvlist_t *rebuild = fnvlist_alloc();10002uint64_t st;10003for (uint_t c = 0; c < children; c++) {10004if (nvlist_lookup_uint64_array(child[c],10005ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs,10006&i) == 0) {10007if (vrs->vrs_state != VDEV_REBUILD_NONE) {10008nv = fnvlist_alloc();10009name = zpool_vdev_name(g_zfs, zhp,10010child[c], VDEV_NAME_TYPE_ID);10011fill_vdev_info(nv, zhp, name, B_FALSE,10012cb->cb_json_as_int);10013st = vrs->vrs_state;10014fnvlist_add_string(nv, "state",10015vdev_rebuild_state_str[st]);10016nice_num_str_nvlist(nv, "start_time",10017vrs->vrs_start_time, cb->cb_literal,10018cb->cb_json_as_int,10019ZFS_NICE_TIMESTAMP);10020nice_num_str_nvlist(nv, "end_time",10021vrs->vrs_end_time, cb->cb_literal,10022cb->cb_json_as_int,10023ZFS_NICE_TIMESTAMP);10024nice_num_str_nvlist(nv, "scan_time",10025vrs->vrs_scan_time_ms * 1000000,10026cb->cb_literal, cb->cb_json_as_int,10027ZFS_NICENUM_TIME);10028nice_num_str_nvlist(nv, "scanned",10029vrs->vrs_bytes_scanned,10030cb->cb_literal, cb->cb_json_as_int,10031ZFS_NICENUM_BYTES);10032nice_num_str_nvlist(nv, "issued",10033vrs->vrs_bytes_issued,10034cb->cb_literal, cb->cb_json_as_int,10035ZFS_NICENUM_BYTES);10036nice_num_str_nvlist(nv, "rebuilt",10037vrs->vrs_bytes_rebuilt,10038cb->cb_literal, cb->cb_json_as_int,10039ZFS_NICENUM_BYTES);10040nice_num_str_nvlist(nv, "to_scan",10041vrs->vrs_bytes_est, cb->cb_literal,10042cb->cb_json_as_int,10043ZFS_NICENUM_BYTES);10044nice_num_str_nvlist(nv, "errors",10045vrs->vrs_errors, cb->cb_literal,10046cb->cb_json_as_int,10047ZFS_NICENUM_1024);10048nice_num_str_nvlist(nv, "pass_time",10049vrs->vrs_pass_time_ms * 1000000,10050cb->cb_literal, cb->cb_json_as_int,10051ZFS_NICENUM_TIME);10052nice_num_str_nvlist(nv, "pass_scanned",10053vrs->vrs_pass_bytes_scanned,10054cb->cb_literal, cb->cb_json_as_int,10055ZFS_NICENUM_BYTES);10056nice_num_str_nvlist(nv, "pass_issued",10057vrs->vrs_pass_bytes_issued,10058cb->cb_literal, cb->cb_json_as_int,10059ZFS_NICENUM_BYTES);10060nice_num_str_nvlist(nv, "pass_skipped",10061vrs->vrs_pass_bytes_skipped,10062cb->cb_literal, cb->cb_json_as_int,10063ZFS_NICENUM_BYTES);10064fnvlist_add_nvlist(rebuild, name, nv);10065free(name);10066}10067}10068}10069if (!nvlist_empty(rebuild))10070fnvlist_add_nvlist(scan, "rebuild_stats", rebuild);10071fnvlist_free(rebuild);10072}1007310074if (!nvlist_empty(scan))10075fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan);10076fnvlist_free(scan);10077}1007810079/*10080* Print the scan status.10081*/10082static void10083print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)10084{10085uint64_t rebuild_end_time = 0, resilver_end_time = 0;10086boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;10087boolean_t have_errorscrub = B_FALSE;10088boolean_t active_resilver = B_FALSE;10089pool_checkpoint_stat_t *pcs = NULL;10090pool_scan_stat_t *ps = NULL;10091uint_t c;10092time_t scrub_start = 0, errorscrub_start = 0;1009310094if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,10095(uint64_t **)&ps, &c) == 0) {10096if (ps->pss_func == POOL_SCAN_RESILVER) {10097resilver_end_time = ps->pss_end_time;10098active_resilver = (ps->pss_state == DSS_SCANNING);10099}1010010101have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);10102have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);10103scrub_start = ps->pss_start_time;10104if (c > offsetof(pool_scan_stat_t,10105pss_pass_error_scrub_pause) / 8) {10106have_errorscrub = (ps->pss_error_scrub_func ==10107POOL_SCAN_ERRORSCRUB);10108errorscrub_start = ps->pss_error_scrub_start;10109}10110}1011110112boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);10113boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));1011410115/* Always print the scrub status when available. */10116if (have_scrub && scrub_start > errorscrub_start)10117print_scan_scrub_resilver_status(ps);10118else if (have_errorscrub && errorscrub_start >= scrub_start)10119print_err_scrub_status(ps);1012010121/*10122* When there is an active resilver or rebuild print its status.10123* Otherwise print the status of the last resilver or rebuild.10124*/10125if (active_resilver || (!active_rebuild && have_resilver &&10126resilver_end_time && resilver_end_time > rebuild_end_time)) {10127print_scan_scrub_resilver_status(ps);10128} else if (active_rebuild || (!active_resilver && have_rebuild &&10129rebuild_end_time && rebuild_end_time > resilver_end_time)) {10130print_rebuild_status(zhp, nvroot);10131}1013210133(void) nvlist_lookup_uint64_array(nvroot,10134ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);10135print_checkpoint_scan_warning(ps, pcs);10136}1013710138/*10139* Print out detailed removal status.10140*/10141static void10142print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)10143{10144char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];10145time_t start, end;10146nvlist_t *config, *nvroot;10147nvlist_t **child;10148uint_t children;10149char *vdev_name;1015010151if (prs == NULL || prs->prs_state == DSS_NONE)10152return;1015310154/*10155* Determine name of vdev.10156*/10157config = zpool_get_config(zhp, NULL);10158nvroot = fnvlist_lookup_nvlist(config,10159ZPOOL_CONFIG_VDEV_TREE);10160verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,10161&child, &children) == 0);10162assert(prs->prs_removing_vdev < children);10163vdev_name = zpool_vdev_name(g_zfs, zhp,10164child[prs->prs_removing_vdev], B_TRUE);1016510166(void) printf_color(ANSI_BOLD, gettext("remove: "));1016710168start = prs->prs_start_time;10169end = prs->prs_end_time;10170zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));1017110172/*10173* Removal is finished or canceled.10174*/10175if (prs->prs_state == DSS_FINISHED) {10176uint64_t minutes_taken = (end - start) / 60;1017710178(void) printf(gettext("Removal of vdev %llu copied %s "10179"in %lluh%um, completed on %s"),10180(longlong_t)prs->prs_removing_vdev,10181copied_buf,10182(u_longlong_t)(minutes_taken / 60),10183(uint_t)(minutes_taken % 60),10184ctime((time_t *)&end));10185} else if (prs->prs_state == DSS_CANCELED) {10186(void) printf(gettext("Removal of %s canceled on %s"),10187vdev_name, ctime(&end));10188} else {10189uint64_t copied, total, elapsed, rate, mins_left, hours_left;10190double fraction_done;1019110192assert(prs->prs_state == DSS_SCANNING);1019310194/*10195* Removal is in progress.10196*/10197(void) printf(gettext(10198"Evacuation of %s in progress since %s"),10199vdev_name, ctime(&start));1020010201copied = prs->prs_copied > 0 ? prs->prs_copied : 1;10202total = prs->prs_to_copy;10203fraction_done = (double)copied / total;1020410205/* elapsed time for this pass */10206elapsed = time(NULL) - prs->prs_start_time;10207elapsed = elapsed > 0 ? elapsed : 1;10208rate = copied / elapsed;10209rate = rate > 0 ? rate : 1;10210mins_left = ((total - copied) / rate) / 60;10211hours_left = mins_left / 60;1021210213zfs_nicenum(copied, examined_buf, sizeof (examined_buf));10214zfs_nicenum(total, total_buf, sizeof (total_buf));10215zfs_nicenum(rate, rate_buf, sizeof (rate_buf));1021610217/*10218* do not print estimated time if hours_left is more than10219* 30 days10220*/10221(void) printf(gettext(10222"\t%s copied out of %s at %s/s, %.2f%% done"),10223examined_buf, total_buf, rate_buf, 100 * fraction_done);10224if (hours_left < (30 * 24)) {10225(void) printf(gettext(", %lluh%um to go\n"),10226(u_longlong_t)hours_left, (uint_t)(mins_left % 60));10227} else {10228(void) printf(gettext(10229", (copy is slow, no estimated time)\n"));10230}10231}10232free(vdev_name);1023310234if (prs->prs_mapping_memory > 0) {10235char mem_buf[7];10236zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));10237(void) printf(gettext(10238"\t%s memory used for removed device mappings\n"),10239mem_buf);10240}10241}1024210243/*10244* Print out detailed raidz expansion status.10245*/10246static void10247print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)10248{10249char copied_buf[7];1025010251if (pres == NULL || pres->pres_state == DSS_NONE)10252return;1025310254/*10255* Determine name of vdev.10256*/10257nvlist_t *config = zpool_get_config(zhp, NULL);10258nvlist_t *nvroot = fnvlist_lookup_nvlist(config,10259ZPOOL_CONFIG_VDEV_TREE);10260nvlist_t **child;10261uint_t children;10262verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,10263&child, &children) == 0);10264assert(pres->pres_expanding_vdev < children);1026510266(void) printf_color(ANSI_BOLD, gettext("expand: "));1026710268time_t start = pres->pres_start_time;10269time_t end = pres->pres_end_time;10270char *vname =10271zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);10272zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));1027310274/*10275* Expansion is finished or canceled.10276*/10277if (pres->pres_state == DSS_FINISHED) {10278char time_buf[32];10279secs_to_dhms(end - start, time_buf);1028010281(void) printf(gettext("expanded %s-%u copied %s in %s, "10282"on %s"), vname, (int)pres->pres_expanding_vdev,10283copied_buf, time_buf, ctime((time_t *)&end));10284} else {10285char examined_buf[7], total_buf[7], rate_buf[7];10286uint64_t copied, total, elapsed, rate, secs_left;10287double fraction_done;1028810289assert(pres->pres_state == DSS_SCANNING);1029010291/*10292* Expansion is in progress.10293*/10294(void) printf(gettext(10295"expansion of %s-%u in progress since %s"),10296vname, (int)pres->pres_expanding_vdev, ctime(&start));1029710298copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;10299total = pres->pres_to_reflow;10300fraction_done = (double)copied / total;1030110302/* elapsed time for this pass */10303elapsed = time(NULL) - pres->pres_start_time;10304elapsed = elapsed > 0 ? elapsed : 1;10305rate = copied / elapsed;10306rate = rate > 0 ? rate : 1;10307secs_left = (total - copied) / rate;1030810309zfs_nicenum(copied, examined_buf, sizeof (examined_buf));10310zfs_nicenum(total, total_buf, sizeof (total_buf));10311zfs_nicenum(rate, rate_buf, sizeof (rate_buf));1031210313/*10314* do not print estimated time if hours_left is more than10315* 30 days10316*/10317(void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),10318examined_buf, total_buf, rate_buf, 100 * fraction_done);10319if (pres->pres_waiting_for_resilver) {10320(void) printf(gettext(", paused for resilver or "10321"clear\n"));10322} else if (secs_left < (30 * 24 * 3600)) {10323char time_buf[32];10324secs_to_dhms(secs_left, time_buf);10325(void) printf(gettext(", %s to go\n"), time_buf);10326} else {10327(void) printf(gettext(10328", (copy is slow, no estimated time)\n"));10329}10330}10331free(vname);10332}10333static void10334print_checkpoint_status(pool_checkpoint_stat_t *pcs)10335{10336time_t start;10337char space_buf[7];1033810339if (pcs == NULL || pcs->pcs_state == CS_NONE)10340return;1034110342(void) printf(gettext("checkpoint: "));1034310344start = pcs->pcs_start_time;10345zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));1034610347if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {10348char *date = ctime(&start);1034910350/*10351* ctime() adds a newline at the end of the generated10352* string, thus the weird format specifier and the10353* strlen() call used to chop it off from the output.10354*/10355(void) printf(gettext("created %.*s, consumes %s\n"),10356(int)(strlen(date) - 1), date, space_buf);10357return;10358}1035910360assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);1036110362(void) printf(gettext("discarding, %s remaining.\n"),10363space_buf);10364}1036510366static void10367print_error_log(zpool_handle_t *zhp)10368{10369nvlist_t *nverrlist = NULL;10370nvpair_t *elem;10371char *pathname;10372size_t len = MAXPATHLEN * 2;1037310374if (zpool_get_errlog(zhp, &nverrlist) != 0)10375return;1037610377(void) printf("errors: Permanent errors have been "10378"detected in the following files:\n\n");1037910380pathname = safe_malloc(len);10381elem = NULL;10382while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {10383nvlist_t *nv;10384uint64_t dsobj, obj;1038510386verify(nvpair_value_nvlist(elem, &nv) == 0);10387verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,10388&dsobj) == 0);10389verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,10390&obj) == 0);10391zpool_obj_to_path(zhp, dsobj, obj, pathname, len);10392(void) printf("%7s %s\n", "", pathname);10393}10394free(pathname);10395nvlist_free(nverrlist);10396}1039710398static void10399print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,10400uint_t nspares)10401{10402uint_t i;10403char *name;1040410405if (nspares == 0)10406return;1040710408(void) printf(gettext("\tspares\n"));1040910410for (i = 0; i < nspares; i++) {10411name = zpool_vdev_name(g_zfs, zhp, spares[i],10412cb->cb_name_flags);10413print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);10414free(name);10415}10416}1041710418static void10419print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,10420uint_t nl2cache)10421{10422uint_t i;10423char *name;1042410425if (nl2cache == 0)10426return;1042710428(void) printf(gettext("\tcache\n"));1042910430for (i = 0; i < nl2cache; i++) {10431name = zpool_vdev_name(g_zfs, zhp, l2cache[i],10432cb->cb_name_flags);10433print_status_config(zhp, cb, name, l2cache[i], 2,10434B_FALSE, NULL);10435free(name);10436}10437}1043810439static void10440print_dedup_stats(zpool_handle_t *zhp, nvlist_t *config, boolean_t literal)10441{10442ddt_histogram_t *ddh;10443ddt_stat_t *dds;10444ddt_object_t *ddo;10445uint_t c;10446/* Extra space provided for literal display */10447char dspace[32], mspace[32], cspace[32];10448uint64_t cspace_prop;10449enum zfs_nicenum_format format;10450zprop_source_t src;1045110452/*10453* If the pool was faulted then we may not have been able to10454* obtain the config. Otherwise, if we have anything in the dedup10455* table continue processing the stats.10456*/10457if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,10458(uint64_t **)&ddo, &c) != 0)10459return;1046010461(void) printf("\n");10462(void) printf(gettext(" dedup: "));10463if (ddo->ddo_count == 0) {10464(void) printf(gettext("no DDT entries\n"));10465return;10466}1046710468/*10469* Squash cached size into in-core size to handle race.10470* Only include cached size if it is available.10471*/10472cspace_prop = zpool_get_prop_int(zhp, ZPOOL_PROP_DEDUPCACHED, &src);10473cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);10474format = literal ? ZFS_NICENUM_RAW : ZFS_NICENUM_1024;10475zfs_nicenum_format(cspace_prop, cspace, sizeof (cspace), format);10476zfs_nicenum_format(ddo->ddo_dspace, dspace, sizeof (dspace), format);10477zfs_nicenum_format(ddo->ddo_mspace, mspace, sizeof (mspace), format);10478(void) printf("DDT entries %llu, size %s on disk, %s in core",10479(u_longlong_t)ddo->ddo_count,10480dspace,10481mspace);10482if (src != ZPROP_SRC_DEFAULT) {10483(void) printf(", %s cached (%.02f%%)",10484cspace,10485(double)cspace_prop / (double)ddo->ddo_mspace * 100.0);10486}10487(void) printf("\n");1048810489verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,10490(uint64_t **)&dds, &c) == 0);10491verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,10492(uint64_t **)&ddh, &c) == 0);10493zpool_dump_ddt(dds, ddh);10494}1049510496#define ST_SIZE 409610497#define AC_SIZE 20481049810499static void10500print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,10501zpool_status_t reason, zpool_errata_t errata, nvlist_t *item)10502{10503char status[ST_SIZE];10504char action[AC_SIZE];10505memset(status, 0, ST_SIZE);10506memset(action, 0, AC_SIZE);1050710508switch (reason) {10509case ZPOOL_STATUS_MISSING_DEV_R:10510(void) snprintf(status, ST_SIZE,10511gettext("One or more devices could "10512"not be opened. Sufficient replicas exist for\n\tthe pool "10513"to continue functioning in a degraded state.\n"));10514(void) snprintf(action, AC_SIZE,10515gettext("Attach the missing device "10516"and online it using 'zpool online'.\n"));10517break;1051810519case ZPOOL_STATUS_MISSING_DEV_NR:10520(void) snprintf(status, ST_SIZE,10521gettext("One or more devices could "10522"not be opened. There are insufficient\n\treplicas for the"10523" pool to continue functioning.\n"));10524(void) snprintf(action, AC_SIZE,10525gettext("Attach the missing device "10526"and online it using 'zpool online'.\n"));10527break;1052810529case ZPOOL_STATUS_CORRUPT_LABEL_R:10530(void) snprintf(status, ST_SIZE,10531gettext("One or more devices could "10532"not be used because the label is missing or\n\tinvalid. "10533"Sufficient replicas exist for the pool to continue\n\t"10534"functioning in a degraded state.\n"));10535(void) snprintf(action, AC_SIZE,10536gettext("Replace the device using 'zpool replace'.\n"));10537break;1053810539case ZPOOL_STATUS_CORRUPT_LABEL_NR:10540(void) snprintf(status, ST_SIZE,10541gettext("One or more devices could "10542"not be used because the label is missing \n\tor invalid. "10543"There are insufficient replicas for the pool to "10544"continue\n\tfunctioning.\n"));10545zpool_explain_recover(zpool_get_handle(zhp),10546zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),10547action, AC_SIZE);10548break;1054910550case ZPOOL_STATUS_FAILING_DEV:10551(void) snprintf(status, ST_SIZE,10552gettext("One or more devices has "10553"experienced an unrecoverable error. An\n\tattempt was "10554"made to correct the error. Applications are "10555"unaffected.\n"));10556(void) snprintf(action, AC_SIZE, gettext("Determine if the "10557"device needs to be replaced, and clear the errors\n\tusing"10558" 'zpool clear' or replace the device with 'zpool "10559"replace'.\n"));10560break;1056110562case ZPOOL_STATUS_OFFLINE_DEV:10563(void) snprintf(status, ST_SIZE,10564gettext("One or more devices has "10565"been taken offline by the administrator.\n\tSufficient "10566"replicas exist for the pool to continue functioning in "10567"a\n\tdegraded state.\n"));10568(void) snprintf(action, AC_SIZE, gettext("Online the device "10569"using 'zpool online' or replace the device with\n\t'zpool "10570"replace'.\n"));10571break;1057210573case ZPOOL_STATUS_REMOVED_DEV:10574(void) snprintf(status, ST_SIZE,10575gettext("One or more devices have "10576"been removed.\n\tSufficient replicas exist for the pool "10577"to continue functioning in a\n\tdegraded state.\n"));10578(void) snprintf(action, AC_SIZE, gettext("Online the device "10579"using zpool online' or replace the device with\n\t'zpool "10580"replace'.\n"));10581break;1058210583case ZPOOL_STATUS_RESILVERING:10584case ZPOOL_STATUS_REBUILDING:10585(void) snprintf(status, ST_SIZE,10586gettext("One or more devices is "10587"currently being resilvered. The pool will\n\tcontinue "10588"to function, possibly in a degraded state.\n"));10589(void) snprintf(action, AC_SIZE,10590gettext("Wait for the resilver to complete.\n"));10591break;1059210593case ZPOOL_STATUS_REBUILD_SCRUB:10594(void) snprintf(status, ST_SIZE,10595gettext("One or more devices have "10596"been sequentially resilvered, scrubbing\n\tthe pool "10597"is recommended.\n"));10598(void) snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to "10599"verify all data checksums.\n"));10600break;1060110602case ZPOOL_STATUS_CORRUPT_DATA:10603(void) snprintf(status, ST_SIZE,10604gettext("One or more devices has "10605"experienced an error resulting in data\n\tcorruption. "10606"Applications may be affected.\n"));10607(void) snprintf(action, AC_SIZE,10608gettext("Restore the file in question"10609" if possible. Otherwise restore the\n\tentire pool from "10610"backup.\n"));10611break;1061210613case ZPOOL_STATUS_CORRUPT_POOL:10614(void) snprintf(status, ST_SIZE, gettext("The pool metadata is "10615"corrupted and the pool cannot be opened.\n"));10616zpool_explain_recover(zpool_get_handle(zhp),10617zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),10618action, AC_SIZE);10619break;1062010621case ZPOOL_STATUS_VERSION_OLDER:10622(void) snprintf(status, ST_SIZE,10623gettext("The pool is formatted using "10624"a legacy on-disk format. The pool can\n\tstill be used, "10625"but some features are unavailable.\n"));10626(void) snprintf(action, AC_SIZE,10627gettext("Upgrade the pool using "10628"'zpool upgrade'. Once this is done, the\n\tpool will no "10629"longer be accessible on software that does not support\n\t"10630"feature flags.\n"));10631break;1063210633case ZPOOL_STATUS_VERSION_NEWER:10634(void) snprintf(status, ST_SIZE,10635gettext("The pool has been upgraded "10636"to a newer, incompatible on-disk version.\n\tThe pool "10637"cannot be accessed on this system.\n"));10638(void) snprintf(action, AC_SIZE,10639gettext("Access the pool from a "10640"system running more recent software, or\n\trestore the "10641"pool from backup.\n"));10642break;1064310644case ZPOOL_STATUS_FEAT_DISABLED:10645(void) snprintf(status, ST_SIZE, gettext("Some supported and "10646"requested features are not enabled on the pool.\n\t"10647"The pool can still be used, but some features are "10648"unavailable.\n"));10649(void) snprintf(action, AC_SIZE,10650gettext("Enable all features using "10651"'zpool upgrade'. Once this is done,\n\tthe pool may no "10652"longer be accessible by software that does not support\n\t"10653"the features. See zpool-features(7) for details.\n"));10654break;1065510656case ZPOOL_STATUS_COMPATIBILITY_ERR:10657(void) snprintf(status, ST_SIZE, gettext("This pool has a "10658"compatibility list specified, but it could not be\n\t"10659"read/parsed at this time. The pool can still be used, "10660"but this\n\tshould be investigated.\n"));10661(void) snprintf(action, AC_SIZE,10662gettext("Check the value of the "10663"'compatibility' property against the\n\t"10664"appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "10665ZPOOL_DATA_COMPAT_D ".\n"));10666break;1066710668case ZPOOL_STATUS_INCOMPATIBLE_FEAT:10669(void) snprintf(status, ST_SIZE, gettext("One or more features "10670"are enabled on the pool despite not being\n\t"10671"requested by the 'compatibility' property.\n"));10672(void) snprintf(action, AC_SIZE, gettext("Consider setting "10673"'compatibility' to an appropriate value, or\n\t"10674"adding needed features to the relevant file in\n\t"10675ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));10676break;1067710678case ZPOOL_STATUS_UNSUP_FEAT_READ:10679(void) snprintf(status, ST_SIZE,10680gettext("The pool cannot be accessed "10681"on this system because it uses the\n\tfollowing feature(s)"10682" not supported on this system:\n"));10683zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,106841024);10685(void) snprintf(action, AC_SIZE,10686gettext("Access the pool from a "10687"system that supports the required feature(s),\n\tor "10688"restore the pool from backup.\n"));10689break;1069010691case ZPOOL_STATUS_UNSUP_FEAT_WRITE:10692(void) snprintf(status, ST_SIZE, gettext("The pool can only be "10693"accessed in read-only mode on this system. It\n\tcannot be"10694" accessed in read-write mode because it uses the "10695"following\n\tfeature(s) not supported on this system:\n"));10696zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,106971024);10698(void) snprintf(action, AC_SIZE,10699gettext("The pool cannot be accessed "10700"in read-write mode. Import the pool with\n"10701"\t\"-o readonly=on\", access the pool from a system that "10702"supports the\n\trequired feature(s), or restore the "10703"pool from backup.\n"));10704break;1070510706case ZPOOL_STATUS_FAULTED_DEV_R:10707(void) snprintf(status, ST_SIZE,10708gettext("One or more devices are "10709"faulted in response to persistent errors.\n\tSufficient "10710"replicas exist for the pool to continue functioning "10711"in a\n\tdegraded state.\n"));10712(void) snprintf(action, AC_SIZE,10713gettext("Replace the faulted device, "10714"or use 'zpool clear' to mark the device\n\trepaired.\n"));10715break;1071610717case ZPOOL_STATUS_FAULTED_DEV_NR:10718(void) snprintf(status, ST_SIZE,10719gettext("One or more devices are "10720"faulted in response to persistent errors. There are "10721"insufficient replicas for the pool to\n\tcontinue "10722"functioning.\n"));10723(void) snprintf(action, AC_SIZE,10724gettext("Destroy and re-create the "10725"pool from a backup source. Manually marking the device\n"10726"\trepaired using 'zpool clear' may allow some data "10727"to be recovered.\n"));10728break;1072910730case ZPOOL_STATUS_IO_FAILURE_MMP:10731(void) snprintf(status, ST_SIZE,10732gettext("The pool is suspended "10733"because multihost writes failed or were delayed;\n\t"10734"another system could import the pool undetected.\n"));10735(void) snprintf(action, AC_SIZE,10736gettext("Make sure the pool's devices"10737" are connected, then reboot your system and\n\timport the "10738"pool or run 'zpool clear' to resume the pool.\n"));10739break;1074010741case ZPOOL_STATUS_IO_FAILURE_WAIT:10742case ZPOOL_STATUS_IO_FAILURE_CONTINUE:10743(void) snprintf(status, ST_SIZE,10744gettext("One or more devices are "10745"faulted in response to IO failures.\n"));10746(void) snprintf(action, AC_SIZE,10747gettext("Make sure the affected "10748"devices are connected, then run 'zpool clear'.\n"));10749break;1075010751case ZPOOL_STATUS_BAD_LOG:10752(void) snprintf(status, ST_SIZE, gettext("An intent log record "10753"could not be read.\n"10754"\tWaiting for administrator intervention to fix the "10755"faulted pool.\n"));10756(void) snprintf(action, AC_SIZE,10757gettext("Either restore the affected "10758"device(s) and run 'zpool online',\n"10759"\tor ignore the intent log records by running "10760"'zpool clear'.\n"));10761break;1076210763case ZPOOL_STATUS_NON_NATIVE_ASHIFT:10764(void) snprintf(status, ST_SIZE,10765gettext("One or more devices are "10766"configured to use a non-native block size.\n"10767"\tExpect reduced performance.\n"));10768(void) snprintf(action, AC_SIZE,10769gettext("Replace affected devices "10770"with devices that support the\n\tconfigured block size, "10771"or migrate data to a properly configured\n\tpool.\n"));10772break;1077310774case ZPOOL_STATUS_HOSTID_MISMATCH:10775(void) snprintf(status, ST_SIZE,10776gettext("Mismatch between pool hostid"10777" and system hostid on imported pool.\n\tThis pool was "10778"previously imported into a system with a different "10779"hostid,\n\tand then was verbatim imported into this "10780"system.\n"));10781(void) snprintf(action, AC_SIZE,10782gettext("Export this pool on all "10783"systems on which it is imported.\n"10784"\tThen import it to correct the mismatch.\n"));10785break;1078610787case ZPOOL_STATUS_ERRATA:10788(void) snprintf(status, ST_SIZE,10789gettext("Errata #%d detected.\n"), errata);10790switch (errata) {10791case ZPOOL_ERRATA_NONE:10792break;1079310794case ZPOOL_ERRATA_ZOL_2094_SCRUB:10795(void) snprintf(action, AC_SIZE,10796gettext("To correct the issue run "10797"'zpool scrub'.\n"));10798break;1079910800case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:10801(void) strlcat(status, gettext("\tExisting encrypted "10802"datasets contain an on-disk incompatibility\n\t "10803"which needs to be corrected.\n"), ST_SIZE);10804(void) snprintf(action, AC_SIZE,10805gettext("To correct the issue"10806" backup existing encrypted datasets to new\n\t"10807"encrypted datasets and destroy the old ones. "10808"'zfs mount -o ro' can\n\tbe used to temporarily "10809"mount existing encrypted datasets readonly.\n"));10810break;1081110812case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:10813(void) strlcat(status, gettext("\tExisting encrypted "10814"snapshots and bookmarks contain an on-disk\n\t"10815"incompatibility. This may cause on-disk "10816"corruption if they are used\n\twith "10817"'zfs recv'.\n"), ST_SIZE);10818(void) snprintf(action, AC_SIZE,10819gettext("To correct the"10820"issue, enable the bookmark_v2 feature. No "10821"additional\n\taction is needed if there are no "10822"encrypted snapshots or bookmarks.\n\tIf preserving"10823"the encrypted snapshots and bookmarks is required,"10824" use\n\ta non-raw send to backup and restore them."10825" Alternately, they may be\n\tremoved to resolve "10826"the incompatibility.\n"));10827break;1082810829default:10830/*10831* All errata which allow the pool to be imported10832* must contain an action message.10833*/10834assert(0);10835}10836break;1083710838default:10839/*10840* The remaining errors can't actually be generated, yet.10841*/10842assert(reason == ZPOOL_STATUS_OK);10843}1084410845if (status[0] != 0) {10846if (cbp->cb_json)10847fnvlist_add_string(item, "status", status);10848else {10849(void) printf_color(ANSI_BOLD, gettext("status: "));10850(void) printf_color(ANSI_YELLOW, status);10851}10852}1085310854if (action[0] != 0) {10855if (cbp->cb_json)10856fnvlist_add_string(item, "action", action);10857else {10858(void) printf_color(ANSI_BOLD, gettext("action: "));10859(void) printf_color(ANSI_YELLOW, action);10860}10861}10862}1086310864static int10865status_callback_json(zpool_handle_t *zhp, void *data)10866{10867status_cbdata_t *cbp = data;10868nvlist_t *config, *nvroot;10869const char *msgid;10870char pool_guid[256];10871char msgbuf[256];10872uint64_t guid;10873zpool_status_t reason;10874zpool_errata_t errata;10875uint_t c;10876vdev_stat_t *vs;10877nvlist_t *item, *d, *load_info, *vds;1087810879/* If dedup stats were requested, also fetch dedupcached. */10880if (cbp->cb_dedup_stats > 1)10881zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);10882reason = zpool_get_status(zhp, &msgid, &errata);10883/*10884* If we were given 'zpool status -x', only report those pools with10885* problems.10886*/10887if (cbp->cb_explain &&10888(reason == ZPOOL_STATUS_OK ||10889reason == ZPOOL_STATUS_VERSION_OLDER ||10890reason == ZPOOL_STATUS_FEAT_DISABLED ||10891reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||10892reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {10893return (0);10894}1089510896d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");10897item = fnvlist_alloc();10898vds = fnvlist_alloc();10899fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int);10900config = zpool_get_config(zhp, NULL);1090110902if (config != NULL) {10903nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);10904verify(nvlist_lookup_uint64_array(nvroot,10905ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0);10906if (cbp->cb_json_pool_key_guid) {10907guid = fnvlist_lookup_uint64(config,10908ZPOOL_CONFIG_POOL_GUID);10909(void) snprintf(pool_guid, 256, "%llu",10910(u_longlong_t)guid);10911}10912cbp->cb_count++;1091310914print_status_reason(zhp, cbp, reason, errata, item);10915if (msgid != NULL) {10916(void) snprintf(msgbuf, 256,10917"https://openzfs.github.io/openzfs-docs/msg/%s",10918msgid);10919fnvlist_add_string(item, "msgid", msgid);10920fnvlist_add_string(item, "moreinfo", msgbuf);10921}1092210923if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,10924&load_info) == 0) {10925fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO,10926load_info);10927}1092810929scan_status_nvlist(zhp, cbp, nvroot, item);10930removal_status_nvlist(zhp, cbp, nvroot, item);10931checkpoint_status_nvlist(nvroot, cbp, item);10932raidz_expand_status_nvlist(zhp, cbp, nvroot, item);10933vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds);10934if (cbp->cb_flat_vdevs) {10935class_vdevs_nvlist(zhp, cbp, nvroot,10936VDEV_ALLOC_BIAS_DEDUP, vds);10937class_vdevs_nvlist(zhp, cbp, nvroot,10938VDEV_ALLOC_BIAS_SPECIAL, vds);10939class_vdevs_nvlist(zhp, cbp, nvroot,10940VDEV_ALLOC_CLASS_LOGS, vds);10941l2cache_nvlist(zhp, cbp, nvroot, vds);10942spares_nvlist(zhp, cbp, nvroot, vds);1094310944fnvlist_add_nvlist(item, "vdevs", vds);10945fnvlist_free(vds);10946} else {10947fnvlist_add_nvlist(item, "vdevs", vds);10948fnvlist_free(vds);1094910950class_vdevs_nvlist(zhp, cbp, nvroot,10951VDEV_ALLOC_BIAS_DEDUP, item);10952class_vdevs_nvlist(zhp, cbp, nvroot,10953VDEV_ALLOC_BIAS_SPECIAL, item);10954class_vdevs_nvlist(zhp, cbp, nvroot,10955VDEV_ALLOC_CLASS_LOGS, item);10956l2cache_nvlist(zhp, cbp, nvroot, item);10957spares_nvlist(zhp, cbp, nvroot, item);10958}10959dedup_stats_nvlist(zhp, cbp, item);10960errors_nvlist(zhp, cbp, item);10961}10962if (cbp->cb_json_pool_key_guid) {10963fnvlist_add_nvlist(d, pool_guid, item);10964} else {10965fnvlist_add_nvlist(d, zpool_get_name(zhp),10966item);10967}10968fnvlist_free(item);10969return (0);10970}1097110972/*10973* Display a summary of pool status. Displays a summary such as:10974*10975* pool: tank10976* status: DEGRADED10977* reason: One or more devices ...10978* see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-0110979* config:10980* mirror DEGRADED10981* c1t0d0 OK10982* c2t0d0 UNAVAIL10983*10984* When given the '-v' option, we print out the complete config. If the '-e'10985* option is specified, then we print out error rate information as well.10986*/10987static int10988status_callback(zpool_handle_t *zhp, void *data)10989{10990status_cbdata_t *cbp = data;10991nvlist_t *config, *nvroot;10992const char *msgid;10993zpool_status_t reason;10994zpool_errata_t errata;10995const char *health;10996uint_t c;10997vdev_stat_t *vs;1099810999/* If dedup stats were requested, also fetch dedupcached. */11000if (cbp->cb_dedup_stats > 1)11001zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);1100211003config = zpool_get_config(zhp, NULL);11004reason = zpool_get_status(zhp, &msgid, &errata);1100511006cbp->cb_count++;1100711008/*11009* If we were given 'zpool status -x', only report those pools with11010* problems.11011*/11012if (cbp->cb_explain &&11013(reason == ZPOOL_STATUS_OK ||11014reason == ZPOOL_STATUS_VERSION_OLDER ||11015reason == ZPOOL_STATUS_FEAT_DISABLED ||11016reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||11017reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {11018if (!cbp->cb_allpools) {11019(void) printf(gettext("pool '%s' is healthy\n"),11020zpool_get_name(zhp));11021if (cbp->cb_first)11022cbp->cb_first = B_FALSE;11023}11024return (0);11025}1102611027if (cbp->cb_first)11028cbp->cb_first = B_FALSE;11029else11030(void) printf("\n");1103111032nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);11033verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,11034(uint64_t **)&vs, &c) == 0);1103511036health = zpool_get_state_str(zhp);1103711038printf(" ");11039(void) printf_color(ANSI_BOLD, gettext("pool:"));11040printf(" %s\n", zpool_get_name(zhp));11041(void) fputc(' ', stdout);11042(void) printf_color(ANSI_BOLD, gettext("state: "));1104311044(void) printf_color(health_str_to_color(health), "%s", health);1104511046(void) fputc('\n', stdout);11047print_status_reason(zhp, cbp, reason, errata, NULL);1104811049if (msgid != NULL) {11050printf(" ");11051(void) printf_color(ANSI_BOLD, gettext("see:"));11052printf(gettext(11053" https://openzfs.github.io/openzfs-docs/msg/%s\n"),11054msgid);11055}1105611057if (config != NULL) {11058uint64_t nerr;11059nvlist_t **spares, **l2cache;11060uint_t nspares, nl2cache;1106111062print_scan_status(zhp, nvroot);1106311064pool_removal_stat_t *prs = NULL;11065(void) nvlist_lookup_uint64_array(nvroot,11066ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);11067print_removal_status(zhp, prs);1106811069pool_checkpoint_stat_t *pcs = NULL;11070(void) nvlist_lookup_uint64_array(nvroot,11071ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);11072print_checkpoint_status(pcs);1107311074pool_raidz_expand_stat_t *pres = NULL;11075(void) nvlist_lookup_uint64_array(nvroot,11076ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);11077print_raidz_expand_status(zhp, pres);1107811079cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,11080cbp->cb_name_flags | VDEV_NAME_TYPE_ID);11081if (cbp->cb_namewidth < 10)11082cbp->cb_namewidth = 10;1108311084color_start(ANSI_BOLD);11085(void) printf(gettext("config:\n\n"));11086(void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),11087cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",11088"CKSUM");11089color_end();1109011091if (cbp->cb_print_slow_ios) {11092(void) printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));11093}1109411095if (cbp->cb_print_power) {11096(void) printf_color(ANSI_BOLD, " %5s",11097gettext("POWER"));11098}1109911100if (cbp->cb_print_dio_verify) {11101(void) printf_color(ANSI_BOLD, " %5s", gettext("DIO"));11102}1110311104if (cbp->vcdl != NULL)11105print_cmd_columns(cbp->vcdl, 0);1110611107printf("\n");1110811109print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,11110B_FALSE, NULL);1111111112print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);11113print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);11114print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);1111511116if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,11117&l2cache, &nl2cache) == 0)11118print_l2cache(zhp, cbp, l2cache, nl2cache);1111911120if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,11121&spares, &nspares) == 0)11122print_spares(zhp, cbp, spares, nspares);1112311124if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,11125&nerr) == 0) {11126(void) printf("\n");11127if (nerr == 0) {11128(void) printf(gettext(11129"errors: No known data errors\n"));11130} else if (!cbp->cb_verbose) {11131color_start(ANSI_RED);11132(void) printf(gettext("errors: %llu data "11133"errors, use '-v' for a list\n"),11134(u_longlong_t)nerr);11135color_end();11136} else {11137print_error_log(zhp);11138}11139}1114011141if (cbp->cb_dedup_stats)11142print_dedup_stats(zhp, config, cbp->cb_literal);11143} else {11144(void) printf(gettext("config: The configuration cannot be "11145"determined.\n"));11146}1114711148return (0);11149}1115011151/*11152* zpool status [-dDegiLpPstvx] [-c [script1,script2,...]] ...11153* [-j|--json [--json-flat-vdevs] [--json-int] ...11154* [--json-pool-key-guid]] [--power] [-T d|u] ...11155* [pool] [interval [count]]11156*11157* -c CMD For each vdev, run command CMD11158* -D Display dedup status (undocumented)11159* -d Display Direct I/O write verify errors11160* -e Display only unhealthy vdevs11161* -g Display guid for individual vdev name.11162* -i Display vdev initialization status.11163* -j [...] Display output in JSON format11164* --json-flat-vdevs Display vdevs in flat hierarchy11165* --json-int Display numbers in integer format instead of string11166* --json-pool-key-guid Use pool GUID as key for pool objects11167* -L Follow links when resolving vdev path name.11168* -P Display full path for vdev name.11169* -p Display values in parsable (exact) format.11170* --power Display vdev enclosure slot power status11171* -s Display slow IOs column.11172* -T Display a timestamp in date(1) or Unix format11173* -t Display vdev TRIM status.11174* -v Display complete error logs11175* -x Display only pools with potential problems11176*11177* Describes the health status of all pools or some subset.11178*/11179int11180zpool_do_status(int argc, char **argv)11181{11182int c;11183int ret;11184float interval = 0;11185unsigned long count = 0;11186status_cbdata_t cb = { 0 };11187nvlist_t *data;11188char *cmd = NULL;1118911190struct option long_options[] = {11191{"power", no_argument, NULL, ZPOOL_OPTION_POWER},11192{"json", no_argument, NULL, 'j'},11193{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},11194{"json-flat-vdevs", no_argument, NULL,11195ZPOOL_OPTION_JSON_FLAT_VDEVS},11196{"json-pool-key-guid", no_argument, NULL,11197ZPOOL_OPTION_POOL_KEY_GUID},11198{0, 0, 0, 0}11199};1120011201/* check options */11202while ((c = getopt_long(argc, argv, "c:jdDegiLpPstT:vx", long_options,11203NULL)) != -1) {11204switch (c) {11205case 'c':11206if (cmd != NULL) {11207fprintf(stderr,11208gettext("Can't set -c flag twice\n"));11209exit(1);11210}1121111212if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&11213!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {11214fprintf(stderr, gettext(11215"Can't run -c, disabled by "11216"ZPOOL_SCRIPTS_ENABLED.\n"));11217exit(1);11218}1121911220if ((getuid() <= 0 || geteuid() <= 0) &&11221!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {11222fprintf(stderr, gettext(11223"Can't run -c with root privileges "11224"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));11225exit(1);11226}11227cmd = optarg;11228break;11229case 'd':11230cb.cb_print_dio_verify = B_TRUE;11231break;11232case 'D':11233if (++cb.cb_dedup_stats > 2)11234cb.cb_dedup_stats = 2;11235break;11236case 'e':11237cb.cb_print_unhealthy = B_TRUE;11238break;11239case 'g':11240cb.cb_name_flags |= VDEV_NAME_GUID;11241break;11242case 'i':11243cb.cb_print_vdev_init = B_TRUE;11244break;11245case 'L':11246cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;11247break;11248case 'p':11249cb.cb_literal = B_TRUE;11250break;11251case 'P':11252cb.cb_name_flags |= VDEV_NAME_PATH;11253break;11254case 's':11255cb.cb_print_slow_ios = B_TRUE;11256break;11257case 't':11258cb.cb_print_vdev_trim = B_TRUE;11259break;11260case 'T':11261get_timestamp_arg(*optarg);11262break;11263case 'v':11264cb.cb_verbose = B_TRUE;11265break;11266case 'j':11267cb.cb_json = B_TRUE;11268break;11269case 'x':11270cb.cb_explain = B_TRUE;11271break;11272case ZPOOL_OPTION_POWER:11273cb.cb_print_power = B_TRUE;11274break;11275case ZPOOL_OPTION_JSON_FLAT_VDEVS:11276cb.cb_flat_vdevs = B_TRUE;11277break;11278case ZPOOL_OPTION_JSON_NUMS_AS_INT:11279cb.cb_json_as_int = B_TRUE;11280cb.cb_literal = B_TRUE;11281break;11282case ZPOOL_OPTION_POOL_KEY_GUID:11283cb.cb_json_pool_key_guid = B_TRUE;11284break;11285case '?':11286if (optopt == 'c') {11287print_zpool_script_list("status");11288exit(0);11289} else {11290fprintf(stderr,11291gettext("invalid option '%c'\n"), optopt);11292}11293usage(B_FALSE);11294}11295}1129611297argc -= optind;11298argv += optind;1129911300get_interval_count(&argc, argv, &interval, &count);1130111302if (argc == 0)11303cb.cb_allpools = B_TRUE;1130411305cb.cb_first = B_TRUE;11306cb.cb_print_status = B_TRUE;1130711308if (cb.cb_flat_vdevs && !cb.cb_json) {11309fprintf(stderr, gettext("'--json-flat-vdevs' only works with"11310" '-j' option\n"));11311usage(B_FALSE);11312}1131311314if (cb.cb_json_as_int && !cb.cb_json) {11315(void) fprintf(stderr, gettext("'--json-int' only works with"11316" '-j' option\n"));11317usage(B_FALSE);11318}1131911320if (!cb.cb_json && cb.cb_json_pool_key_guid) {11321(void) fprintf(stderr, gettext("'json-pool-key-guid' only"11322" works with '-j' option\n"));11323usage(B_FALSE);11324}1132511326for (;;) {11327if (cb.cb_json) {11328cb.cb_jsobj = zpool_json_schema(0, 1);11329data = fnvlist_alloc();11330fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);11331fnvlist_free(data);11332}1133311334if (timestamp_fmt != NODATE) {11335if (cb.cb_json) {11336if (cb.cb_json_as_int) {11337fnvlist_add_uint64(cb.cb_jsobj, "time",11338time(NULL));11339} else {11340char ts[128];11341get_timestamp(timestamp_fmt, ts, 128);11342fnvlist_add_string(cb.cb_jsobj, "time",11343ts);11344}11345} else11346print_timestamp(timestamp_fmt);11347}1134811349if (cmd != NULL)11350cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,11351NULL, NULL, 0, 0);1135211353if (cb.cb_json) {11354ret = for_each_pool(argc, argv, B_TRUE, NULL,11355ZFS_TYPE_POOL, cb.cb_literal,11356status_callback_json, &cb);11357} else {11358ret = for_each_pool(argc, argv, B_TRUE, NULL,11359ZFS_TYPE_POOL, cb.cb_literal,11360status_callback, &cb);11361}1136211363if (cb.vcdl != NULL)11364free_vdev_cmd_data_list(cb.vcdl);1136511366if (cb.cb_json) {11367if (ret == 0)11368zcmd_print_json(cb.cb_jsobj);11369else11370nvlist_free(cb.cb_jsobj);11371} else {11372if (argc == 0 && cb.cb_count == 0) {11373(void) fprintf(stderr, "%s",11374gettext("no pools available\n"));11375} else if (cb.cb_explain && cb.cb_first &&11376cb.cb_allpools) {11377(void) printf("%s",11378gettext("all pools are healthy\n"));11379}11380}1138111382if (ret != 0)11383return (ret);1138411385if (interval == 0)11386break;1138711388if (count != 0 && --count == 0)11389break;1139011391(void) fflush(stdout);11392(void) fsleep(interval);11393}1139411395return (0);11396}1139711398typedef struct upgrade_cbdata {11399int cb_first;11400int cb_argc;11401uint64_t cb_version;11402char **cb_argv;11403} upgrade_cbdata_t;1140411405static int11406check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)11407{11408int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);11409int *count = (int *)unsupp_fs;1141011411if (zfs_version > ZPL_VERSION) {11412(void) printf(gettext("%s (v%d) is not supported by this "11413"implementation of ZFS.\n"),11414zfs_get_name(zhp), zfs_version);11415(*count)++;11416}1141711418(void) zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);1141911420zfs_close(zhp);1142111422return (0);11423}1142411425static int11426upgrade_version(zpool_handle_t *zhp, uint64_t version)11427{11428int ret;11429nvlist_t *config;11430uint64_t oldversion;11431int unsupp_fs = 0;1143211433config = zpool_get_config(zhp, NULL);11434verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,11435&oldversion) == 0);1143611437char compat[ZFS_MAXPROPLEN];11438if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,11439ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)11440compat[0] = '\0';1144111442assert(SPA_VERSION_IS_SUPPORTED(oldversion));11443assert(oldversion < version);1144411445ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);11446if (ret != 0)11447return (ret);1144811449if (unsupp_fs) {11450(void) fprintf(stderr, gettext("Upgrade not performed due "11451"to %d unsupported filesystems (max v%d).\n"),11452unsupp_fs, (int)ZPL_VERSION);11453return (1);11454}1145511456if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {11457(void) fprintf(stderr, gettext("Upgrade not performed because "11458"'compatibility' property set to '"11459ZPOOL_COMPAT_LEGACY "'.\n"));11460return (1);11461}1146211463ret = zpool_upgrade(zhp, version);11464if (ret != 0)11465return (ret);1146611467if (version >= SPA_VERSION_FEATURES) {11468(void) printf(gettext("Successfully upgraded "11469"'%s' from version %llu to feature flags.\n"),11470zpool_get_name(zhp), (u_longlong_t)oldversion);11471} else {11472(void) printf(gettext("Successfully upgraded "11473"'%s' from version %llu to version %llu.\n"),11474zpool_get_name(zhp), (u_longlong_t)oldversion,11475(u_longlong_t)version);11476}1147711478return (0);11479}1148011481static int11482upgrade_enable_all(zpool_handle_t *zhp, int *countp)11483{11484int i, ret, count;11485boolean_t firstff = B_TRUE;11486nvlist_t *enabled = zpool_get_features(zhp);1148711488char compat[ZFS_MAXPROPLEN];11489if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,11490ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)11491compat[0] = '\0';1149211493boolean_t requested_features[SPA_FEATURES];11494if (zpool_do_load_compat(compat, requested_features) !=11495ZPOOL_COMPATIBILITY_OK)11496return (-1);1149711498count = 0;11499for (i = 0; i < SPA_FEATURES; i++) {11500const char *fname = spa_feature_table[i].fi_uname;11501const char *fguid = spa_feature_table[i].fi_guid;1150211503if (!spa_feature_table[i].fi_zfs_mod_supported ||11504(spa_feature_table[i].fi_flags & ZFEATURE_FLAG_NO_UPGRADE))11505continue;1150611507if (!nvlist_exists(enabled, fguid) && requested_features[i]) {11508char *propname;11509verify(-1 != asprintf(&propname, "feature@%s", fname));11510ret = zpool_set_prop(zhp, propname,11511ZFS_FEATURE_ENABLED);11512if (ret != 0) {11513free(propname);11514return (ret);11515}11516count++;1151711518if (firstff) {11519(void) printf(gettext("Enabled the "11520"following features on '%s':\n"),11521zpool_get_name(zhp));11522firstff = B_FALSE;11523}11524(void) printf(gettext(" %s\n"), fname);11525free(propname);11526}11527}1152811529if (countp != NULL)11530*countp = count;11531return (0);11532}1153311534static int11535upgrade_cb(zpool_handle_t *zhp, void *arg)11536{11537upgrade_cbdata_t *cbp = arg;11538nvlist_t *config;11539uint64_t version;11540boolean_t modified_pool = B_FALSE;11541int ret;1154211543config = zpool_get_config(zhp, NULL);11544verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,11545&version) == 0);1154611547assert(SPA_VERSION_IS_SUPPORTED(version));1154811549if (version < cbp->cb_version) {11550cbp->cb_first = B_FALSE;11551ret = upgrade_version(zhp, cbp->cb_version);11552if (ret != 0)11553return (ret);11554modified_pool = B_TRUE;1155511556/*11557* If they did "zpool upgrade -a", then we could11558* be doing ioctls to different pools. We need11559* to log this history once to each pool, and bypass11560* the normal history logging that happens in main().11561*/11562(void) zpool_log_history(g_zfs, history_str);11563log_history = B_FALSE;11564}1156511566if (cbp->cb_version >= SPA_VERSION_FEATURES) {11567int count;11568ret = upgrade_enable_all(zhp, &count);11569if (ret != 0)11570return (ret);1157111572if (count > 0) {11573cbp->cb_first = B_FALSE;11574modified_pool = B_TRUE;11575}11576}1157711578if (modified_pool) {11579(void) printf("\n");11580(void) after_zpool_upgrade(zhp);11581}1158211583return (0);11584}1158511586static int11587upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)11588{11589upgrade_cbdata_t *cbp = arg;11590nvlist_t *config;11591uint64_t version;1159211593config = zpool_get_config(zhp, NULL);11594verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,11595&version) == 0);1159611597assert(SPA_VERSION_IS_SUPPORTED(version));1159811599if (version < SPA_VERSION_FEATURES) {11600if (cbp->cb_first) {11601(void) printf(gettext("The following pools are "11602"formatted with legacy version numbers and can\n"11603"be upgraded to use feature flags. After "11604"being upgraded, these pools\nwill no "11605"longer be accessible by software that does not "11606"support feature\nflags.\n\n"11607"Note that setting a pool's 'compatibility' "11608"feature to '" ZPOOL_COMPAT_LEGACY "' will\n"11609"inhibit upgrades.\n\n"));11610(void) printf(gettext("VER POOL\n"));11611(void) printf(gettext("--- ------------\n"));11612cbp->cb_first = B_FALSE;11613}1161411615(void) printf("%2llu %s\n", (u_longlong_t)version,11616zpool_get_name(zhp));11617}1161811619return (0);11620}1162111622static int11623upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)11624{11625upgrade_cbdata_t *cbp = arg;11626nvlist_t *config;11627uint64_t version;1162811629config = zpool_get_config(zhp, NULL);11630verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,11631&version) == 0);1163211633if (version >= SPA_VERSION_FEATURES) {11634int i;11635boolean_t poolfirst = B_TRUE;11636nvlist_t *enabled = zpool_get_features(zhp);1163711638for (i = 0; i < SPA_FEATURES; i++) {11639const char *fguid = spa_feature_table[i].fi_guid;11640const char *fname = spa_feature_table[i].fi_uname;1164111642if (!spa_feature_table[i].fi_zfs_mod_supported)11643continue;1164411645if (!nvlist_exists(enabled, fguid)) {11646if (cbp->cb_first) {11647(void) printf(gettext("\nSome "11648"supported features are not "11649"enabled on the following pools. "11650"Once a\nfeature is enabled the "11651"pool may become incompatible with "11652"software\nthat does not support "11653"the feature. See "11654"zpool-features(7) for "11655"details.\n\n"11656"Note that the pool "11657"'compatibility' feature can be "11658"used to inhibit\nfeature "11659"upgrades.\n\n"11660"Features marked with (*) are not "11661"applied automatically on upgrade, "11662"and\nmust be applied explicitly "11663"with zpool-set(7).\n\n"));11664(void) printf(gettext("POOL "11665"FEATURE\n"));11666(void) printf(gettext("------"11667"---------\n"));11668cbp->cb_first = B_FALSE;11669}1167011671if (poolfirst) {11672(void) printf(gettext("%s\n"),11673zpool_get_name(zhp));11674poolfirst = B_FALSE;11675}1167611677(void) printf(gettext(" %s%s\n"), fname,11678spa_feature_table[i].fi_flags &11679ZFEATURE_FLAG_NO_UPGRADE ? "(*)" : "");11680}11681/*11682* If they did "zpool upgrade -a", then we could11683* be doing ioctls to different pools. We need11684* to log this history once to each pool, and bypass11685* the normal history logging that happens in main().11686*/11687(void) zpool_log_history(g_zfs, history_str);11688log_history = B_FALSE;11689}11690}1169111692return (0);11693}1169411695static int11696upgrade_one(zpool_handle_t *zhp, void *data)11697{11698boolean_t modified_pool = B_FALSE;11699upgrade_cbdata_t *cbp = data;11700uint64_t cur_version;11701int ret;1170211703if (strcmp("log", zpool_get_name(zhp)) == 0) {11704(void) fprintf(stderr, gettext("'log' is now a reserved word\n"11705"Pool 'log' must be renamed using export and import"11706" to upgrade.\n"));11707return (1);11708}1170911710cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);11711if (cur_version > cbp->cb_version) {11712(void) printf(gettext("Pool '%s' is already formatted "11713"using more current version '%llu'.\n\n"),11714zpool_get_name(zhp), (u_longlong_t)cur_version);11715return (0);11716}1171711718if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {11719(void) printf(gettext("Pool '%s' is already formatted "11720"using version %llu.\n\n"), zpool_get_name(zhp),11721(u_longlong_t)cbp->cb_version);11722return (0);11723}1172411725if (cur_version != cbp->cb_version) {11726modified_pool = B_TRUE;11727ret = upgrade_version(zhp, cbp->cb_version);11728if (ret != 0)11729return (ret);11730}1173111732if (cbp->cb_version >= SPA_VERSION_FEATURES) {11733int count = 0;11734ret = upgrade_enable_all(zhp, &count);11735if (ret != 0)11736return (ret);1173711738if (count != 0) {11739modified_pool = B_TRUE;11740} else if (cur_version == SPA_VERSION) {11741(void) printf(gettext("Pool '%s' already has all "11742"supported and requested features enabled.\n"),11743zpool_get_name(zhp));11744}11745}1174611747if (modified_pool) {11748(void) printf("\n");11749(void) after_zpool_upgrade(zhp);11750}1175111752return (0);11753}1175411755/*11756* zpool upgrade11757* zpool upgrade -v11758* zpool upgrade [-V version] <-a | pool ...>11759*11760* With no arguments, display downrev'd ZFS pool available for upgrade.11761* Individual pools can be upgraded by specifying the pool, and '-a' will11762* upgrade all pools.11763*/11764int11765zpool_do_upgrade(int argc, char **argv)11766{11767int c;11768upgrade_cbdata_t cb = { 0 };11769int ret = 0;11770boolean_t showversions = B_FALSE;11771boolean_t upgradeall = B_FALSE;11772char *end;117731177411775/* check options */11776while ((c = getopt(argc, argv, ":avV:")) != -1) {11777switch (c) {11778case 'a':11779upgradeall = B_TRUE;11780break;11781case 'v':11782showversions = B_TRUE;11783break;11784case 'V':11785cb.cb_version = strtoll(optarg, &end, 10);11786if (*end != '\0' ||11787!SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {11788(void) fprintf(stderr,11789gettext("invalid version '%s'\n"), optarg);11790usage(B_FALSE);11791}11792break;11793case ':':11794(void) fprintf(stderr, gettext("missing argument for "11795"'%c' option\n"), optopt);11796usage(B_FALSE);11797break;11798case '?':11799(void) fprintf(stderr, gettext("invalid option '%c'\n"),11800optopt);11801usage(B_FALSE);11802}11803}1180411805cb.cb_argc = argc;11806cb.cb_argv = argv;11807argc -= optind;11808argv += optind;1180911810if (cb.cb_version == 0) {11811cb.cb_version = SPA_VERSION;11812} else if (!upgradeall && argc == 0) {11813(void) fprintf(stderr, gettext("-V option is "11814"incompatible with other arguments\n"));11815usage(B_FALSE);11816}1181711818if (showversions) {11819if (upgradeall || argc != 0) {11820(void) fprintf(stderr, gettext("-v option is "11821"incompatible with other arguments\n"));11822usage(B_FALSE);11823}11824} else if (upgradeall) {11825if (argc != 0) {11826(void) fprintf(stderr, gettext("-a option should not "11827"be used along with a pool name\n"));11828usage(B_FALSE);11829}11830}1183111832(void) printf("%s", gettext("This system supports ZFS pool feature "11833"flags.\n\n"));11834if (showversions) {11835int i;1183611837(void) printf(gettext("The following features are "11838"supported:\n\n"));11839(void) printf(gettext("FEAT DESCRIPTION\n"));11840(void) printf("----------------------------------------------"11841"---------------\n");11842for (i = 0; i < SPA_FEATURES; i++) {11843zfeature_info_t *fi = &spa_feature_table[i];11844if (!fi->fi_zfs_mod_supported)11845continue;11846const char *ro =11847(fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?11848" (read-only compatible)" : "";1184911850(void) printf("%-37s%s\n", fi->fi_uname, ro);11851(void) printf(" %s\n", fi->fi_desc);11852}11853(void) printf("\n");1185411855(void) printf(gettext("The following legacy versions are also "11856"supported:\n\n"));11857(void) printf(gettext("VER DESCRIPTION\n"));11858(void) printf("--- -----------------------------------------"11859"---------------\n");11860(void) printf(gettext(" 1 Initial ZFS version\n"));11861(void) printf(gettext(" 2 Ditto blocks "11862"(replicated metadata)\n"));11863(void) printf(gettext(" 3 Hot spares and double parity "11864"RAID-Z\n"));11865(void) printf(gettext(" 4 zpool history\n"));11866(void) printf(gettext(" 5 Compression using the gzip "11867"algorithm\n"));11868(void) printf(gettext(" 6 bootfs pool property\n"));11869(void) printf(gettext(" 7 Separate intent log devices\n"));11870(void) printf(gettext(" 8 Delegated administration\n"));11871(void) printf(gettext(" 9 refquota and refreservation "11872"properties\n"));11873(void) printf(gettext(" 10 Cache devices\n"));11874(void) printf(gettext(" 11 Improved scrub performance\n"));11875(void) printf(gettext(" 12 Snapshot properties\n"));11876(void) printf(gettext(" 13 snapused property\n"));11877(void) printf(gettext(" 14 passthrough-x aclinherit\n"));11878(void) printf(gettext(" 15 user/group space accounting\n"));11879(void) printf(gettext(" 16 stmf property support\n"));11880(void) printf(gettext(" 17 Triple-parity RAID-Z\n"));11881(void) printf(gettext(" 18 Snapshot user holds\n"));11882(void) printf(gettext(" 19 Log device removal\n"));11883(void) printf(gettext(" 20 Compression using zle "11884"(zero-length encoding)\n"));11885(void) printf(gettext(" 21 Deduplication\n"));11886(void) printf(gettext(" 22 Received properties\n"));11887(void) printf(gettext(" 23 Slim ZIL\n"));11888(void) printf(gettext(" 24 System attributes\n"));11889(void) printf(gettext(" 25 Improved scrub stats\n"));11890(void) printf(gettext(" 26 Improved snapshot deletion "11891"performance\n"));11892(void) printf(gettext(" 27 Improved snapshot creation "11893"performance\n"));11894(void) printf(gettext(" 28 Multiple vdev replacements\n"));11895(void) printf(gettext("\nFor more information on a particular "11896"version, including supported releases,\n"));11897(void) printf(gettext("see the ZFS Administration Guide.\n\n"));11898} else if (argc == 0 && upgradeall) {11899cb.cb_first = B_TRUE;11900ret = zpool_iter(g_zfs, upgrade_cb, &cb);11901if (ret == 0 && cb.cb_first) {11902if (cb.cb_version == SPA_VERSION) {11903(void) printf(gettext("All pools are already "11904"formatted using feature flags.\n\n"));11905(void) printf(gettext("Every feature flags "11906"pool already has all supported and "11907"requested features enabled.\n"));11908} else {11909(void) printf(gettext("All pools are already "11910"formatted with version %llu or higher.\n"),11911(u_longlong_t)cb.cb_version);11912}11913}11914} else if (argc == 0) {11915cb.cb_first = B_TRUE;11916ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);11917assert(ret == 0);1191811919if (cb.cb_first) {11920(void) printf(gettext("All pools are formatted "11921"using feature flags.\n\n"));11922} else {11923(void) printf(gettext("\nUse 'zpool upgrade -v' "11924"for a list of available legacy versions.\n"));11925}1192611927cb.cb_first = B_TRUE;11928ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);11929assert(ret == 0);1193011931if (cb.cb_first) {11932(void) printf(gettext("Every feature flags pool has "11933"all supported and requested features enabled.\n"));11934} else {11935(void) printf(gettext("\n"));11936}11937} else {11938ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,11939B_FALSE, upgrade_one, &cb);11940}1194111942return (ret);11943}1194411945typedef struct hist_cbdata {11946boolean_t first;11947boolean_t longfmt;11948boolean_t internal;11949} hist_cbdata_t;1195011951static void11952print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)11953{11954nvlist_t **records;11955uint_t numrecords;11956int i;1195711958verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,11959&records, &numrecords) == 0);11960for (i = 0; i < numrecords; i++) {11961nvlist_t *rec = records[i];11962char tbuf[64] = "";1196311964if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {11965time_t tsec;11966struct tm t;1196711968tsec = fnvlist_lookup_uint64(records[i],11969ZPOOL_HIST_TIME);11970(void) localtime_r(&tsec, &t);11971(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);11972}1197311974if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {11975uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],11976ZPOOL_HIST_ELAPSED_NS);11977(void) snprintf(tbuf + strlen(tbuf),11978sizeof (tbuf) - strlen(tbuf),11979" (%lldms)", (long long)elapsed_ns / 1000 / 1000);11980}1198111982if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {11983(void) printf("%s %s", tbuf,11984fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));11985} else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {11986int ievent =11987fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);11988if (!cb->internal)11989continue;11990if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {11991(void) printf("%s unrecognized record:\n",11992tbuf);11993dump_nvlist(rec, 4);11994continue;11995}11996(void) printf("%s [internal %s txg:%lld] %s", tbuf,11997zfs_history_event_names[ievent],11998(longlong_t)fnvlist_lookup_uint64(11999rec, ZPOOL_HIST_TXG),12000fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));12001} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {12002if (!cb->internal)12003continue;12004(void) printf("%s [txg:%lld] %s", tbuf,12005(longlong_t)fnvlist_lookup_uint64(12006rec, ZPOOL_HIST_TXG),12007fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));12008if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {12009(void) printf(" %s (%llu)",12010fnvlist_lookup_string(rec,12011ZPOOL_HIST_DSNAME),12012(u_longlong_t)fnvlist_lookup_uint64(rec,12013ZPOOL_HIST_DSID));12014}12015(void) printf(" %s", fnvlist_lookup_string(rec,12016ZPOOL_HIST_INT_STR));12017} else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {12018if (!cb->internal)12019continue;12020(void) printf("%s ioctl %s\n", tbuf,12021fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));12022if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {12023(void) printf(" input:\n");12024dump_nvlist(fnvlist_lookup_nvlist(rec,12025ZPOOL_HIST_INPUT_NVL), 8);12026}12027if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {12028(void) printf(" output:\n");12029dump_nvlist(fnvlist_lookup_nvlist(rec,12030ZPOOL_HIST_OUTPUT_NVL), 8);12031}12032if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {12033(void) printf(" output nvlist omitted; "12034"original size: %lldKB\n",12035(longlong_t)fnvlist_lookup_int64(rec,12036ZPOOL_HIST_OUTPUT_SIZE) / 1024);12037}12038if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {12039(void) printf(" errno: %lld\n",12040(longlong_t)fnvlist_lookup_int64(rec,12041ZPOOL_HIST_ERRNO));12042}12043} else {12044if (!cb->internal)12045continue;12046(void) printf("%s unrecognized record:\n", tbuf);12047dump_nvlist(rec, 4);12048}1204912050if (!cb->longfmt) {12051(void) printf("\n");12052continue;12053}12054(void) printf(" [");12055if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {12056uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);12057struct passwd *pwd = getpwuid(who);12058(void) printf("user %d ", (int)who);12059if (pwd != NULL)12060(void) printf("(%s) ", pwd->pw_name);12061}12062if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {12063(void) printf("on %s",12064fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));12065}12066if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {12067(void) printf(":%s",12068fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));12069}1207012071(void) printf("]");12072(void) printf("\n");12073}12074}1207512076/*12077* Print out the command history for a specific pool.12078*/12079static int12080get_history_one(zpool_handle_t *zhp, void *data)12081{12082nvlist_t *nvhis;12083int ret;12084hist_cbdata_t *cb = (hist_cbdata_t *)data;12085uint64_t off = 0;12086boolean_t eof = B_FALSE;1208712088cb->first = B_FALSE;1208912090(void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));1209112092while (!eof) {12093if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)12094return (ret);1209512096print_history_records(nvhis, cb);12097nvlist_free(nvhis);12098}12099(void) printf("\n");1210012101return (ret);12102}1210312104/*12105* zpool history <pool>12106*12107* Displays the history of commands that modified pools.12108*/12109int12110zpool_do_history(int argc, char **argv)12111{12112hist_cbdata_t cbdata = { 0 };12113int ret;12114int c;1211512116cbdata.first = B_TRUE;12117/* check options */12118while ((c = getopt(argc, argv, "li")) != -1) {12119switch (c) {12120case 'l':12121cbdata.longfmt = B_TRUE;12122break;12123case 'i':12124cbdata.internal = B_TRUE;12125break;12126case '?':12127(void) fprintf(stderr, gettext("invalid option '%c'\n"),12128optopt);12129usage(B_FALSE);12130}12131}12132argc -= optind;12133argv += optind;1213412135ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,12136B_FALSE, get_history_one, &cbdata);1213712138if (argc == 0 && cbdata.first == B_TRUE) {12139(void) fprintf(stderr, gettext("no pools available\n"));12140return (0);12141}1214212143return (ret);12144}1214512146typedef struct ev_opts {12147int verbose;12148int scripted;12149int follow;12150int clear;12151char poolname[ZFS_MAX_DATASET_NAME_LEN];12152} ev_opts_t;1215312154static void12155zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)12156{12157char ctime_str[26], str[32];12158const char *ptr;12159int64_t *tv;12160uint_t n;1216112162verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);12163memset(str, ' ', 32);12164(void) ctime_r((const time_t *)&tv[0], ctime_str);12165(void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */12166(void) memcpy(str+7, ctime_str+20, 4); /* '1993' */12167(void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */12168(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */12169if (opts->scripted)12170(void) printf(gettext("%s\t"), str);12171else12172(void) printf(gettext("%s "), str);1217312174verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);12175(void) printf(gettext("%s\n"), ptr);12176}1217712178static void12179zpool_do_events_nvprint(nvlist_t *nvl, int depth)12180{12181nvpair_t *nvp;12182static char flagstr[256];1218312184for (nvp = nvlist_next_nvpair(nvl, NULL);12185nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {1218612187data_type_t type = nvpair_type(nvp);12188const char *name = nvpair_name(nvp);1218912190boolean_t b;12191uint8_t i8;12192uint16_t i16;12193uint32_t i32;12194uint64_t i64;12195const char *str;12196nvlist_t *cnv;1219712198printf(gettext("%*s%s = "), depth, "", name);1219912200switch (type) {12201case DATA_TYPE_BOOLEAN:12202printf(gettext("%s"), "1");12203break;1220412205case DATA_TYPE_BOOLEAN_VALUE:12206(void) nvpair_value_boolean_value(nvp, &b);12207printf(gettext("%s"), b ? "1" : "0");12208break;1220912210case DATA_TYPE_BYTE:12211(void) nvpair_value_byte(nvp, &i8);12212printf(gettext("0x%x"), i8);12213break;1221412215case DATA_TYPE_INT8:12216(void) nvpair_value_int8(nvp, (void *)&i8);12217printf(gettext("0x%x"), i8);12218break;1221912220case DATA_TYPE_UINT8:12221(void) nvpair_value_uint8(nvp, &i8);12222printf(gettext("0x%x"), i8);12223break;1222412225case DATA_TYPE_INT16:12226(void) nvpair_value_int16(nvp, (void *)&i16);12227printf(gettext("0x%x"), i16);12228break;1222912230case DATA_TYPE_UINT16:12231(void) nvpair_value_uint16(nvp, &i16);12232printf(gettext("0x%x"), i16);12233break;1223412235case DATA_TYPE_INT32:12236(void) nvpair_value_int32(nvp, (void *)&i32);12237printf(gettext("0x%x"), i32);12238break;1223912240case DATA_TYPE_UINT32:12241(void) nvpair_value_uint32(nvp, &i32);12242if (strcmp(name,12243FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE) == 0 ||12244strcmp(name,12245FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE) == 0) {12246(void) zfs_valstr_zio_stage(i32, flagstr,12247sizeof (flagstr));12248printf(gettext("0x%x [%s]"), i32, flagstr);12249} else if (strcmp(name,12250FM_EREPORT_PAYLOAD_ZFS_ZIO_TYPE) == 0) {12251(void) zfs_valstr_zio_type(i32, flagstr,12252sizeof (flagstr));12253printf(gettext("0x%x [%s]"), i32, flagstr);12254} else if (strcmp(name,12255FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) {12256(void) zfs_valstr_zio_priority(i32, flagstr,12257sizeof (flagstr));12258printf(gettext("0x%x [%s]"), i32, flagstr);12259} else {12260printf(gettext("0x%x"), i32);12261}12262break;1226312264case DATA_TYPE_INT64:12265(void) nvpair_value_int64(nvp, (void *)&i64);12266printf(gettext("0x%llx"), (u_longlong_t)i64);12267break;1226812269case DATA_TYPE_UINT64:12270(void) nvpair_value_uint64(nvp, &i64);12271/*12272* translate vdev state values to readable12273* strings to aide zpool events consumers12274*/12275if (strcmp(name,12276FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||12277strcmp(name,12278FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {12279printf(gettext("\"%s\" (0x%llx)"),12280zpool_state_to_name(i64, VDEV_AUX_NONE),12281(u_longlong_t)i64);12282} else if (strcmp(name,12283FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS) == 0) {12284(void) zfs_valstr_zio_flag(i64, flagstr,12285sizeof (flagstr));12286printf(gettext("0x%llx [%s]"),12287(u_longlong_t)i64, flagstr);12288} else {12289printf(gettext("0x%llx"), (u_longlong_t)i64);12290}12291break;1229212293case DATA_TYPE_HRTIME:12294(void) nvpair_value_hrtime(nvp, (void *)&i64);12295printf(gettext("0x%llx"), (u_longlong_t)i64);12296break;1229712298case DATA_TYPE_STRING:12299(void) nvpair_value_string(nvp, &str);12300printf(gettext("\"%s\""), str ? str : "<NULL>");12301break;1230212303case DATA_TYPE_NVLIST:12304printf(gettext("(embedded nvlist)\n"));12305(void) nvpair_value_nvlist(nvp, &cnv);12306zpool_do_events_nvprint(cnv, depth + 8);12307printf(gettext("%*s(end %s)"), depth, "", name);12308break;1230912310case DATA_TYPE_NVLIST_ARRAY: {12311nvlist_t **val;12312uint_t i, nelem;1231312314(void) nvpair_value_nvlist_array(nvp, &val, &nelem);12315printf(gettext("(%d embedded nvlists)\n"), nelem);12316for (i = 0; i < nelem; i++) {12317printf(gettext("%*s%s[%d] = %s\n"),12318depth, "", name, i, "(embedded nvlist)");12319zpool_do_events_nvprint(val[i], depth + 8);12320printf(gettext("%*s(end %s[%i])\n"),12321depth, "", name, i);12322}12323printf(gettext("%*s(end %s)\n"), depth, "", name);12324}12325break;1232612327case DATA_TYPE_INT8_ARRAY: {12328int8_t *val;12329uint_t i, nelem;1233012331(void) nvpair_value_int8_array(nvp, &val, &nelem);12332for (i = 0; i < nelem; i++)12333printf(gettext("0x%x "), val[i]);1233412335break;12336}1233712338case DATA_TYPE_UINT8_ARRAY: {12339uint8_t *val;12340uint_t i, nelem;1234112342(void) nvpair_value_uint8_array(nvp, &val, &nelem);12343for (i = 0; i < nelem; i++)12344printf(gettext("0x%x "), val[i]);1234512346break;12347}1234812349case DATA_TYPE_INT16_ARRAY: {12350int16_t *val;12351uint_t i, nelem;1235212353(void) nvpair_value_int16_array(nvp, &val, &nelem);12354for (i = 0; i < nelem; i++)12355printf(gettext("0x%x "), val[i]);1235612357break;12358}1235912360case DATA_TYPE_UINT16_ARRAY: {12361uint16_t *val;12362uint_t i, nelem;1236312364(void) nvpair_value_uint16_array(nvp, &val, &nelem);12365for (i = 0; i < nelem; i++)12366printf(gettext("0x%x "), val[i]);1236712368break;12369}1237012371case DATA_TYPE_INT32_ARRAY: {12372int32_t *val;12373uint_t i, nelem;1237412375(void) nvpair_value_int32_array(nvp, &val, &nelem);12376for (i = 0; i < nelem; i++)12377printf(gettext("0x%x "), val[i]);1237812379break;12380}1238112382case DATA_TYPE_UINT32_ARRAY: {12383uint32_t *val;12384uint_t i, nelem;1238512386(void) nvpair_value_uint32_array(nvp, &val, &nelem);12387for (i = 0; i < nelem; i++)12388printf(gettext("0x%x "), val[i]);1238912390break;12391}1239212393case DATA_TYPE_INT64_ARRAY: {12394int64_t *val;12395uint_t i, nelem;1239612397(void) nvpair_value_int64_array(nvp, &val, &nelem);12398for (i = 0; i < nelem; i++)12399printf(gettext("0x%llx "),12400(u_longlong_t)val[i]);1240112402break;12403}1240412405case DATA_TYPE_UINT64_ARRAY: {12406uint64_t *val;12407uint_t i, nelem;1240812409(void) nvpair_value_uint64_array(nvp, &val, &nelem);12410for (i = 0; i < nelem; i++)12411printf(gettext("0x%llx "),12412(u_longlong_t)val[i]);1241312414break;12415}1241612417case DATA_TYPE_STRING_ARRAY: {12418const char **str;12419uint_t i, nelem;1242012421(void) nvpair_value_string_array(nvp, &str, &nelem);12422for (i = 0; i < nelem; i++)12423printf(gettext("\"%s\" "),12424str[i] ? str[i] : "<NULL>");1242512426break;12427}1242812429case DATA_TYPE_BOOLEAN_ARRAY:12430case DATA_TYPE_BYTE_ARRAY:12431case DATA_TYPE_DOUBLE:12432case DATA_TYPE_DONTCARE:12433case DATA_TYPE_UNKNOWN:12434printf(gettext("<unknown>"));12435break;12436}1243712438printf(gettext("\n"));12439}12440}1244112442static int12443zpool_do_events_next(ev_opts_t *opts)12444{12445nvlist_t *nvl;12446int zevent_fd, ret, dropped;12447const char *pool;1244812449zevent_fd = open(ZFS_DEV, O_RDWR);12450VERIFY(zevent_fd >= 0);1245112452if (!opts->scripted)12453(void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");1245412455while (1) {12456ret = zpool_events_next(g_zfs, &nvl, &dropped,12457(opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);12458if (ret || nvl == NULL)12459break;1246012461if (dropped > 0)12462(void) printf(gettext("dropped %d events\n"), dropped);1246312464if (strlen(opts->poolname) > 0 &&12465nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&12466strcmp(opts->poolname, pool) != 0)12467continue;1246812469zpool_do_events_short(nvl, opts);1247012471if (opts->verbose) {12472zpool_do_events_nvprint(nvl, 8);12473printf(gettext("\n"));12474}12475(void) fflush(stdout);1247612477nvlist_free(nvl);12478}1247912480VERIFY0(close(zevent_fd));1248112482return (ret);12483}1248412485static int12486zpool_do_events_clear(void)12487{12488int count, ret;1248912490ret = zpool_events_clear(g_zfs, &count);12491if (!ret)12492(void) printf(gettext("cleared %d events\n"), count);1249312494return (ret);12495}1249612497/*12498* zpool events [-vHf [pool] | -c]12499*12500* Displays events logs by ZFS.12501*/12502int12503zpool_do_events(int argc, char **argv)12504{12505ev_opts_t opts = { 0 };12506int ret;12507int c;1250812509/* check options */12510while ((c = getopt(argc, argv, "vHfc")) != -1) {12511switch (c) {12512case 'v':12513opts.verbose = 1;12514break;12515case 'H':12516opts.scripted = 1;12517break;12518case 'f':12519opts.follow = 1;12520break;12521case 'c':12522opts.clear = 1;12523break;12524case '?':12525(void) fprintf(stderr, gettext("invalid option '%c'\n"),12526optopt);12527usage(B_FALSE);12528}12529}12530argc -= optind;12531argv += optind;1253212533if (argc > 1) {12534(void) fprintf(stderr, gettext("too many arguments\n"));12535usage(B_FALSE);12536} else if (argc == 1) {12537(void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));12538if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {12539(void) fprintf(stderr,12540gettext("invalid pool name '%s'\n"), opts.poolname);12541usage(B_FALSE);12542}12543}1254412545if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&12546opts.clear) {12547(void) fprintf(stderr,12548gettext("invalid options combined with -c\n"));12549usage(B_FALSE);12550}1255112552if (opts.clear)12553ret = zpool_do_events_clear();12554else12555ret = zpool_do_events_next(&opts);1255612557return (ret);12558}1255912560static int12561get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)12562{12563zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;12564char value[ZFS_MAXPROPLEN];12565zprop_source_t srctype;12566nvlist_t *props, *item, *d;12567props = item = d = NULL;1256812569if (cbp->cb_json) {12570d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "vdevs");12571if (d == NULL) {12572fprintf(stderr, "vdevs obj not found.\n");12573exit(1);12574}12575props = fnvlist_alloc();12576}1257712578for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;12579pl = pl->pl_next) {12580char *prop_name;12581/*12582* If the first property is pool name, it is a special12583* placeholder that we can skip. This will also skip12584* over the name property when 'all' is specified.12585*/12586if (pl->pl_prop == ZPOOL_PROP_NAME &&12587pl == cbp->cb_proplist)12588continue;1258912590if (pl->pl_prop == ZPROP_INVAL) {12591prop_name = pl->pl_user_prop;12592} else {12593prop_name = (char *)vdev_prop_to_name(pl->pl_prop);12594}12595if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,12596prop_name, value, sizeof (value), &srctype,12597cbp->cb_literal) == 0) {12598(void) zprop_collect_property(vdevname, cbp, prop_name,12599value, srctype, NULL, NULL, props);12600}12601}1260212603if (cbp->cb_json) {12604if (!nvlist_empty(props)) {12605item = fnvlist_alloc();12606fill_vdev_info(item, zhp, vdevname, B_TRUE,12607cbp->cb_json_as_int);12608fnvlist_add_nvlist(item, "properties", props);12609fnvlist_add_nvlist(d, vdevname, item);12610fnvlist_add_nvlist(cbp->cb_jsobj, "vdevs", d);12611fnvlist_free(item);12612}12613fnvlist_free(props);12614}1261512616return (0);12617}1261812619static int12620get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)12621{12622zpool_handle_t *zhp = zhp_data;12623zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;12624char *vdevname;12625const char *type;12626int ret;1262712628/*12629* zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the12630* pool name for display purposes, which is not desired. Fallback to12631* zpool_vdev_name() when not dealing with the root vdev.12632*/12633type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);12634if (zhp != NULL && strcmp(type, "root") == 0)12635vdevname = strdup("root-0");12636else12637vdevname = zpool_vdev_name(g_zfs, zhp, nv,12638cbp->cb_vdevs.cb_name_flags);1263912640(void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);1264112642ret = get_callback_vdev(zhp, vdevname, data);1264312644free(vdevname);1264512646return (ret);12647}1264812649static int12650get_callback(zpool_handle_t *zhp, void *data)12651{12652zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;12653char value[ZFS_MAXPROPLEN];12654zprop_source_t srctype;12655zprop_list_t *pl;12656int vid;12657int err = 0;12658nvlist_t *props, *item, *d;12659props = item = d = NULL;1266012661if (cbp->cb_type == ZFS_TYPE_VDEV) {12662if (cbp->cb_json) {12663nvlist_t *pool = fnvlist_alloc();12664fill_pool_info(pool, zhp, B_FALSE, cbp->cb_json_as_int);12665fnvlist_add_nvlist(cbp->cb_jsobj, "pool", pool);12666fnvlist_free(pool);12667}1266812669if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {12670(void) for_each_vdev(zhp, get_callback_vdev_cb, data);12671} else {12672/* Adjust column widths for vdev properties */12673for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;12674vid++) {12675(void) vdev_expand_proplist(zhp,12676cbp->cb_vdevs.cb_names[vid],12677&cbp->cb_proplist);12678}12679/* Display the properties */12680for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;12681vid++) {12682(void) get_callback_vdev(zhp,12683cbp->cb_vdevs.cb_names[vid], data);12684}12685}12686} else {12687assert(cbp->cb_type == ZFS_TYPE_POOL);12688if (cbp->cb_json) {12689d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");12690if (d == NULL) {12691fprintf(stderr, "pools obj not found.\n");12692exit(1);12693}12694props = fnvlist_alloc();12695}12696for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {12697/*12698* Skip the special fake placeholder. This will also12699* skip over the name property when 'all' is specified.12700*/12701if (pl->pl_prop == ZPOOL_PROP_NAME &&12702pl == cbp->cb_proplist)12703continue;1270412705if (pl->pl_prop == ZPROP_INVAL &&12706zfs_prop_user(pl->pl_user_prop)) {12707srctype = ZPROP_SRC_LOCAL;1270812709if (zpool_get_userprop(zhp, pl->pl_user_prop,12710value, sizeof (value), &srctype) != 0)12711continue;1271212713err = zprop_collect_property(12714zpool_get_name(zhp), cbp, pl->pl_user_prop,12715value, srctype, NULL, NULL, props);12716} else if (pl->pl_prop == ZPROP_INVAL &&12717(zpool_prop_feature(pl->pl_user_prop) ||12718zpool_prop_unsupported(pl->pl_user_prop))) {12719srctype = ZPROP_SRC_LOCAL;1272012721if (zpool_prop_get_feature(zhp,12722pl->pl_user_prop, value,12723sizeof (value)) == 0) {12724err = zprop_collect_property(12725zpool_get_name(zhp), cbp,12726pl->pl_user_prop, value, srctype,12727NULL, NULL, props);12728}12729} else {12730if (zpool_get_prop(zhp, pl->pl_prop, value,12731sizeof (value), &srctype,12732cbp->cb_literal) != 0)12733continue;1273412735err = zprop_collect_property(12736zpool_get_name(zhp), cbp,12737zpool_prop_to_name(pl->pl_prop),12738value, srctype, NULL, NULL, props);12739}12740if (err != 0)12741return (err);12742}1274312744if (cbp->cb_json) {12745if (!nvlist_empty(props)) {12746item = fnvlist_alloc();12747fill_pool_info(item, zhp, B_TRUE,12748cbp->cb_json_as_int);12749fnvlist_add_nvlist(item, "properties", props);12750if (cbp->cb_json_pool_key_guid) {12751char buf[256];12752uint64_t guid = fnvlist_lookup_uint64(12753zpool_get_config(zhp, NULL),12754ZPOOL_CONFIG_POOL_GUID);12755(void) snprintf(buf, 256, "%llu",12756(u_longlong_t)guid);12757fnvlist_add_nvlist(d, buf, item);12758} else {12759const char *name = zpool_get_name(zhp);12760fnvlist_add_nvlist(d, name, item);12761}12762fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);12763fnvlist_free(item);12764}12765fnvlist_free(props);12766}12767}1276812769return (0);12770}1277112772/*12773* zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...12774*12775* -H Scripted mode. Don't display headers, and separate properties12776* by a single tab.12777* -o List of columns to display. Defaults to12778* "name,property,value,source".12779* -p Display values in parsable (exact) format.12780* -j Display output in JSON format.12781* --json-int Display numbers as integers instead of strings.12782* --json-pool-key-guid Set pool GUID as key for pool objects.12783*12784* Get properties of pools in the system. Output space statistics12785* for each one as well as other attributes.12786*/12787int12788zpool_do_get(int argc, char **argv)12789{12790zprop_get_cbdata_t cb = { 0 };12791zprop_list_t fake_name = { 0 };12792int ret;12793int c, i;12794char *propstr = NULL;12795char *vdev = NULL;12796nvlist_t *data = NULL;1279712798cb.cb_first = B_TRUE;1279912800/*12801* Set up default columns and sources.12802*/12803cb.cb_sources = ZPROP_SRC_ALL;12804cb.cb_columns[0] = GET_COL_NAME;12805cb.cb_columns[1] = GET_COL_PROPERTY;12806cb.cb_columns[2] = GET_COL_VALUE;12807cb.cb_columns[3] = GET_COL_SOURCE;12808cb.cb_type = ZFS_TYPE_POOL;12809cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;12810current_prop_type = cb.cb_type;1281112812struct option long_options[] = {12813{"json", no_argument, NULL, 'j'},12814{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},12815{"json-pool-key-guid", no_argument, NULL,12816ZPOOL_OPTION_POOL_KEY_GUID},12817{0, 0, 0, 0}12818};1281912820/* check options */12821while ((c = getopt_long(argc, argv, ":jHpo:", long_options,12822NULL)) != -1) {12823switch (c) {12824case 'p':12825cb.cb_literal = B_TRUE;12826break;12827case 'H':12828cb.cb_scripted = B_TRUE;12829break;12830case 'j':12831cb.cb_json = B_TRUE;12832cb.cb_jsobj = zpool_json_schema(0, 1);12833data = fnvlist_alloc();12834break;12835case ZPOOL_OPTION_POOL_KEY_GUID:12836cb.cb_json_pool_key_guid = B_TRUE;12837break;12838case ZPOOL_OPTION_JSON_NUMS_AS_INT:12839cb.cb_json_as_int = B_TRUE;12840cb.cb_literal = B_TRUE;12841break;12842case 'o':12843memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));12844i = 0;1284512846for (char *tok; (tok = strsep(&optarg, ",")); ) {12847static const char *const col_opts[] =12848{ "name", "property", "value", "source",12849"all" };12850static const zfs_get_column_t col_cols[] =12851{ GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,12852GET_COL_SOURCE };1285312854if (i == ZFS_GET_NCOLS - 1) {12855(void) fprintf(stderr, gettext("too "12856"many fields given to -o "12857"option\n"));12858usage(B_FALSE);12859}1286012861for (c = 0; c < ARRAY_SIZE(col_opts); ++c)12862if (strcmp(tok, col_opts[c]) == 0)12863goto found;1286412865(void) fprintf(stderr,12866gettext("invalid column name '%s'\n"), tok);12867usage(B_FALSE);1286812869found:12870if (c >= 4) {12871if (i > 0) {12872(void) fprintf(stderr,12873gettext("\"all\" conflicts "12874"with specific fields "12875"given to -o option\n"));12876usage(B_FALSE);12877}1287812879memcpy(cb.cb_columns, col_cols,12880sizeof (col_cols));12881i = ZFS_GET_NCOLS - 1;12882} else12883cb.cb_columns[i++] = col_cols[c];12884}12885break;12886case '?':12887(void) fprintf(stderr, gettext("invalid option '%c'\n"),12888optopt);12889usage(B_FALSE);12890}12891}1289212893argc -= optind;12894argv += optind;1289512896if (!cb.cb_json && cb.cb_json_as_int) {12897(void) fprintf(stderr, gettext("'--json-int' only works with"12898" '-j' option\n"));12899usage(B_FALSE);12900}1290112902if (!cb.cb_json && cb.cb_json_pool_key_guid) {12903(void) fprintf(stderr, gettext("'json-pool-key-guid' only"12904" works with '-j' option\n"));12905usage(B_FALSE);12906}1290712908if (argc < 1) {12909(void) fprintf(stderr, gettext("missing property "12910"argument\n"));12911usage(B_FALSE);12912}1291312914/* Properties list is needed later by zprop_get_list() */12915propstr = argv[0];1291612917argc--;12918argv++;1291912920if (argc == 0) {12921/* No args, so just print the defaults. */12922} else if (are_all_pools(argc, argv)) {12923/* All the args are pool names */12924} else if (are_all_pools(1, argv)) {12925/* The first arg is a pool name */12926if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||12927(argc == 2 && strcmp(argv[1], "root") == 0) ||12928are_vdevs_in_pool(argc - 1, argv + 1, argv[0],12929&cb.cb_vdevs)) {1293012931if (strcmp(argv[1], "root") == 0)12932vdev = strdup("root-0");1293312934/* ... and the rest are vdev names */12935if (vdev == NULL)12936cb.cb_vdevs.cb_names = argv + 1;12937else12938cb.cb_vdevs.cb_names = &vdev;1293912940cb.cb_vdevs.cb_names_count = argc - 1;12941cb.cb_type = ZFS_TYPE_VDEV;12942argc = 1; /* One pool to process */12943} else {12944if (cb.cb_json) {12945nvlist_free(cb.cb_jsobj);12946nvlist_free(data);12947}12948fprintf(stderr, gettext("Expected a list of vdevs in"12949" \"%s\", but got:\n"), argv[0]);12950error_list_unresolved_vdevs(argc - 1, argv + 1,12951argv[0], &cb.cb_vdevs);12952fprintf(stderr, "\n");12953usage(B_FALSE);12954}12955} else {12956if (cb.cb_json) {12957nvlist_free(cb.cb_jsobj);12958nvlist_free(data);12959}12960/*12961* The first arg isn't the name of a valid pool.12962*/12963fprintf(stderr, gettext("Cannot get properties of %s: "12964"no such pool available.\n"), argv[0]);12965return (1);12966}1296712968if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,12969cb.cb_type) != 0) {12970/* Use correct list of valid properties (pool or vdev) */12971current_prop_type = cb.cb_type;12972usage(B_FALSE);12973}1297412975if (cb.cb_proplist != NULL) {12976fake_name.pl_prop = ZPOOL_PROP_NAME;12977fake_name.pl_width = strlen(gettext("NAME"));12978fake_name.pl_next = cb.cb_proplist;12979cb.cb_proplist = &fake_name;12980}1298112982if (cb.cb_json) {12983if (cb.cb_type == ZFS_TYPE_VDEV)12984fnvlist_add_nvlist(cb.cb_jsobj, "vdevs", data);12985else12986fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);12987fnvlist_free(data);12988}1298912990ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,12991cb.cb_literal, get_callback, &cb);1299212993if (ret == 0 && cb.cb_json)12994zcmd_print_json(cb.cb_jsobj);12995else if (ret != 0 && cb.cb_json)12996nvlist_free(cb.cb_jsobj);1299712998if (cb.cb_proplist == &fake_name)12999zprop_free_list(fake_name.pl_next);13000else13001zprop_free_list(cb.cb_proplist);1300213003if (vdev != NULL)13004free(vdev);1300513006return (ret);13007}1300813009typedef struct set_cbdata {13010char *cb_propname;13011char *cb_value;13012zfs_type_t cb_type;13013vdev_cbdata_t cb_vdevs;13014boolean_t cb_any_successful;13015} set_cbdata_t;1301613017static int13018set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)13019{13020int error;1302113022/* Check if we have out-of-bounds features */13023if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {13024boolean_t features[SPA_FEATURES];13025if (zpool_do_load_compat(cb->cb_value, features) !=13026ZPOOL_COMPATIBILITY_OK)13027return (-1);1302813029nvlist_t *enabled = zpool_get_features(zhp);13030spa_feature_t i;13031for (i = 0; i < SPA_FEATURES; i++) {13032const char *fguid = spa_feature_table[i].fi_guid;13033if (nvlist_exists(enabled, fguid) && !features[i])13034break;13035}13036if (i < SPA_FEATURES)13037(void) fprintf(stderr, gettext("Warning: one or "13038"more features already enabled on pool '%s'\n"13039"are not present in this compatibility set.\n"),13040zpool_get_name(zhp));13041}1304213043/* if we're setting a feature, check it's in compatibility set */13044if (zpool_prop_feature(cb->cb_propname) &&13045strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {13046char *fname = strchr(cb->cb_propname, '@') + 1;13047spa_feature_t f;1304813049if (zfeature_lookup_name(fname, &f) == 0) {13050char compat[ZFS_MAXPROPLEN];13051if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,13052compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)13053compat[0] = '\0';1305413055boolean_t features[SPA_FEATURES];13056if (zpool_do_load_compat(compat, features) !=13057ZPOOL_COMPATIBILITY_OK) {13058(void) fprintf(stderr, gettext("Error: "13059"cannot enable feature '%s' on pool '%s'\n"13060"because the pool's 'compatibility' "13061"property cannot be parsed.\n"),13062fname, zpool_get_name(zhp));13063return (-1);13064}1306513066if (!features[f]) {13067(void) fprintf(stderr, gettext("Error: "13068"cannot enable feature '%s' on pool '%s'\n"13069"as it is not specified in this pool's "13070"current compatibility set.\n"13071"Consider setting 'compatibility' to a "13072"less restrictive set, or to 'off'.\n"),13073fname, zpool_get_name(zhp));13074return (-1);13075}13076}13077}1307813079error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);1308013081return (error);13082}1308313084static int13085set_callback(zpool_handle_t *zhp, void *data)13086{13087int error;13088set_cbdata_t *cb = (set_cbdata_t *)data;1308913090if (cb->cb_type == ZFS_TYPE_VDEV) {13091error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,13092cb->cb_propname, cb->cb_value);13093} else {13094assert(cb->cb_type == ZFS_TYPE_POOL);13095error = set_pool_callback(zhp, cb);13096}1309713098cb->cb_any_successful = !error;13099return (error);13100}1310113102int13103zpool_do_set(int argc, char **argv)13104{13105set_cbdata_t cb = { 0 };13106int error;13107char *vdev = NULL;1310813109current_prop_type = ZFS_TYPE_POOL;13110if (argc > 1 && argv[1][0] == '-') {13111(void) fprintf(stderr, gettext("invalid option '%c'\n"),13112argv[1][1]);13113usage(B_FALSE);13114}1311513116if (argc < 2) {13117(void) fprintf(stderr, gettext("missing property=value "13118"argument\n"));13119usage(B_FALSE);13120}1312113122if (argc < 3) {13123(void) fprintf(stderr, gettext("missing pool name\n"));13124usage(B_FALSE);13125}1312613127if (argc > 4) {13128(void) fprintf(stderr, gettext("too many pool names\n"));13129usage(B_FALSE);13130}1313113132cb.cb_propname = argv[1];13133cb.cb_type = ZFS_TYPE_POOL;13134cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;13135cb.cb_value = strchr(cb.cb_propname, '=');13136if (cb.cb_value == NULL) {13137(void) fprintf(stderr, gettext("missing value in "13138"property=value argument\n"));13139usage(B_FALSE);13140}1314113142*(cb.cb_value) = '\0';13143cb.cb_value++;13144argc -= 2;13145argv += 2;1314613147/* argv[0] is pool name */13148if (!is_pool(argv[0])) {13149(void) fprintf(stderr,13150gettext("cannot open '%s': is not a pool\n"), argv[0]);13151return (EINVAL);13152}1315313154/* argv[1], when supplied, is vdev name */13155if (argc == 2) {1315613157if (strcmp(argv[1], "root") == 0)13158vdev = strdup("root-0");13159else13160vdev = strdup(argv[1]);1316113162if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {13163(void) fprintf(stderr, gettext(13164"cannot find '%s' in '%s': device not in pool\n"),13165vdev, argv[0]);13166free(vdev);13167return (EINVAL);13168}13169cb.cb_vdevs.cb_names = &vdev;13170cb.cb_vdevs.cb_names_count = 1;13171cb.cb_type = ZFS_TYPE_VDEV;13172}1317313174error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,13175B_FALSE, set_callback, &cb);1317613177if (vdev != NULL)13178free(vdev);1317913180return (error);13181}1318213183/* Add up the total number of bytes left to initialize/trim across all vdevs */13184static uint64_t13185vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)13186{13187uint64_t bytes_remaining;13188nvlist_t **child;13189uint_t c, children;13190vdev_stat_t *vs;1319113192assert(activity == ZPOOL_WAIT_INITIALIZE ||13193activity == ZPOOL_WAIT_TRIM);1319413195verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,13196(uint64_t **)&vs, &c) == 0);1319713198if (activity == ZPOOL_WAIT_INITIALIZE &&13199vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)13200bytes_remaining = vs->vs_initialize_bytes_est -13201vs->vs_initialize_bytes_done;13202else if (activity == ZPOOL_WAIT_TRIM &&13203vs->vs_trim_state == VDEV_TRIM_ACTIVE)13204bytes_remaining = vs->vs_trim_bytes_est -13205vs->vs_trim_bytes_done;13206else13207bytes_remaining = 0;1320813209if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,13210&child, &children) != 0)13211children = 0;1321213213for (c = 0; c < children; c++)13214bytes_remaining += vdev_activity_remaining(child[c], activity);1321513216return (bytes_remaining);13217}1321813219/* Add up the total number of bytes left to rebuild across top-level vdevs */13220static uint64_t13221vdev_activity_top_remaining(nvlist_t *nv)13222{13223uint64_t bytes_remaining = 0;13224nvlist_t **child;13225uint_t children;13226int error;1322713228if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,13229&child, &children) != 0)13230children = 0;1323113232for (uint_t c = 0; c < children; c++) {13233vdev_rebuild_stat_t *vrs;13234uint_t i;1323513236error = nvlist_lookup_uint64_array(child[c],13237ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);13238if (error == 0) {13239if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {13240bytes_remaining += (vrs->vrs_bytes_est -13241vrs->vrs_bytes_rebuilt);13242}13243}13244}1324513246return (bytes_remaining);13247}1324813249/* Whether any vdevs are 'spare' or 'replacing' vdevs */13250static boolean_t13251vdev_any_spare_replacing(nvlist_t *nv)13252{13253nvlist_t **child;13254uint_t c, children;13255const char *vdev_type;1325613257(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);1325813259if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||13260strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||13261strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {13262return (B_TRUE);13263}1326413265if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,13266&child, &children) != 0)13267children = 0;1326813269for (c = 0; c < children; c++) {13270if (vdev_any_spare_replacing(child[c]))13271return (B_TRUE);13272}1327313274return (B_FALSE);13275}1327613277typedef struct wait_data {13278char *wd_poolname;13279boolean_t wd_scripted;13280boolean_t wd_exact;13281boolean_t wd_headers_once;13282boolean_t wd_should_exit;13283/* Which activities to wait for */13284boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];13285float wd_interval;13286pthread_cond_t wd_cv;13287pthread_mutex_t wd_mutex;13288} wait_data_t;1328913290/*13291* Print to stdout a single line, containing one column for each activity that13292* we are waiting for specifying how many bytes of work are left for that13293* activity.13294*/13295static void13296print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)13297{13298nvlist_t *config, *nvroot;13299uint_t c;13300int i;13301pool_checkpoint_stat_t *pcs = NULL;13302pool_scan_stat_t *pss = NULL;13303pool_removal_stat_t *prs = NULL;13304pool_raidz_expand_stat_t *pres = NULL;13305const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",13306"REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};13307int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];1330813309/* Calculate the width of each column */13310for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {13311/*13312* Make sure we have enough space in the col for pretty-printed13313* numbers and for the column header, and then leave a couple13314* spaces between cols for readability.13315*/13316col_widths[i] = MAX(strlen(headers[i]), 6) + 2;13317}1331813319if (timestamp_fmt != NODATE)13320print_timestamp(timestamp_fmt);1332113322/* Print header if appropriate */13323int term_height = terminal_height();13324boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&13325row % (term_height-1) == 0);13326if (!wd->wd_scripted && (row == 0 || reprint_header)) {13327for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {13328if (wd->wd_enabled[i])13329(void) printf("%*s", col_widths[i], headers[i]);13330}13331(void) fputc('\n', stdout);13332}1333313334/* Bytes of work remaining in each activity */13335int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};1333613337bytes_rem[ZPOOL_WAIT_FREE] =13338zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);1333913340config = zpool_get_config(zhp, NULL);13341nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);1334213343(void) nvlist_lookup_uint64_array(nvroot,13344ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);13345if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)13346bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;1334713348(void) nvlist_lookup_uint64_array(nvroot,13349ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);13350if (prs != NULL && prs->prs_state == DSS_SCANNING)13351bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -13352prs->prs_copied;1335313354(void) nvlist_lookup_uint64_array(nvroot,13355ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);13356if (pss != NULL && pss->pss_state == DSS_SCANNING &&13357pss->pss_pass_scrub_pause == 0) {13358int64_t rem = pss->pss_to_examine - pss->pss_issued;13359if (pss->pss_func == POOL_SCAN_SCRUB)13360bytes_rem[ZPOOL_WAIT_SCRUB] = rem;13361else13362bytes_rem[ZPOOL_WAIT_RESILVER] = rem;13363} else if (check_rebuilding(nvroot, NULL)) {13364bytes_rem[ZPOOL_WAIT_RESILVER] =13365vdev_activity_top_remaining(nvroot);13366}1336713368(void) nvlist_lookup_uint64_array(nvroot,13369ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);13370if (pres != NULL && pres->pres_state == DSS_SCANNING) {13371int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;13372bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;13373}1337413375bytes_rem[ZPOOL_WAIT_INITIALIZE] =13376vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);13377bytes_rem[ZPOOL_WAIT_TRIM] =13378vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);1337913380/*13381* A replace finishes after resilvering finishes, so the amount of work13382* left for a replace is the same as for resilvering.13383*13384* It isn't quite correct to say that if we have any 'spare' or13385* 'replacing' vdevs and a resilver is happening, then a replace is in13386* progress, like we do here. When a hot spare is used, the faulted vdev13387* is not removed after the hot spare is resilvered, so parent 'spare'13388* vdev is not removed either. So we could have a 'spare' vdev, but be13389* resilvering for a different reason. However, we use it as a heuristic13390* because we don't have access to the DTLs, which could tell us whether13391* or not we have really finished resilvering a hot spare.13392*/13393if (vdev_any_spare_replacing(nvroot))13394bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];1339513396for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {13397char buf[64];13398if (!wd->wd_enabled[i])13399continue;1340013401if (wd->wd_exact) {13402(void) snprintf(buf, sizeof (buf), "%" PRIi64,13403bytes_rem[i]);13404} else {13405zfs_nicenum(bytes_rem[i], buf, sizeof (buf));13406}1340713408if (wd->wd_scripted)13409(void) printf(i == 0 ? "%s" : "\t%s", buf);13410else13411(void) printf(" %*s", col_widths[i] - 1, buf);13412}13413(void) printf("\n");13414(void) fflush(stdout);13415}1341613417static void *13418wait_status_thread(void *arg)13419{13420wait_data_t *wd = (wait_data_t *)arg;13421zpool_handle_t *zhp;1342213423if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)13424return (void *)(1);1342513426for (int row = 0; ; row++) {13427boolean_t missing;13428struct timespec timeout;13429int ret = 0;13430(void) clock_gettime(CLOCK_REALTIME, &timeout);1343113432if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||13433zpool_props_refresh(zhp) != 0) {13434zpool_close(zhp);13435return (void *)(uintptr_t)(missing ? 0 : 1);13436}1343713438print_wait_status_row(wd, zhp, row);1343913440timeout.tv_sec += floor(wd->wd_interval);13441long nanos = timeout.tv_nsec +13442(wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;13443if (nanos >= NANOSEC) {13444timeout.tv_sec++;13445timeout.tv_nsec = nanos - NANOSEC;13446} else {13447timeout.tv_nsec = nanos;13448}13449(void) pthread_mutex_lock(&wd->wd_mutex);13450if (!wd->wd_should_exit)13451ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,13452&timeout);13453(void) pthread_mutex_unlock(&wd->wd_mutex);13454if (ret == 0) {13455break; /* signaled by main thread */13456} else if (ret != ETIMEDOUT) {13457(void) fprintf(stderr, gettext("pthread_cond_timedwait "13458"failed: %s\n"), strerror(ret));13459zpool_close(zhp);13460return (void *)(uintptr_t)(1);13461}13462}1346313464zpool_close(zhp);13465return (void *)(0);13466}1346713468int13469zpool_do_wait(int argc, char **argv)13470{13471boolean_t verbose = B_FALSE;13472int c, i;13473unsigned long count;13474pthread_t status_thr;13475int error = 0;13476zpool_handle_t *zhp;1347713478wait_data_t wd;13479wd.wd_scripted = B_FALSE;13480wd.wd_exact = B_FALSE;13481wd.wd_headers_once = B_FALSE;13482wd.wd_should_exit = B_FALSE;1348313484(void) pthread_mutex_init(&wd.wd_mutex, NULL);13485(void) pthread_cond_init(&wd.wd_cv, NULL);1348613487/* By default, wait for all types of activity. */13488for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)13489wd.wd_enabled[i] = B_TRUE;1349013491while ((c = getopt(argc, argv, "HpT:t:")) != -1) {13492switch (c) {13493case 'H':13494wd.wd_scripted = B_TRUE;13495break;13496case 'n':13497wd.wd_headers_once = B_TRUE;13498break;13499case 'p':13500wd.wd_exact = B_TRUE;13501break;13502case 'T':13503get_timestamp_arg(*optarg);13504break;13505case 't':13506/* Reset activities array */13507memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));1350813509for (char *tok; (tok = strsep(&optarg, ",")); ) {13510static const char *const col_opts[] = {13511"discard", "free", "initialize", "replace",13512"remove", "resilver", "scrub", "trim",13513"raidz_expand" };1351413515for (i = 0; i < ARRAY_SIZE(col_opts); ++i)13516if (strcmp(tok, col_opts[i]) == 0) {13517wd.wd_enabled[i] = B_TRUE;13518goto found;13519}1352013521(void) fprintf(stderr,13522gettext("invalid activity '%s'\n"), tok);13523usage(B_FALSE);13524found:;13525}13526break;13527case '?':13528(void) fprintf(stderr, gettext("invalid option '%c'\n"),13529optopt);13530usage(B_FALSE);13531}13532}1353313534argc -= optind;13535argv += optind;1353613537get_interval_count(&argc, argv, &wd.wd_interval, &count);13538if (count != 0) {13539/* This subcmd only accepts an interval, not a count */13540(void) fprintf(stderr, gettext("too many arguments\n"));13541usage(B_FALSE);13542}1354313544if (wd.wd_interval != 0)13545verbose = B_TRUE;1354613547if (argc < 1) {13548(void) fprintf(stderr, gettext("missing 'pool' argument\n"));13549usage(B_FALSE);13550}13551if (argc > 1) {13552(void) fprintf(stderr, gettext("too many arguments\n"));13553usage(B_FALSE);13554}1355513556wd.wd_poolname = argv[0];1355713558if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)13559return (1);1356013561if (verbose) {13562/*13563* We use a separate thread for printing status updates because13564* the main thread will call lzc_wait(), which blocks as long13565* as an activity is in progress, which can be a long time.13566*/13567if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)13568!= 0) {13569(void) fprintf(stderr, gettext("failed to create status"13570"thread: %s\n"), strerror(errno));13571zpool_close(zhp);13572return (1);13573}13574}1357513576/*13577* Loop over all activities that we are supposed to wait for until none13578* of them are in progress. Note that this means we can end up waiting13579* for more activities to complete than just those that were in progress13580* when we began waiting; if an activity we are interested in begins13581* while we are waiting for another activity, we will wait for both to13582* complete before exiting.13583*/13584for (;;) {13585boolean_t missing = B_FALSE;13586boolean_t any_waited = B_FALSE;1358713588for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {13589boolean_t waited;1359013591if (!wd.wd_enabled[i])13592continue;1359313594error = zpool_wait_status(zhp, i, &missing, &waited);13595if (error != 0 || missing)13596break;1359713598any_waited = (any_waited || waited);13599}1360013601if (error != 0 || missing || !any_waited)13602break;13603}1360413605zpool_close(zhp);1360613607if (verbose) {13608uintptr_t status;13609(void) pthread_mutex_lock(&wd.wd_mutex);13610wd.wd_should_exit = B_TRUE;13611(void) pthread_cond_signal(&wd.wd_cv);13612(void) pthread_mutex_unlock(&wd.wd_mutex);13613(void) pthread_join(status_thr, (void *)&status);13614if (status != 0)13615error = status;13616}1361713618(void) pthread_mutex_destroy(&wd.wd_mutex);13619(void) pthread_cond_destroy(&wd.wd_cv);13620return (error);13621}1362213623/*13624* zpool ddtprune -d|-p <amount> <pool>13625*13626* -d <days> Prune entries <days> old and older13627* -p <percent> Prune <percent> amount of entries13628*13629* Prune single reference entries from DDT to satisfy the amount specified.13630*/13631int13632zpool_do_ddt_prune(int argc, char **argv)13633{13634zpool_ddt_prune_unit_t unit = ZPOOL_DDT_PRUNE_NONE;13635uint64_t amount = 0;13636zpool_handle_t *zhp;13637char *endptr;13638int c;1363913640while ((c = getopt(argc, argv, "d:p:")) != -1) {13641switch (c) {13642case 'd':13643if (unit == ZPOOL_DDT_PRUNE_PERCENTAGE) {13644(void) fprintf(stderr, gettext("-d cannot be "13645"combined with -p option\n"));13646usage(B_FALSE);13647}13648errno = 0;13649amount = strtoull(optarg, &endptr, 0);13650if (errno != 0 || *endptr != '\0' || amount == 0) {13651(void) fprintf(stderr,13652gettext("invalid days value\n"));13653usage(B_FALSE);13654}13655amount *= 86400; /* convert days to seconds */13656unit = ZPOOL_DDT_PRUNE_AGE;13657break;13658case 'p':13659if (unit == ZPOOL_DDT_PRUNE_AGE) {13660(void) fprintf(stderr, gettext("-p cannot be "13661"combined with -d option\n"));13662usage(B_FALSE);13663}13664errno = 0;13665amount = strtoull(optarg, &endptr, 0);13666if (errno != 0 || *endptr != '\0' ||13667amount == 0 || amount > 100) {13668(void) fprintf(stderr,13669gettext("invalid percentage value\n"));13670usage(B_FALSE);13671}13672unit = ZPOOL_DDT_PRUNE_PERCENTAGE;13673break;13674case '?':13675(void) fprintf(stderr, gettext("invalid option '%c'\n"),13676optopt);13677usage(B_FALSE);13678}13679}13680argc -= optind;13681argv += optind;1368213683if (unit == ZPOOL_DDT_PRUNE_NONE) {13684(void) fprintf(stderr,13685gettext("missing amount option (-d|-p <value>)\n"));13686usage(B_FALSE);13687} else if (argc < 1) {13688(void) fprintf(stderr, gettext("missing pool argument\n"));13689usage(B_FALSE);13690} else if (argc > 1) {13691(void) fprintf(stderr, gettext("too many arguments\n"));13692usage(B_FALSE);13693}13694zhp = zpool_open(g_zfs, argv[0]);13695if (zhp == NULL)13696return (-1);1369713698int error = zpool_ddt_prune(zhp, unit, amount);1369913700zpool_close(zhp);1370113702return (error);13703}1370413705static int13706find_command_idx(const char *command, int *idx)13707{13708for (int i = 0; i < NCOMMAND; ++i) {13709if (command_table[i].name == NULL)13710continue;1371113712if (strcmp(command, command_table[i].name) == 0) {13713*idx = i;13714return (0);13715}13716}13717return (1);13718}1371913720/*13721* Display version message13722*/13723static int13724zpool_do_version(int argc, char **argv)13725{13726int c;13727nvlist_t *jsobj = NULL, *zfs_ver = NULL;13728boolean_t json = B_FALSE;1372913730struct option long_options[] = {13731{"json", no_argument, NULL, 'j'},13732};1373313734while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {13735switch (c) {13736case 'j':13737json = B_TRUE;13738jsobj = zpool_json_schema(0, 1);13739break;13740case '?':13741(void) fprintf(stderr, gettext("invalid option '%c'\n"),13742optopt);13743usage(B_FALSE);13744}13745}1374613747argc -= optind;13748if (argc != 0) {13749(void) fprintf(stderr, "too many arguments\n");13750usage(B_FALSE);13751}1375213753if (json) {13754zfs_ver = zfs_version_nvlist();13755if (zfs_ver) {13756fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver);13757zcmd_print_json(jsobj);13758fnvlist_free(zfs_ver);13759return (0);13760} else13761return (-1);13762} else13763return (zfs_version_print() != 0);13764}1376513766/* Display documentation */13767static int13768zpool_do_help(int argc, char **argv)13769{13770char page[MAXNAMELEN];13771if (argc < 3 || strcmp(argv[2], "zpool") == 0)13772(void) strcpy(page, "zpool");13773else if (strcmp(argv[2], "concepts") == 0 ||13774strcmp(argv[2], "props") == 0)13775(void) snprintf(page, sizeof (page), "zpool%s", argv[2]);13776else13777(void) snprintf(page, sizeof (page), "zpool-%s", argv[2]);1377813779(void) execlp("man", "man", page, NULL);1378013781fprintf(stderr, "couldn't run man program: %s", strerror(errno));13782return (-1);13783}1378413785/*13786* Do zpool_load_compat() and print error message on failure13787*/13788static zpool_compat_status_t13789zpool_do_load_compat(const char *compat, boolean_t *list)13790{13791char report[1024];1379213793zpool_compat_status_t ret;1379413795ret = zpool_load_compat(compat, list, report, 1024);13796switch (ret) {1379713798case ZPOOL_COMPATIBILITY_OK:13799break;1380013801case ZPOOL_COMPATIBILITY_NOFILES:13802case ZPOOL_COMPATIBILITY_BADFILE:13803case ZPOOL_COMPATIBILITY_BADTOKEN:13804(void) fprintf(stderr, "Error: %s\n", report);13805break;1380613807case ZPOOL_COMPATIBILITY_WARNTOKEN:13808(void) fprintf(stderr, "Warning: %s\n", report);13809ret = ZPOOL_COMPATIBILITY_OK;13810break;13811}13812return (ret);13813}1381413815int13816main(int argc, char **argv)13817{13818int ret = 0;13819int i = 0;13820char *cmdname;13821char **newargv;1382213823(void) setlocale(LC_ALL, "");13824(void) setlocale(LC_NUMERIC, "C");13825(void) textdomain(TEXT_DOMAIN);13826srand(time(NULL));1382713828opterr = 0;1382913830/*13831* Make sure the user has specified some command.13832*/13833if (argc < 2) {13834(void) fprintf(stderr, gettext("missing command\n"));13835usage(B_FALSE);13836}1383713838cmdname = argv[1];1383913840/*13841* Special case '-?'13842*/13843if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)13844usage(B_TRUE);1384513846/*13847* Special case '-V|--version'13848*/13849if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))13850return (zfs_version_print() != 0);1385113852/*13853* Special case 'help'13854*/13855if (strcmp(cmdname, "help") == 0)13856return (zpool_do_help(argc, argv));1385713858if ((g_zfs = libzfs_init()) == NULL) {13859(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));13860return (1);13861}1386213863libzfs_print_on_error(g_zfs, B_TRUE);1386413865zfs_save_arguments(argc, argv, history_str, sizeof (history_str));1386613867/*13868* Many commands modify input strings for string parsing reasons.13869* We create a copy to protect the original argv.13870*/13871newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));13872for (i = 0; i < argc; i++)13873newargv[i] = strdup(argv[i]);13874newargv[argc] = NULL;1387513876/*13877* Run the appropriate command.13878*/13879if (find_command_idx(cmdname, &i) == 0) {13880current_command = &command_table[i];13881ret = command_table[i].func(argc - 1, newargv + 1);13882} else if (strchr(cmdname, '=')) {13883verify(find_command_idx("set", &i) == 0);13884current_command = &command_table[i];13885ret = command_table[i].func(argc, newargv);13886} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {13887/*13888* 'freeze' is a vile debugging abomination, so we treat13889* it as such.13890*/13891zfs_cmd_t zc = {"\0"};1389213893(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));13894ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);13895if (ret != 0) {13896(void) fprintf(stderr,13897gettext("failed to freeze pool: %d\n"), errno);13898ret = 1;13899}1390013901log_history = 0;13902} else {13903(void) fprintf(stderr, gettext("unrecognized "13904"command '%s'\n"), cmdname);13905usage(B_FALSE);13906}1390713908for (i = 0; i < argc; i++)13909free(newargv[i]);13910free(newargv);1391113912if (ret == 0 && log_history)13913(void) zpool_log_history(g_zfs, history_str);1391413915libzfs_fini(g_zfs);1391613917/*13918* The 'ZFS_ABORT' environment variable causes us to dump core on exit13919* for the purposes of running ::findleaks.13920*/13921if (getenv("ZFS_ABORT") != NULL) {13922(void) printf("dumping core by request\n");13923abort();13924}1392513926return (ret);13927}139281392913930