Path: sys/contrib/openzfs/cmd/zpool/zpool_main.c
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
 * Copyright 2016 Igor Kozhukhov <[email protected]>.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <[email protected]>
 * Copyright (c) 2021, Colm Buckley <[email protected]>
 * Copyright (c) 2021, 2023, 2025, Klara, Inc.
 * Copyright (c) 2021, 2025 Hewlett Packard Enterprise Development LP.
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread_pool.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <string.h>
#include <math.h>

#include <libzfs.h>
#include <libzutil.h>

#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "zfs_valstr.h"

#include "statcommon.h"

libzfs_handle_t *g_zfs;

static int mount_tp_nthr = 512;	/* tpool threads for multi-threaded mounting */

static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);

static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);

static int zpool_do_checkpoint(int, char **);
static int zpool_do_prefetch(int, char **);

static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);

static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);

static int zpool_do_reguid(int, char **);

static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);

static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);

static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);

static int zpool_do_upgrade(int, char **);

static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);

static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);

static int zpool_do_sync(int, char **);

static int zpool_do_version(int, char **);

static int zpool_do_wait(int, char **);

static int zpool_do_ddt_prune(int, char **);

static int zpool_do_help(int argc, char **argv);

static zpool_compat_status_t zpool_do_load_compat(
    const char *, boolean_t *);

enum zpool_options {
	ZPOOL_OPTION_POWER = 1024,
	ZPOOL_OPTION_ALLOW_INUSE,
	ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
	ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH,
	ZPOOL_OPTION_POOL_KEY_GUID,
	ZPOOL_OPTION_JSON_NUMS_AS_INT,
	ZPOOL_OPTION_JSON_FLAT_VDEVS
};

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */

#ifdef DEBUG
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif

typedef enum {
	HELP_ADD,
	HELP_ATTACH,
	HELP_CLEAR,
	HELP_CREATE,
	HELP_CHECKPOINT,
	HELP_DDT_PRUNE,
	HELP_DESTROY,
	HELP_DETACH,
	HELP_EXPORT,
	HELP_HISTORY,
	HELP_IMPORT,
	HELP_IOSTAT,
	HELP_LABELCLEAR,
	HELP_LIST,
	HELP_OFFLINE,
	HELP_ONLINE,
	HELP_PREFETCH,
	HELP_REPLACE,
	HELP_REMOVE,
	HELP_INITIALIZE,
	HELP_SCRUB,
	HELP_RESILVER,
	HELP_TRIM,
	HELP_STATUS,
	HELP_UPGRADE,
	HELP_EVENTS,
	HELP_GET,
	HELP_SET,
	HELP_SPLIT,
	HELP_SYNC,
	HELP_REGUID,
	HELP_REOPEN,
	HELP_VERSION,
	HELP_WAIT
} zpool_help_t;


/*
 * Flags for stats to display with "zpool iostat"
 */
enum iostat_type {
	IOS_DEFAULT = 0,
	IOS_LATENCY = 1,
	IOS_QUEUES = 2,
	IOS_L_HISTO = 3,
	IOS_RQ_HISTO = 4,
	IOS_COUNT,	/* always last element */
};

/* iostat_type entries as bitmasks */
#define	IOS_DEFAULT_M	(1ULL << IOS_DEFAULT)
#define	IOS_LATENCY_M	(1ULL << IOS_LATENCY)
#define	IOS_QUEUES_M	(1ULL << IOS_QUEUES)
#define	IOS_L_HISTO_M	(1ULL << IOS_L_HISTO)
#define	IOS_RQ_HISTO_M	(1ULL << IOS_RQ_HISTO)

/* Mask of all the histo bits */
#define	IOS_ANYHISTO_M	(IOS_L_HISTO_M | IOS_RQ_HISTO_M)

/*
 * Lookup table for iostat flags to nvlist names.  Basically a list
 * of all the nvlists a flag requires.  Also specifies the order in
 * which data gets printed in zpool iostat.
 */
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
	[IOS_L_HISTO] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_LATENCY] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_QUEUES] = {
	    ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
	    NULL},
	[IOS_RQ_HISTO] = {
	    ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
	    NULL},
};

static const char *pool_scan_func_str[] = {
	"NONE",
	"SCRUB",
	"RESILVER",
	"ERRORSCRUB"
};

static const char *pool_scan_state_str[] = {
	"NONE",
	"SCANNING",
	"FINISHED",
	"CANCELED",
	"ERRORSCRUBBING"
};

static const char *vdev_rebuild_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"COMPLETE"
};

static const char *checkpoint_state_str[] = {
	"NONE",
	"EXISTS",
	"DISCARDING"
};

static const char *vdev_state_str[] = {
	"UNKNOWN",
	"CLOSED",
	"OFFLINE",
	"REMOVED",
	"CANT_OPEN",
	"FAULTED",
	"DEGRADED",
	"ONLINE"
};

static const char *vdev_aux_str[] = {
	"NONE",
	"OPEN_FAILED",
	"CORRUPT_DATA",
	"NO_REPLICAS",
	"BAD_GUID_SUM",
	"TOO_SMALL",
	"BAD_LABEL",
	"VERSION_NEWER",
	"VERSION_OLDER",
	"UNSUP_FEAT",
	"SPARED",
	"ERR_EXCEEDED",
	"IO_FAILURE",
	"BAD_LOG",
	"EXTERNAL",
	"SPLIT_POOL",
	"BAD_ASHIFT",
	"EXTERNAL_PERSIST",
	"ACTIVE",
	"CHILDREN_OFFLINE",
	"ASHIFT_TOO_BIG"
};

static const char *vdev_init_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};

static const char *vdev_trim_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};

#define	ZFS_NICE_TIMESTAMP	100

/*
 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
 * Right now, only one histo bit is ever set at one time, so we can
 * just do a highbit64(a).
 */
#define	IOS_HISTO_IDX(a)	(highbit64(a & IOS_ANYHISTO_M) - 1)
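
/*
 * Illustrative example of the arithmetic above: with only IOS_L_HISTO_M
 * set in cb_flags, the masked value is 1ULL << 3 == 8, highbit64(8)
 * returns 4 (the 1-indexed position of the highest set bit), and the
 * macro yields 3 == IOS_L_HISTO.
 */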

typedef struct zpool_command {
	const char	*name;
	int		(*func)(int, char **);
	zpool_help_t	usage;
} zpool_command_t;

/*
 * Master command table.  Each ZFS command has a name, associated function, and
 * usage message.  The usage messages need to be internationalized, so we have
 * to have a function to return the usage message based on a command index.
 *
 * These commands are organized according to how they are displayed in the usage
 * message.  An empty command (one with a NULL name) indicates an empty line in
 * the generic usage message.
 */
static zpool_command_t command_table[] = {
	{ "version",	zpool_do_version,	HELP_VERSION	},
	{ NULL },
	{ "create",	zpool_do_create,	HELP_CREATE	},
	{ "destroy",	zpool_do_destroy,	HELP_DESTROY	},
	{ NULL },
	{ "add",	zpool_do_add,		HELP_ADD	},
	{ "remove",	zpool_do_remove,	HELP_REMOVE	},
	{ NULL },
	{ "labelclear",	zpool_do_labelclear,	HELP_LABELCLEAR	},
	{ NULL },
	{ "checkpoint",	zpool_do_checkpoint,	HELP_CHECKPOINT	},
	{ "prefetch",	zpool_do_prefetch,	HELP_PREFETCH	},
	{ NULL },
	{ "list",	zpool_do_list,		HELP_LIST	},
	{ "iostat",	zpool_do_iostat,	HELP_IOSTAT	},
	{ "status",	zpool_do_status,	HELP_STATUS	},
	{ NULL },
	{ "online",	zpool_do_online,	HELP_ONLINE	},
	{ "offline",	zpool_do_offline,	HELP_OFFLINE	},
	{ "clear",	zpool_do_clear,		HELP_CLEAR	},
	{ "reopen",	zpool_do_reopen,	HELP_REOPEN	},
	{ NULL },
	{ "attach",	zpool_do_attach,	HELP_ATTACH	},
	{ "detach",	zpool_do_detach,	HELP_DETACH	},
	{ "replace",	zpool_do_replace,	HELP_REPLACE	},
	{ "split",	zpool_do_split,		HELP_SPLIT	},
	{ NULL },
	{ "initialize",	zpool_do_initialize,	HELP_INITIALIZE	},
	{ "resilver",	zpool_do_resilver,	HELP_RESILVER	},
	{ "scrub",	zpool_do_scrub,		HELP_SCRUB	},
	{ "trim",	zpool_do_trim,		HELP_TRIM	},
	{ NULL },
	{ "import",	zpool_do_import,	HELP_IMPORT	},
	{ "export",	zpool_do_export,	HELP_EXPORT	},
	{ "upgrade",	zpool_do_upgrade,	HELP_UPGRADE	},
	{ "reguid",	zpool_do_reguid,	HELP_REGUID	},
	{ NULL },
	{ "history",	zpool_do_history,	HELP_HISTORY	},
	{ "events",	zpool_do_events,	HELP_EVENTS	},
	{ NULL },
	{ "get",	zpool_do_get,		HELP_GET	},
	{ "set",	zpool_do_set,		HELP_SET	},
	{ "sync",	zpool_do_sync,		HELP_SYNC	},
	{ NULL },
	{ "wait",	zpool_do_wait,		HELP_WAIT	},
	{ NULL },
	{ "ddtprune",	zpool_do_ddt_prune,	HELP_DDT_PRUNE	},
};

#define	NCOMMAND	(ARRAY_SIZE(command_table))

#define	VDEV_ALLOC_CLASS_LOGS	"logs"

#define	MAX_CMD_LEN	256

static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;

static const char *
get_usage(zpool_help_t idx)
{
	switch (idx) {
	case HELP_ADD:
		return (gettext("\tadd [-afgLnP] [-o property=value] "
		    "<pool> <vdev> ...\n"));
	case HELP_ATTACH:
		return (gettext("\tattach [-fsw] [-o property=value] "
		    "<pool> <vdev> <new-device>\n"));
	case HELP_CLEAR:
		return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
	case HELP_CREATE:
		return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
		    "\t    [-O file-system-property=value] ... \n"
		    "\t    [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
	case HELP_CHECKPOINT:
		return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
	case HELP_DESTROY:
		return (gettext("\tdestroy [-f] <pool>\n"));
	case HELP_DETACH:
		return (gettext("\tdetach <pool> <device>\n"));
	case HELP_EXPORT:
		return (gettext("\texport [-af] <pool> ...\n"));
	case HELP_HISTORY:
		return (gettext("\thistory [-il] [<pool>] ...\n"));
	case HELP_IMPORT:
		return (gettext("\timport [-d dir] [-D]\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]] -a\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t    [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]]\n"
		    "\t    [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
	case HELP_IOSTAT:
		return (gettext("\tiostat [[[-c [script1,script2,...]"
		    "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
		    "\t    [[pool ...]|[pool vdev ...]|[vdev ...]]"
		    " [[-n] interval [count]]\n"));
	case HELP_LABELCLEAR:
		return (gettext("\tlabelclear [-f] <vdev>\n"));
	case HELP_LIST:
		return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j "
		    "[--json-int, --json-pool-key-guid]] ...\n"
		    "\t    [-T d|u] [pool] [interval [count]]\n"));
	case HELP_PREFETCH:
		return (gettext("\tprefetch -t <type> [<type opts>] <pool>\n"
		    "\t    -t ddt <pool>\n"));
	case HELP_OFFLINE:
		return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
		    "<device> ...\n"));
	case HELP_ONLINE:
		return (gettext("\tonline [--power][-e] <pool> <device> "
		    "...\n"));
	case HELP_REPLACE:
		return (gettext("\treplace [-fsw] [-o property=value] "
		    "<pool> <device> [new-device]\n"));
	case HELP_REMOVE:
		return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
	case HELP_REOPEN:
		return (gettext("\treopen [-n] <pool>\n"));
	case HELP_INITIALIZE:
		return (gettext("\tinitialize [-c | -s | -u] [-w] <-a | <pool> "
		    "[<device> ...]>\n"));
	case HELP_SCRUB:
		return (gettext("\tscrub [-e | -s | -p | -C | -E | -S] [-w] "
		    "<-a | <pool> [<pool> ...]>\n"));
	case HELP_RESILVER:
		return (gettext("\tresilver <pool> ...\n"));
	case HELP_TRIM:
		return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] "
		    "<-a | <pool> [<device> ...]>\n"));
	case HELP_STATUS:
		return (gettext("\tstatus [-DdegiLPpstvx] "
		    "[-c script1[,script2,...]] ...\n"
		    "\t    [-j|--json [--json-flat-vdevs] [--json-int] "
		    "[--json-pool-key-guid]] ...\n"
		    "\t    [-T d|u] [--power] [pool] [interval [count]]\n"));
	case HELP_UPGRADE:
		return (gettext("\tupgrade\n"
		    "\tupgrade -v\n"
		    "\tupgrade [-V version] <-a | pool ...>\n"));
	case HELP_EVENTS:
		return (gettext("\tevents [-vHf [pool] | -c]\n"));
	case HELP_GET:
		return (gettext("\tget [-Hp] [-j [--json-int, "
		    "--json-pool-key-guid]] ...\n"
		    "\t    [-o \"all\" | field[,...]] "
		    "<\"all\" | property[,...]> <pool> ...\n"));
	case HELP_SET:
		return (gettext("\tset <property=value> <pool>\n"
		    "\tset <vdev_property=value> <pool> <vdev>\n"));
	case HELP_SPLIT:
		return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
		    "\t    [-o property=value] <pool> <newpool> "
		    "[<device> ...]\n"));
	case HELP_REGUID:
		return (gettext("\treguid [-g guid] <pool>\n"));
	case HELP_SYNC:
		return (gettext("\tsync [pool] ...\n"));
	case HELP_VERSION:
		return (gettext("\tversion [-j]\n"));
	case HELP_WAIT:
		return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
		    "<pool> [interval]\n"));
	case HELP_DDT_PRUNE:
		return (gettext("\tddtprune -d|-p <amount> <pool>\n"));
	default:
		__builtin_unreachable();
	}
}

/*
 * Callback routine that will print out a pool property value.
 */
static int
print_pool_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s  ", zpool_prop_to_name(prop));

	if (zpool_prop_readonly(prop))
		(void) fprintf(fp, "  NO   ");
	else
		(void) fprintf(fp, " YES   ");

	if (zpool_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", zpool_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Callback routine that will print out a vdev property value.
 */
static int
print_vdev_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s  ", vdev_prop_to_name(prop));

	if (vdev_prop_readonly(prop))
		(void) fprintf(fp, "  NO   ");
	else
		(void) fprintf(fp, " YES   ");

	if (vdev_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", vdev_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
 * '/dev/disk/by-vdev/L5'.
 */
static const char *
vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
{
	nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
	if (vdev_nv == NULL) {
		return (NULL);
	}
	return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
}

static int
zpool_power_on(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_TRUE));
}

static int
zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
{
	int rc;

	rc = zpool_power_on(zhp, vdev);
	if (rc != 0)
		return (rc);

	zpool_disk_wait(vdev_name_to_path(zhp, vdev));

	return (0);
}

static int
zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
{
	nvlist_t *nv;
	const char *path = NULL;
	int rc;

	/* Power up all the devices first */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		if (path != NULL) {
			rc = zpool_power_on(zhp, (char *)path);
			if (rc != 0) {
				return (rc);
			}
		}
	}

	/*
	 * Wait for their devices to show up.  Since we powered them on
	 * at roughly the same time, they should all come online around
	 * the same time.
	 */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		zpool_disk_wait(path);
	}

	return (0);
}

static int
zpool_power_off(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_FALSE));
}

/*
 * Display usage message.  If we're inside a command, display only the usage for
 * that command.  Otherwise, iterate over the entire command table and display
 * a complete usage message.
 */
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
	FILE *fp = requested ? stdout : stderr;

	if (current_command == NULL) {
		int i;

		(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
		(void) fprintf(fp,
		    gettext("where 'command' is one of the following:\n\n"));

		for (i = 0; i < NCOMMAND; i++) {
			if (command_table[i].name == NULL)
				(void) fprintf(fp, "\n");
			else
				(void) fprintf(fp, "%s",
				    get_usage(command_table[i].usage));
		}

		(void) fprintf(fp,
		    gettext("\nFor further help on a command or topic, "
		    "run: %s\n"), "zpool help [<topic>]");
	} else {
		(void) fprintf(fp, gettext("usage:\n"));
		(void) fprintf(fp, "%s", get_usage(current_command->usage));
	}

	if (current_command != NULL &&
	    current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
	    ((strcmp(current_command->name, "set") == 0) ||
	    (strcmp(current_command->name, "get") == 0) ||
	    (strcmp(current_command->name, "list") == 0))) {

		(void) fprintf(fp, "%s",
		    gettext("\nthe following properties are supported:\n"));

		(void) fprintf(fp, "\n\t%-19s  %s   %s\n\n",
		    "PROPERTY", "EDIT", "VALUES");

		/* Iterate over all properties */
		if (current_prop_type == ZFS_TYPE_POOL) {
			(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);

			(void) fprintf(fp, "\t%-19s   ", "feature@...");
			(void) fprintf(fp, "YES   "
			    "disabled | enabled | active\n");

			(void) fprintf(fp, gettext("\nThe feature@ properties "
			    "must be appended with a feature name.\n"
			    "See zpool-features(7).\n"));
		} else if (current_prop_type == ZFS_TYPE_VDEV) {
			(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);
		}
	}

	/*
	 * See comments at end of main().
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	exit(requested ? 0 : 2);
}

/*
 * zpool initialize [-c | -s | -u] [-w] <-a | pool> [<vdev> ...]
 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
 * if none specified.
 *
 *	-a	Use all pools.
 *	-c	Cancel. Ends active initializing.
 *	-s	Suspend. Initializing can then be restarted with no flags.
 *	-u	Uninitialize. Clears initialization state.
 *	-w	Wait. Blocks until initializing has completed.
 */
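
/*
 * Illustrative invocations (pool and device names are hypothetical):
 * 'zpool initialize tank' starts initializing every eligible vdev in
 * 'tank'; 'zpool initialize -w tank sdb' initializes only 'sdb' and
 * blocks until it finishes; 'zpool initialize -s tank' suspends an
 * initialization already in progress.
 */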
int
zpool_do_initialize(int argc, char **argv)
{
	int c;
	char *poolname;
	zpool_handle_t *zhp;
	int err = 0;
	boolean_t wait = B_FALSE;
	boolean_t initialize_all = B_FALSE;

	struct option long_options[] = {
		{"cancel",	no_argument,		NULL, 'c'},
		{"suspend",	no_argument,		NULL, 's'},
		{"uninit",	no_argument,		NULL, 'u'},
		{"wait",	no_argument,		NULL, 'w'},
		{"all",		no_argument,		NULL, 'a'},
		{0, 0, 0, 0}
	};

	pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
	while ((c = getopt_long(argc, argv, "acsuw", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'a':
			initialize_all = B_TRUE;
			break;
		case 'c':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_CANCEL) {
				(void) fprintf(stderr, gettext("-c cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_CANCEL;
			break;
		case 's':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_SUSPEND) {
				(void) fprintf(stderr, gettext("-s cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_SUSPEND;
			break;
		case 'u':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_UNINIT) {
				(void) fprintf(stderr, gettext("-u cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_UNINIT;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			if (optopt != 0) {
				(void) fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			} else {
				(void) fprintf(stderr,
				    gettext("invalid option '%s'\n"),
				    argv[optind - 1]);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	initialize_cbdata_t cbdata = {
		.wait = wait,
		.cmd_type = cmd_type
	};

	if (initialize_all && argc > 0) {
		(void) fprintf(stderr, gettext("-a cannot be combined with "
		    "individual pools or vdevs\n"));
		usage(B_FALSE);
	}

	if (argc < 1 && !initialize_all) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
		return (-1);
	}

	if (wait && (cmd_type != POOL_INITIALIZE_START)) {
		(void) fprintf(stderr, gettext("-w cannot be used with -c, -s "
		    "or -u\n"));
		usage(B_FALSE);
	}

	if (argc == 0 && initialize_all) {
		/* Initialize each pool recursively */
		err = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
		    B_FALSE, zpool_initialize_one, &cbdata);
		return (err);
	} else if (argc == 1) {
		/* no individual leaf vdevs specified, initialize the pool */
		poolname = argv[0];
		zhp = zpool_open(g_zfs, poolname);
		if (zhp == NULL)
			return (-1);
		err = zpool_initialize_one(zhp, &cbdata);
	} else {
		/* individual leaf vdevs specified, initialize them */
		poolname = argv[0];
		zhp = zpool_open(g_zfs, poolname);
		if (zhp == NULL)
			return (-1);
		nvlist_t *vdevs = fnvlist_alloc();
		for (int i = 1; i < argc; i++) {
			fnvlist_add_boolean(vdevs, argv[i]);
		}
		if (wait)
			err = zpool_initialize_wait(zhp, cmd_type, vdevs);
		else
			err = zpool_initialize(zhp, cmd_type, vdevs);
		fnvlist_free(vdevs);
	}

	zpool_close(zhp);

	return (err);
}

/*
 * print a pool vdev config for dry runs
 */
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
    const char *match, int name_flags)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	boolean_t printed = B_FALSE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		if (name != NULL)
			(void) printf("\t%*s%s\n", indent, "", name);
		return;
	}

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		const char *class = "";

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);

		if (is_hole == B_TRUE) {
			continue;
		}

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (is_log)
			class = VDEV_ALLOC_BIAS_LOG;
		(void) nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
		if (strcmp(match, class) != 0)
			continue;

		if (!printed && name != NULL) {
			(void) printf("\t%*s%s\n", indent, "", name);
			printed = B_TRUE;
		}
		vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
		print_vdev_tree(zhp, vname, child[c], indent + 2, "",
		    name_flags);
		free(vname);
	}
}

/*
 * Print the list of l2cache devices for dry runs.
 */
static void
print_cache_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "cache");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

/*
 * Print the list of spares for dry runs.
 */
static void
print_spare_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "spares");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

typedef struct spare_cbdata {
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} spare_cbdata_t;

static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
	    search == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev(child[c], search))
				return (B_TRUE);
	}

	return (B_FALSE);
}

static int
find_spare(zpool_handle_t *zhp, void *data)
{
	spare_cbdata_t *cbp = data;
	nvlist_t *config, *nvroot;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (find_vdev(nvroot, cbp->cb_guid)) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}

static void
nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,
    boolean_t literal, boolean_t as_int, int format)
{
	char buf[256];
	if (literal) {
		if (!as_int)
			snprintf(buf, 256, "%llu", (u_longlong_t)value);
	} else {
		switch (format) {
		case ZFS_NICENUM_1024:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024);
			break;
		case ZFS_NICENUM_BYTES:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES);
			break;
		case ZFS_NICENUM_TIME:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME);
			break;
		case ZFS_NICE_TIMESTAMP:
			format_timestamp(value, buf, 256);
			break;
		default:
			fprintf(stderr, "Invalid number format");
			exit(1);
		}
	}
	if (as_int)
		fnvlist_add_uint64(item, key, value);
	else
		fnvlist_add_string(item, key, buf);
}

/*
 * Generates an nvlist with output version for every command based on params.
 * Purpose of this is to add a version of JSON output, considering the schema
 * format might be updated for each command in future.
 *
 * Schema:
 *
 * "output_version": {
 *     "command": string,
 *     "vers_major": integer,
 *     "vers_minor": integer,
 * }
 */
static nvlist_t *
zpool_json_schema(int maj_v, int min_v)
{
	char cmd[MAX_CMD_LEN];
	nvlist_t *sch = fnvlist_alloc();
	nvlist_t *ov = fnvlist_alloc();

	snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);
	fnvlist_add_string(ov, "command", cmd);
	fnvlist_add_uint32(ov, "vers_major", maj_v);
	fnvlist_add_uint32(ov, "vers_minor", min_v);
	fnvlist_add_nvlist(sch, "output_version", ov);
	fnvlist_free(ov);
	return (sch);
}
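
/*
 * For example, zpool_json_schema(0, 1) invoked from 'zpool status' would
 * render in the JSON output as (version numbers illustrative):
 *
 *	"output_version": {
 *	    "command": "zpool status",
 *	    "vers_major": 0,
 *	    "vers_minor": 1
 *	}
 */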

static void
fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,
    boolean_t as_int)
{
	nvlist_t *config = zpool_get_config(zhp, NULL);
	uint64_t guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
	uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);

	fnvlist_add_string(list, "name", zpool_get_name(zhp));
	if (addtype)
		fnvlist_add_string(list, "type", "POOL");
	fnvlist_add_string(list, "state", zpool_get_state_str(zhp));
	if (as_int) {
		if (guid)
			fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_GUID, guid);
		if (txg)
			fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_TXG, txg);
		fnvlist_add_uint64(list, "spa_version", SPA_VERSION);
		fnvlist_add_uint64(list, "zpl_version", ZPL_VERSION);
	} else {
		char value[ZFS_MAXPROPLEN];
		if (guid) {
			snprintf(value, ZFS_MAXPROPLEN, "%llu",
			    (u_longlong_t)guid);
			fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value);
		}
		if (txg) {
			snprintf(value, ZFS_MAXPROPLEN, "%llu",
			    (u_longlong_t)txg);
			fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value);
		}
		fnvlist_add_string(list, "spa_version", SPA_VERSION_STRING);
		fnvlist_add_string(list, "zpl_version", ZPL_VERSION_STRING);
	}
}

static void
used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list)
{
	spare_cbdata_t spare_cb;
	verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID,
	    &spare_cb.cb_guid) == 0);
	if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
		if (strcmp(zpool_get_name(spare_cb.cb_zhp),
		    zpool_get_name(zhp)) != 0) {
			fnvlist_add_string(list, "used_by",
			    zpool_get_name(spare_cb.cb_zhp));
		}
		zpool_close(spare_cb.cb_zhp);
	}
}

static void
fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
    boolean_t addtype, boolean_t as_int)
{
	boolean_t l2c = B_FALSE;
	const char *path, *phys, *devid, *bias = NULL;
	uint64_t hole = 0, log = 0, spare = 0;
	vdev_stat_t *vs;
	uint_t c;
	nvlist_t *nvdev;
	nvlist_t *nvdev_parent = NULL;
	char *_name;

	if (strcmp(name, zpool_get_name(zhp)) != 0)
		_name = name;
	else
		_name = (char *)"root-0";

	nvdev = zpool_find_vdev(zhp, _name, NULL, &l2c, NULL);

	fnvlist_add_string(list, "name", name);
	if (addtype)
		fnvlist_add_string(list, "type", "VDEV");
	if (nvdev) {
		const char *type = fnvlist_lookup_string(nvdev,
		    ZPOOL_CONFIG_TYPE);
		if (type)
			fnvlist_add_string(list, "vdev_type", type);
		uint64_t guid = fnvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID);
		if (guid) {
			if (as_int) {
				fnvlist_add_uint64(list, "guid", guid);
			} else {
				char buf[ZFS_MAXPROPLEN];
				snprintf(buf, ZFS_MAXPROPLEN, "%llu",
				    (u_longlong_t)guid);
				fnvlist_add_string(list, "guid", buf);
			}
		}
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0)
			fnvlist_add_string(list, "path", path);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH,
		    &phys) == 0)
			fnvlist_add_string(list, "phys_path", phys);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID,
		    &devid) == 0)
			fnvlist_add_string(list, "devid", devid);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE,
		    &spare);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole);
		if (hole)
			fnvlist_add_string(list, "class", VDEV_TYPE_HOLE);
		else if (l2c)
			fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE);
		else if (spare)
			fnvlist_add_string(list, "class", VDEV_TYPE_SPARE);
		else if (log)
			fnvlist_add_string(list, "class", VDEV_TYPE_LOG);
		else {
			(void) nvlist_lookup_string(nvdev,
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
			if (bias != NULL)
				fnvlist_add_string(list, "class", bias);
			else {
				nvdev_parent = NULL;
				nvdev_parent = zpool_find_parent_vdev(zhp,
				    _name, NULL, NULL, NULL);

				/*
				 * With a mirrored special device, the parent
				 * "mirror" vdev will have
				 * ZPOOL_CONFIG_ALLOCATION_BIAS set to "special"
				 * not the leaf vdevs.  If we're a leaf vdev
				 * in that case we need to look at our parent
				 * to see if they're "special" to know if we
				 * are "special" too.
				 */
				if (nvdev_parent) {
					(void) nvlist_lookup_string(
					    nvdev_parent,
					    ZPOOL_CONFIG_ALLOCATION_BIAS,
					    &bias);
				}
				if (bias != NULL)
					fnvlist_add_string(list, "class", bias);
				else
					fnvlist_add_string(list, "class",
					    "normal");
			}
		}
		if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c) == 0) {
			fnvlist_add_string(list, "state",
			    vdev_state_str[vs->vs_state]);
		}
	}
}

static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
	nvpair_t *nvp;
	for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
	    nvp = nvlist_next_nvpair(proplist, nvp)) {
		if (zpool_prop_feature(nvpair_name(nvp)))
			return (B_TRUE);
	}
	return (B_FALSE);
}
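
/*
 * Hypothetical example: given a property nvlist holding
 * { "feature@async_destroy" = "enabled", "ashift" = "12" },
 * prop_list_contains_feature() returns B_TRUE because the first pair's
 * name carries the "feature@" prefix that zpool_prop_feature() matches.
 */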

/*
 * Add a property pair (name, string-value) into a property nvlist.
 */
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
    boolean_t poolprop)
{
	zpool_prop_t prop = ZPOOL_PROP_INVAL;
	nvlist_t *proplist;
	const char *normnm;
	const char *strval;

	if (*props == NULL &&
	    nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		return (1);
	}

	proplist = *props;

	if (poolprop) {
		const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
		const char *cname =
		    zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);

		if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
		    (!zpool_prop_feature(propname) &&
		    !zpool_prop_vdev(propname))) {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid pool or vdev property\n"), propname);
			return (2);
		}

		/*
		 * feature@ properties and version should not be specified
		 * at the same time.
		 */
		if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    prop_list_contains_feature(proplist))) {
			(void) fprintf(stderr, gettext("'feature@' and "
			    "'version' properties cannot be specified "
			    "together\n"));
			return (2);
		}

		/*
		 * if version is specified, only "legacy" compatibility
		 * may be requested
		 */
		if ((prop == ZPOOL_PROP_COMPATIBILITY &&
		    strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    nvlist_exists(proplist, cname) &&
		    strcmp(fnvlist_lookup_string(proplist, cname),
		    ZPOOL_COMPAT_LEGACY) != 0)) {
			(void) fprintf(stderr, gettext("when 'version' is "
			    "specified, the 'compatibility' feature may only "
			    "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
			return (2);
		}

		if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
			normnm = propname;
		else
			normnm = zpool_prop_to_name(prop);
	} else {
		zfs_prop_t fsprop = zfs_name_to_prop(propname);

		if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
		    B_FALSE)) {
			normnm = zfs_prop_to_name(fsprop);
		} else if (zfs_prop_user(propname) ||
		    zfs_prop_userquota(propname)) {
			normnm = propname;
		} else {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid filesystem property\n"), propname);
			return (2);
		}
	}

	if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
	    prop != ZPOOL_PROP_CACHEFILE) {
		(void) fprintf(stderr, gettext("property '%s' "
		    "specified multiple times\n"), propname);
		return (2);
	}

	if (nvlist_add_string(proplist, normnm, propval) != 0) {
		(void) fprintf(stderr, gettext("internal "
		    "error: out of memory\n"));
		return (1);
	}

	return (0);
}

/*
 * Set a default property pair (name, string-value) in a property nvlist
 */
static int
add_prop_list_default(const char *propname, const char *propval,
    nvlist_t **props)
{
	const char *pval;

	if (nvlist_lookup_string(*props, propname, &pval) == 0)
		return (0);

	return (add_prop_list(propname, propval, props, B_TRUE));
}
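
/*
 * For instance, the '-R' and '-t' cases in zpool_do_create() below use
 * add_prop_list_default() to default 'cachefile' to "none"; the default
 * is only applied when the user has not already supplied a value.
 */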

/*
 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
 *
 *	-a	Disable the ashift validation checks
 *	-f	Force addition of devices, even if they appear in use
 *	-g	Display guid for individual vdev name.
 *	-L	Follow links when resolving vdev path name.
 *	-n	Do not add the devices, but display the resulting layout if
 *		they were to be added.
 *	-o	Set property=value.
 *	-P	Display full path for vdev name.
 *
 * Adds the given vdevs to 'pool'.  As with create, the bulk of this work is
 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
 * libzfs.
 */
int
zpool_do_add(int argc, char **argv)
{
	boolean_t check_replication = B_TRUE;
	boolean_t check_inuse = B_TRUE;
	boolean_t dryrun = B_FALSE;
	boolean_t check_ashift = B_TRUE;
	boolean_t force = B_FALSE;
	int name_flags = 0;
	int c;
	nvlist_t *nvroot;
	char *poolname;
	int ret;
	zpool_handle_t *zhp;
	nvlist_t *config;
	nvlist_t *props = NULL;
	char *propval;

	struct option long_options[] = {
		{"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
		{"allow-replication-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
		{"allow-ashift-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
	    != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'g':
			name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				usage(B_FALSE);
			}
			*propval = '\0';
			propval++;

			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
			    (add_prop_list(optarg, propval, &props, B_TRUE)))
				usage(B_FALSE);
			break;
		case 'P':
			name_flags |= VDEV_NAME_PATH;
			break;
		case ZPOOL_OPTION_ALLOW_INUSE:
			check_inuse = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
			check_replication = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
			check_ashift = B_FALSE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		usage(B_FALSE);
	}

	if (force) {
		if (!check_inuse || !check_replication || !check_ashift) {
			(void) fprintf(stderr, gettext("'-f' option is not "
			    "allowed with '--allow-replication-mismatch', "
			    "'--allow-ashift-mismatch', or "
			    "'--allow-in-use'\n"));
			usage(B_FALSE);
		}
		check_inuse = B_FALSE;
		check_replication = B_FALSE;
		check_ashift = B_FALSE;
	}

	poolname = argv[0];

	argc--;
	argv++;

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
		    poolname);
		zpool_close(zhp);
		return (1);
	}

	/* unless manually specified use "ashift" pool property (if set) */
	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
		int intval;
		zprop_source_t src;
		char strval[ZPOOL_MAXPROPLEN];

		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
		if (src != ZPROP_SRC_DEFAULT) {
			(void) sprintf(strval, "%" PRId32, intval);
			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
			    &props, B_TRUE) == 0);
		}
	}
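
	/*
	 * Illustrative example: if the pool's 'ashift' property was set
	 * locally (say to 12 at creation time), its source is not
	 * ZPROP_SRC_DEFAULT, so newly added vdevs inherit ashift=12 here
	 * unless an explicit '-o ashift=' was given on the command line.
	 */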

	/* pass off to make_root_vdev for processing */
	nvroot = make_root_vdev(zhp, props, !check_inuse,
	    check_replication, B_FALSE, dryrun, argc, argv);
	if (nvroot == NULL) {
		zpool_close(zhp);
		return (1);
	}

	if (dryrun) {
		nvlist_t *poolnvroot;
		nvlist_t **l2child, **sparechild;
		uint_t l2children, sparechildren, c;
		char *vname;
		boolean_t hadcache = B_FALSE, hadspare = B_FALSE;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &poolnvroot) == 0);

		(void) printf(gettext("would update '%s' to the following "
		    "configuration:\n\n"), zpool_get_name(zhp));

		/* print original main pool and new tree */
		print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
		    name_flags | VDEV_NAME_TYPE_ID);
		print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);

		/* print other classes: 'dedup', 'special', and 'log' */
		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		}

		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		}

		if (num_logs(poolnvroot) > 0) {
			print_vdev_tree(zhp, "logs", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		} else if (num_logs(nvroot) > 0) {
			print_vdev_tree(zhp, "logs", nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		}

		/* Do the same for the caches */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			hadcache = B_TRUE;
			(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			if (!hadcache)
				(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}
		/* And finally the spares */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			hadspare = B_TRUE;
			(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			if (!hadspare)
				(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t  %s\n", vname);
				free(vname);
			}
		}

		ret = 0;
	} else {
		ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
	}

	nvlist_free(props);
	nvlist_free(nvroot);
	zpool_close(zhp);

	return (ret);
}

/*
 * zpool remove [-npsw] <pool> <vdev> ...
 *
 * Removes the given vdev from the pool.
 */
int
zpool_do_remove(int argc, char **argv)
{
	char *poolname;
	int i, ret = 0;
	zpool_handle_t *zhp = NULL;
	boolean_t stop = B_FALSE;
	int c;
	boolean_t noop = B_FALSE;
	boolean_t parsable = B_FALSE;
	boolean_t wait = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "npsw")) != -1) {
		switch (c) {
		case 'n':
			noop = B_TRUE;
			break;
		case 'p':
			parsable = B_TRUE;
			break;
		case 's':
			stop = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if (stop && noop) {
		zpool_close(zhp);
		(void) fprintf(stderr, gettext("stop request ignored\n"));
		return (0);
	}

	if (stop) {
		if (argc > 1) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
		if (zpool_vdev_remove_cancel(zhp) != 0)
			ret = 1;
		if (wait) {
			(void) fprintf(stderr, gettext("invalid option "
			    "combination: -w cannot be used with -s\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc < 2) {
			(void) fprintf(stderr, gettext("missing device\n"));
			usage(B_FALSE);
		}

		for (i = 1; i < argc; i++) {
			if (noop) {
				uint64_t size;

				if (zpool_vdev_indirect_size(zhp, argv[i],
				    &size) != 0) {
					ret = 1;
					break;
				}
				if (parsable) {
					(void) printf("%s %llu\n",
					    argv[i], (unsigned long long)size);
				} else {
					char valstr[32];
					zfs_nicenum(size, valstr,
					    sizeof (valstr));
					(void) printf("Memory that will be "
					    "used after removing %s: %s\n",
					    argv[i], valstr);
				}
			} else {
				if (zpool_vdev_remove(zhp, argv[i]) != 0)
					ret = 1;
			}
		}

		if (ret == 0 && wait)
			ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
	}
	zpool_close(zhp);

	return (ret);
}

/*
 * Return 1 if a vdev is active (being used in a pool)
 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
 *
 * This is useful for checking if a disk in an active pool is offlined or
 * faulted.
 */
static int
vdev_is_active(char *vdev_path)
{
	int fd;
	fd = open(vdev_path, O_EXCL);
	if (fd < 0) {
		return (1);	/* can't open O_EXCL - disk is active */
	}

	close(fd);
	return (0);	/* disk is inactive in the pool */
}
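
/*
 * Note on the open() in vdev_is_active() above (assumed platform
 * behavior): O_RDONLY is 0, so this is effectively a read-only exclusive
 * open.  While a pool is imported, the kernel holds its leaf vdevs open
 * exclusively, so the open() is expected to fail for a disk that is
 * actively in use.
 */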

/*
 * zpool labelclear [-f] <vdev>
 *
 *	-f	Force clearing the label for the vdevs which are members of
 *		the exported or foreign pools.
 *
 * Verifies that the vdev is not active and zeros out the label information
 * on the device.
 */
int
zpool_do_labelclear(int argc, char **argv)
{
	char vdev[MAXPATHLEN];
	char *name = NULL;
	int c, fd, ret = 0;
	nvlist_t *config;
	pool_state_t state;
	boolean_t inuse = B_FALSE;
	boolean_t force = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		default:
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get vdev name */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing vdev name\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	(void) strlcpy(vdev, argv[0], sizeof (vdev));

	/*
	 * If we cannot open an absolute path, we quit.
	 * Otherwise if the provided vdev name doesn't point to a file,
	 * try prepending expected disk paths and partition numbers.
	 */
	if ((fd = open(vdev, O_RDWR)) < 0) {
		int error;
		if (vdev[0] == '/') {
			(void) fprintf(stderr, gettext("failed to open "
			    "%s: %s\n"), vdev, strerror(errno));
			return (1);
		}

		error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
		if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
			if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
				error = ENOENT;
		}

		if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
			if (errno == ENOENT) {
				(void) fprintf(stderr, gettext(
				    "failed to find device %s, try "
				    "specifying absolute path instead\n"),
				    argv[0]);
				return (1);
			}

			(void) fprintf(stderr, gettext("failed to open %s:"
			    " %s\n"), vdev, strerror(errno));
			return (1);
		}
	}

	/*
	 * Flush all dirty pages for the block device.  This should not be
	 * fatal when the device does not support BLKFLSBUF as would be the
	 * case for a file vdev.
	 */
	if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
		(void) fprintf(stderr, gettext("failed to invalidate "
		    "cache for %s: %s\n"), vdev, strerror(errno));

	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) fprintf(stderr,
		    gettext("failed to read label from %s\n"), vdev);
		ret = 1;
		goto errout;
	}
	nvlist_free(config);

	ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to check state for %s\n"), vdev);
		ret = 1;
		goto errout;
	}

	if (!inuse)
		goto wipe_label;

	switch (state) {
	default:
	case POOL_STATE_ACTIVE:
	case POOL_STATE_SPARE:
	case POOL_STATE_L2CACHE:
		/*
		 * We allow the user to call 'zpool offline -f'
		 * on an offlined disk in an active pool. We can check if
		 * the disk is online by calling vdev_is_active().
		 */
		if (force && !vdev_is_active(vdev))
			break;

		(void) fprintf(stderr, gettext(
		    "%s is a member (%s) of pool \"%s\""),
		    vdev, zpool_pool_state_to_name(state), name);

		if (force) {
			(void) fprintf(stderr, gettext(
			    ". Offline the disk first to clear its label."));
		}
		printf("\n");
		ret = 1;
		goto errout;

	case POOL_STATE_EXPORTED:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of exported pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_POTENTIALLY_ACTIVE:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of potentially active pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_DESTROYED:
		/* inuse should never be set for a destroyed pool */
		assert(0);
		break;
	}

wipe_label:
	ret = zpool_clear_label(fd);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to clear label for %s\n"), vdev);
	}

errout:
	free(name);
	(void) close(fd);

	return (ret);
}

/*
 * zpool create [-fnd] [-o property=value] ...
 *		[-O file-system-property=value] ...
 *		[-R root] [-m mountpoint] <pool> <dev> ...
 *
 *	-f	Force creation, even if devices appear in use
 *	-n	Do not create the pool, but display the resulting layout if it
 *		were to be created.
 *	-R	Create a pool under an alternate root
 *	-m	Set default mountpoint for the root dataset.  By default it's
 *		'/<pool>'
 *	-o	Set property=value.
 *	-o	Set feature@feature=enabled|disabled.
 *	-d	Don't automatically enable all supported pool features
 *		(individual features can be enabled with -o).
 *	-O	Set fsproperty=value in the pool's root file system
 *
 * Creates the named pool according to the given vdev specification.  The
 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
 * Once we get the nvlist back from make_root_vdev(), we either print out the
 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
 */
int
zpool_do_create(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	boolean_t dryrun = B_FALSE;
	boolean_t enable_pool_features = B_TRUE;

	int c;
	nvlist_t *nvroot = NULL;
	char *poolname;
	char *tname = NULL;
	int ret = 1;
	char *altroot = NULL;
	char *compat = NULL;
	char *mountpoint = NULL;
	nvlist_t *fsprops = NULL;
	nvlist_t *props = NULL;
	char *propval;

	/* check options */
	while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'd':
			enable_pool_features = B_FALSE;
			break;
		case 'R':
			altroot = optarg;
			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
				goto errout;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto errout;
			break;
		case 'm':
			/* Equivalent to -O mountpoint=optarg */
			mountpoint = optarg;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			if (add_prop_list(optarg, propval, &props, B_TRUE))
				goto errout;

			/*
			 * If the user is creating a pool that doesn't support
			 * feature flags, don't enable any features.
			 */
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
				char *end;
				u_longlong_t ver;

				ver = strtoull(propval, &end, 0);
				if (*end == '\0' &&
				    ver < SPA_VERSION_FEATURES) {
					enable_pool_features = B_FALSE;
				}
			}
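
			/*
			 * Illustrative example: 'zpool create -o version=28
			 * ...' requests a pre-feature-flags pool version
			 * (feature flags arrived with SPA_VERSION_FEATURES,
			 * i.e. pool version 5000), so no feature@ properties
			 * are enabled in that case.
			 */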
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
				altroot = propval;
			if (zpool_name_to_prop(optarg) ==
			    ZPOOL_PROP_COMPATIBILITY)
				compat = propval;
			break;
		case 'O':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -O option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			/*
			 * Mountpoints are checked and then added later.
			 * Uniquely among properties, they can be specified
			 * more than once, to avoid conflict with -m.
			 */
			if (0 == strcmp(optarg,
			    zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
				mountpoint = propval;
			} else if (add_prop_list(optarg, propval, &fsprops,
			    B_FALSE)) {
				goto errout;
			}
			break;
		case 't':
			/*
			 * Sanity check temporary pool name.
			 */
			if (strchr(optarg, '/') != NULL) {
				(void) fprintf(stderr, gettext("cannot create "
				    "'%s': invalid character '/' in temporary "
				    "name\n"), optarg);
				(void) fprintf(stderr, gettext("use 'zfs "
				    "create' to create a dataset\n"));
				goto errout;
			}

			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
				goto errout;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto errout;
			tname = optarg;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			goto badusage;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			goto badusage;
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		goto badusage;
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		goto badusage;
	}

	poolname = argv[0];

	/*
	 * As a special case, check for use of '/' in the name, and direct the
	 * user to use 'zfs create' instead.
	 */
	if (strchr(poolname, '/') != NULL) {
		(void) fprintf(stderr, gettext("cannot create '%s': invalid "
		    "character '/' in pool name\n"), poolname);
		(void) fprintf(stderr, gettext("use 'zfs create' to "
		    "create a dataset\n"));
		goto errout;
	}

	/* pass off to make_root_vdev for bulk processing */
	nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
	    argc - 1, argv + 1);
	if (nvroot == NULL)
		goto errout;

	/* make_root_vdev() allows 0 toplevel children if there are spares */
	if (!zfs_allocatable_devs(nvroot)) {
		(void) fprintf(stderr, gettext("invalid vdev "
		    "specification: at least one toplevel vdev must be "
		    "specified\n"));
		goto errout;
	}

	if (altroot != NULL && altroot[0] != '/') {
		(void) fprintf(stderr, gettext("invalid alternate root '%s': "
		    "must be an absolute path\n"), altroot);
		goto errout;
	}

	/*
	 * Check the validity of the mountpoint and direct the user to use the
	 * '-m' mountpoint option if it looks like it's in use.
	 */
	if (mountpoint == NULL ||
	    (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
	    strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
		char buf[MAXPATHLEN];
		DIR *dirp;

		if (mountpoint && mountpoint[0] != '/') {
			(void) fprintf(stderr, gettext("invalid mountpoint "
			    "'%s': must be an absolute path, 'legacy', or "
			    "'none'\n"), mountpoint);
			goto errout;
		}

		if (mountpoint == NULL) {
			if (altroot != NULL)
				(void) snprintf(buf, sizeof (buf), "%s/%s",
				    altroot, poolname);
			else
				(void) snprintf(buf, sizeof (buf), "/%s",
				    poolname);
		} else {
			if (altroot != NULL)
				(void) snprintf(buf, sizeof (buf), "%s%s",
				    altroot, mountpoint);
			else
				(void) snprintf(buf, sizeof (buf), "%s",
				    mountpoint);
		}

		if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
			(void) fprintf(stderr, gettext("mountpoint '%s' : "
			    "%s\n"), buf, strerror(errno));
			(void) fprintf(stderr, gettext("use '-m' "
			    "option to provide a different default\n"));
			goto errout;
		} else if (dirp) {
			int count = 0;

			while (count < 3 && readdir(dirp) != NULL)
				count++;
			(void) closedir(dirp);

			if (count > 2) {
				(void) fprintf(stderr, gettext("mountpoint "
				    "'%s' exists and is not empty\n"), buf);
				(void) fprintf(stderr, gettext("use '-m' "
				    "option to provide a "
				    "different default\n"));
				goto errout;
			}
		}
	}

	/*
	 * Now that the mountpoint's validity has been checked, ensure that
	 * the property is set appropriately prior to creating the pool.
	 */
	if (mountpoint != NULL) {
		ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    mountpoint, &fsprops, B_FALSE);
		if (ret != 0)
			goto errout;
	}

	ret = 1;
	if (dryrun) {
		/*
		 * For a dry run invocation, print out a basic message and run
		 * through all the vdevs in the list and print out in an
		 * appropriate hierarchy.
		 */
		(void) printf(gettext("would create '%s' with the "
		    "following layout:\n\n"), poolname);

		print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
		print_vdev_tree(NULL, "dedup", nvroot, 0,
		    VDEV_ALLOC_BIAS_DEDUP, 0);
		print_vdev_tree(NULL, "special", nvroot, 0,
		    VDEV_ALLOC_BIAS_SPECIAL, 0);
		print_vdev_tree(NULL, "logs", nvroot, 0,
		    VDEV_ALLOC_BIAS_LOG, 0);
		print_cache_list(nvroot, 0);
		print_spare_list(nvroot, 0);

		ret = 0;
	} else {
		/*
		 * Load in feature set.
		 * Note: if compatibility property not given, we'll have
		 * NULL, which means 'all features'.
		 */
		boolean_t requested_features[SPA_FEATURES];
		if (zpool_do_load_compat(compat, requested_features) !=
		    ZPOOL_COMPATIBILITY_OK)
			goto errout;

		/*
		 * props contains list of features to enable.
		 * For each feature:
		 *  - remove it if feature@name=disabled
		 *  - leave it there if feature@name=enabled
		 *  - add it if:
		 *    - enable_pool_features (ie: no '-d' or '-o version')
		 *    - it's supported by the kernel module
		 *    - it's in the requested feature set
		 *  - warn if it's enabled but not in compat
		 */
		for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
			char propname[MAXPATHLEN];
			const char *propval;
			zfeature_info_t *feat = &spa_feature_table[i];

			(void) snprintf(propname, sizeof (propname),
			    "feature@%s", feat->fi_uname);

			if (!nvlist_lookup_string(props, propname, &propval)) {
				if (strcmp(propval,
				    ZFS_FEATURE_DISABLED) == 0) {
					(void) nvlist_remove_all(props,
					    propname);
				} else if (strcmp(propval,
				    ZFS_FEATURE_ENABLED) == 0 &&
				    !requested_features[i]) {
					(void) fprintf(stderr, gettext(
					    "Warning: feature \"%s\" enabled "
					    "but is not in specified "
					    "'compatibility' feature set.\n"),
					    feat->fi_uname);
				}
			} else if (
			    enable_pool_features &&
			    feat->fi_zfs_mod_supported &&
			    requested_features[i]) {
				ret = add_prop_list(propname,
				    ZFS_FEATURE_ENABLED, &props, B_TRUE);
				if (ret != 0)
					goto errout;
			}
		}
		ret = 1;
		if (zpool_create(g_zfs, poolname,
		    nvroot, props, fsprops) == 0) {
			zfs_handle_t *pool = zfs_open(g_zfs,
			    tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
			if (pool != NULL) {
				if (zfs_mount(pool, NULL, 0) == 0) {
					ret = zfs_share(pool, NULL);
					zfs_commit_shares(NULL);
				}
				zfs_close(pool);
			}
		} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
			(void) fprintf(stderr, gettext("pool name may have "
			    "been omitted\n"));
		}
	}

errout:
	nvlist_free(nvroot);
	nvlist_free(fsprops);
	nvlist_free(props);
	return (ret);
badusage:
	nvlist_free(fsprops);
	nvlist_free(props);
	usage(B_FALSE);
	return (2);
}

/*
 * zpool destroy <pool>
 *
 *	-f	Forcefully unmount any datasets
 *
 * Destroy the given pool.  Automatically unmounts any datasets in the pool.
 */
int
zpool_do_destroy(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	int c;
	char *pool;
	zpool_handle_t *zhp;
	int ret;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* check arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	pool = argv[0];

	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
		/*
		 * As a special case, check for use of '/' in the name, and
		 * direct the user to use 'zfs destroy' instead.
		 */
		if (strchr(pool, '/') != NULL)
			(void) fprintf(stderr, gettext("use 'zfs destroy' to "
			    "destroy a dataset\n"));
		return (1);
	}

	if (zpool_disable_datasets(zhp, force) != 0) {
		(void) fprintf(stderr, gettext("could not destroy '%s': "
		    "could not unmount datasets\n"), zpool_get_name(zhp));
		zpool_close(zhp);
		return (1);
	}

	/* The history must be logged as part of the export */
	log_history = B_FALSE;

	ret = (zpool_destroy(zhp, history_str) != 0);

	zpool_close(zhp);

	return (ret);
}

typedef struct export_cbdata {
	tpool_t *tpool;
	pthread_mutex_t mnttab_lock;
	boolean_t force;
	boolean_t hardforce;
	int retval;
} export_cbdata_t;


typedef struct {
	char *aea_poolname;
	export_cbdata_t *aea_cbdata;
} async_export_args_t;

/*
 * Export one pool
 */
static int
zpool_export_one(zpool_handle_t *zhp, void *data)
{
	export_cbdata_t *cb = data;

	/*
	 * zpool_disable_datasets() is not thread-safe for mnttab access.
	 * So we serialize access here for 'zpool export -a' parallel case.
	 */
	if (cb->tpool != NULL)
		pthread_mutex_lock(&cb->mnttab_lock);

	int retval = zpool_disable_datasets(zhp, cb->force);

	if (cb->tpool != NULL)
		pthread_mutex_unlock(&cb->mnttab_lock);

	if (retval)
		return (1);

	if (cb->hardforce) {
		if (zpool_export_force(zhp, history_str) != 0)
			return (1);
	} else if (zpool_export(zhp, cb->force, history_str) != 0) {
		return (1);
	}

	return (0);
}
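/*
 * Note: worker threads only ever store nonzero values into cb->retval, so
 * the unsynchronized updates in zpool_export_task() below cannot hide an
 * earlier failure.
 */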
/*
 * Asynchronous export request
 */
static void
zpool_export_task(void *arg)
{
	async_export_args_t *aea = arg;

	zpool_handle_t *zhp = zpool_open(g_zfs, aea->aea_poolname);
	if (zhp != NULL) {
		int ret = zpool_export_one(zhp, aea->aea_cbdata);
		if (ret != 0)
			aea->aea_cbdata->retval = ret;
		zpool_close(zhp);
	} else {
		aea->aea_cbdata->retval = 1;
	}

	free(aea->aea_poolname);
	free(aea);
}

/*
 * Process an export request in parallel
 */
static int
zpool_export_one_async(zpool_handle_t *zhp, void *data)
{
	tpool_t *tpool = ((export_cbdata_t *)data)->tpool;
	async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t));

	/* save pool name since zhp will go out of scope */
	aea->aea_poolname = strdup(zpool_get_name(zhp));
	aea->aea_cbdata = data;

	/* ship off actual export to another thread */
	if (tpool_dispatch(tpool, zpool_export_task, (void *)aea) != 0)
		return (errno);	/* unlikely */
	else
		return (0);
}

/*
 * zpool export [-f] <pool> ...
 *
 *	-a	Export all pools
 *	-f	Forcefully unmount datasets
 *
 * Export the given pools.  By default, the command will attempt to cleanly
 * unmount any active datasets within the pool.  If the '-f' flag is specified,
 * then the datasets will be forcefully unmounted.
 */
int
zpool_do_export(int argc, char **argv)
{
	export_cbdata_t cb;
	boolean_t do_all = B_FALSE;
	boolean_t force = B_FALSE;
	boolean_t hardforce = B_FALSE;
	int c, ret;

	/* check options */
	while ((c = getopt(argc, argv, "afF")) != -1) {
		switch (c) {
		case 'a':
			do_all = B_TRUE;
			break;
		case 'f':
			force = B_TRUE;
			break;
		case 'F':
			hardforce = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	cb.force = force;
	cb.hardforce = hardforce;
	cb.tpool = NULL;
	cb.retval = 0;
	argc -= optind;
	argv += optind;

	/* The history will be logged as part of the export itself */
	log_history = B_FALSE;

	if (do_all) {
		if (argc != 0) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}

		cb.tpool = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
		pthread_mutex_init(&cb.mnttab_lock, NULL);

		/* Asynchronously call zpool_export_one using thread pool */
		ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
		    B_FALSE, zpool_export_one_async, &cb);

		tpool_wait(cb.tpool);
		tpool_destroy(cb.tpool);
		(void) pthread_mutex_destroy(&cb.mnttab_lock);

		return (ret | cb.retval);
	}

	/* check arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	}

	ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, zpool_export_one, &cb);

	return (ret);
}
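/*
 * Example: 'zpool export -a' dispatches one export task per imported pool
 * onto the thread pool above, while 'zpool export <pool>' runs the export
 * inline on the caller's thread.
 */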
/*
 * Given a vdev configuration, determine the maximum width needed for the device
 * name column.
 */
static int
max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
    int name_flags)
{
	static const char *const subtypes[] =
	    {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};

	char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
	max = MAX(strlen(name) + depth, max);
	free(name);

	nvlist_t **child;
	uint_t children;
	for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
		if (nvlist_lookup_nvlist_array(nv, subtypes[i],
		    &child, &children) == 0)
			for (uint_t c = 0; c < children; ++c)
				max = MAX(max_width(zhp, child[c], depth + 2,
				    max, name_flags), max);

	return (max);
}

typedef struct status_cbdata {
	int		cb_count;
	int		cb_name_flags;
	int		cb_namewidth;
	boolean_t	cb_allpools;
	boolean_t	cb_verbose;
	boolean_t	cb_literal;
	boolean_t	cb_explain;
	boolean_t	cb_first;
	boolean_t	cb_dedup_stats;
	boolean_t	cb_print_unhealthy;
	boolean_t	cb_print_status;
	boolean_t	cb_print_slow_ios;
	boolean_t	cb_print_dio_verify;
	boolean_t	cb_print_vdev_init;
	boolean_t	cb_print_vdev_trim;
	vdev_cmd_data_list_t	*vcdl;
	boolean_t	cb_print_power;
	boolean_t	cb_json;
	boolean_t	cb_flat_vdevs;
	nvlist_t	*cb_jsobj;
	boolean_t	cb_json_as_int;
	boolean_t	cb_json_pool_key_guid;
} status_cbdata_t;

/* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */
static boolean_t
is_blank_str(const char *str)
{
	for (; str != NULL && *str != '\0'; ++str)
		if (!isblank(*str))
			return (B_FALSE);
	return (B_TRUE);
}

static void
zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path,
    nvlist_t *item)
{
	vdev_cmd_data_t *data;
	int i, j, k = 1;
	char tmp[256];
	const char *val;

	for (i = 0; i < vcdl->count; i++) {
		if ((strcmp(vcdl->data[i].path, path) != 0) ||
		    (strcmp(vcdl->data[i].pool, pool) != 0))
			continue;

		data = &vcdl->data[i];
		for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
			val = NULL;
			for (int k = 0; k < data->cols_cnt; k++) {
				if (strcmp(data->cols[k],
				    vcdl->uniq_cols[j]) == 0) {
					val = data->lines[k];
					break;
				}
			}
			if (val == NULL || is_blank_str(val))
				val = "-";
			fnvlist_add_string(item, vcdl->uniq_cols[j], val);
		}

		for (j = data->cols_cnt; j < data->lines_cnt; j++) {
			if (data->lines[j]) {
				snprintf(tmp, 256, "extra_%d", k++);
				fnvlist_add_string(item, tmp,
				    data->lines[j]);
			}
		}
		break;
	}
}
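/*
 * Note: in zpool_nvlist_cmd() above, output lines beyond the named columns
 * are stored under synthetic "extra_1", "extra_2", ... keys so that no
 * command output is dropped from the JSON rendering.
 */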
/* Print command output lines for specific vdev in a specific pool */
static void
zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
{
	vdev_cmd_data_t *data;
	int i, j;
	const char *val;

	for (i = 0; i < vcdl->count; i++) {
		if ((strcmp(vcdl->data[i].path, path) != 0) ||
		    (strcmp(vcdl->data[i].pool, pool) != 0)) {
			/* Not the vdev we're looking for */
			continue;
		}

		data = &vcdl->data[i];
		/* Print out all the output values for this vdev */
		for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
			val = NULL;
			/* Does this vdev have values for this column? */
			for (int k = 0; k < data->cols_cnt; k++) {
				if (strcmp(data->cols[k],
				    vcdl->uniq_cols[j]) == 0) {
					/* yes it does, record the value */
					val = data->lines[k];
					break;
				}
			}
			/*
			 * Mark empty values with dashes to make output
			 * awk-able.
			 */
			if (val == NULL || is_blank_str(val))
				val = "-";

			printf("%*s", vcdl->uniq_cols_width[j], val);
			if (j < vcdl->uniq_cols_cnt - 1)
				fputs("  ", stdout);
		}

		/* Print out any values that aren't in a column at the end */
		for (j = data->cols_cnt; j < data->lines_cnt; j++) {
			/* Did we have any columns?  If so print a spacer. */
			if (vcdl->uniq_cols_cnt > 0)
				fputs("  ", stdout);

			val = data->lines[j];
			fputs(val ?: "", stdout);
		}
		break;
	}
}

/*
 * Print vdev initialization status for leaves
 */
static void
print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
{
	if (verbose) {
		if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
		    vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
		    vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
		    !vs->vs_scan_removing) {
			char zbuf[1024];
			char tbuf[256];

			time_t t = vs->vs_initialize_action_time;
			int initialize_pct = 100;
			if (vs->vs_initialize_state !=
			    VDEV_INITIALIZE_COMPLETE) {
				initialize_pct = (vs->vs_initialize_bytes_done *
				    100 / (vs->vs_initialize_bytes_est + 1));
			}

			(void) ctime_r(&t, tbuf);
			tbuf[24] = 0;

			switch (vs->vs_initialize_state) {
			case VDEV_INITIALIZE_SUSPENDED:
				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
				    gettext("suspended, started at"), tbuf);
				break;
			case VDEV_INITIALIZE_ACTIVE:
				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
				    gettext("started at"), tbuf);
				break;
			case VDEV_INITIALIZE_COMPLETE:
				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
				    gettext("completed at"), tbuf);
				break;
			}

			(void) printf(gettext("  (%d%% initialized%s)"),
			    initialize_pct, zbuf);
		} else {
			(void) printf(gettext("  (uninitialized)"));
		}
	} else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
		(void) printf(gettext("  (initializing)"));
	}
}

/*
 * Print vdev TRIM status for leaves
 */
static void
print_status_trim(vdev_stat_t *vs, boolean_t verbose)
{
	if (verbose) {
		if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
		    vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
		    vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
		    !vs->vs_scan_removing) {
			char zbuf[1024];
			char tbuf[256];

			time_t t = vs->vs_trim_action_time;
			int trim_pct = 100;
			if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
				trim_pct = (vs->vs_trim_bytes_done *
				    100 / (vs->vs_trim_bytes_est + 1));
			}

			(void) ctime_r(&t, tbuf);
			tbuf[24] = 0;

			switch (vs->vs_trim_state) {
			case VDEV_TRIM_SUSPENDED:
				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
				    gettext("suspended, started at"), tbuf);
				break;
			case VDEV_TRIM_ACTIVE:
				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
				    gettext("started at"), tbuf);
				break;
			case VDEV_TRIM_COMPLETE:
				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
				    gettext("completed at"), tbuf);
				break;
			}

			(void) printf(gettext("  (%d%% trimmed%s)"),
			    trim_pct, zbuf);
		} else if (vs->vs_trim_notsup) {
			(void) printf(gettext("  (trim unsupported)"));
		} else {
			(void) printf(gettext("  (untrimmed)"));
		}
	} else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
		(void) printf(gettext("  (trimming)"));
	}
}

/*
 * Return the color associated with a health string.  This includes returning
 * NULL for no color change.
 */
static const char *
health_str_to_color(const char *health)
{
	if (strcmp(health, gettext("FAULTED")) == 0 ||
	    strcmp(health, gettext("SUSPENDED")) == 0 ||
	    strcmp(health, gettext("UNAVAIL")) == 0) {
		return (ANSI_RED);
	}

	if (strcmp(health, gettext("OFFLINE")) == 0 ||
	    strcmp(health, gettext("DEGRADED")) == 0 ||
	    strcmp(health, gettext("REMOVED")) == 0) {
		return (ANSI_YELLOW);
	}

	return (NULL);
}
/*
 * Called for each leaf vdev.  Returns 0 if the vdev is healthy.
 * A vdev is unhealthy if any of the following are true:
 * 1) there are read, write, or checksum errors,
 * 2) its state is not ONLINE, or
 * 3) slow IO reporting was requested (-s) and there are slow IOs.
 */
static int
vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)
{
	status_cbdata_t *cb = data;
	vdev_stat_t *vs;
	uint_t vsc;
	(void) hdl_data;

	if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) != 0)
		return (1);

	if (vs->vs_checksum_errors || vs->vs_read_errors ||
	    vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)
		return (1);

	if (cb->cb_print_slow_ios && vs->vs_slow_ios)
		return (1);

	return (0);
}

/*
 * Print out configuration state as requested by status_callback.
 */
static void
print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
    nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
{
	nvlist_t **child, *root;
	uint_t c, i, vsc, children;
	pool_scan_stat_t *ps = NULL;
	vdev_stat_t *vs;
	char rbuf[6], wbuf[6], cbuf[6], dbuf[6];
	char *vname;
	uint64_t notpresent;
	spare_cbdata_t spare_cb;
	const char *state;
	const char *type;
	const char *path = NULL;
	const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,
	    *scolor = NULL;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);

	if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
		return;

	state = zpool_state_to_name(vs->vs_state, vs->vs_aux);

	if (isspare) {
		/*
		 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
		 * online drives.
		 */
		if (vs->vs_aux == VDEV_AUX_SPARED)
			state = gettext("INUSE");
		else if (vs->vs_state == VDEV_STATE_HEALTHY)
			state = gettext("AVAIL");
	}

	/*
	 * If '-e' is specified then top-level vdevs and their children
	 * can be pruned if all of their leaves are healthy.
	 */
	if (cb->cb_print_unhealthy && depth > 0 &&
	    for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
		return;
	}

	printf_color(health_str_to_color(state),
	    "\t%*s%-*s  %-8s", depth, "", cb->cb_namewidth - depth,
	    name, state);

	if (!isspare) {
		if (vs->vs_read_errors)
			rcolor = ANSI_RED;

		if (vs->vs_write_errors)
			wcolor = ANSI_RED;

		if (vs->vs_checksum_errors)
			ccolor = ANSI_RED;

		if (vs->vs_slow_ios)
			scolor = ANSI_BLUE;

		if (cb->cb_literal) {
			fputc(' ', stdout);
			printf_color(rcolor, "%5llu",
			    (u_longlong_t)vs->vs_read_errors);
			fputc(' ', stdout);
			printf_color(wcolor, "%5llu",
			    (u_longlong_t)vs->vs_write_errors);
			fputc(' ', stdout);
			printf_color(ccolor, "%5llu",
			    (u_longlong_t)vs->vs_checksum_errors);
		} else {
			zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
			zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
			zfs_nicenum(vs->vs_checksum_errors, cbuf,
			    sizeof (cbuf));
			fputc(' ', stdout);
			printf_color(rcolor, "%5s", rbuf);
			fputc(' ', stdout);
			printf_color(wcolor, "%5s", wbuf);
			fputc(' ', stdout);
			printf_color(ccolor, "%5s", cbuf);
		}
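		/*
		 * Slow I/O counts and enclosure power state only apply to
		 * leaf vdevs; interior vdevs are shown with a "-" below.
		 */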
		if (cb->cb_print_slow_ios) {
			if (children == 0) {
				/* Only leaf vdevs have slow IOs */
				zfs_nicenum(vs->vs_slow_ios, rbuf,
				    sizeof (rbuf));
			} else {
				snprintf(rbuf, sizeof (rbuf), "-");
			}

			if (cb->cb_literal)
				printf_color(scolor, " %5llu",
				    (u_longlong_t)vs->vs_slow_ios);
			else
				printf_color(scolor, " %5s", rbuf);
		}
		if (cb->cb_print_power) {
			if (children == 0) {
				/* Only leaf vdevs have physical slots */
				switch (zpool_power_current_state(zhp, (char *)
				    fnvlist_lookup_string(nv,
				    ZPOOL_CONFIG_PATH))) {
				case 0:
					printf_color(ANSI_RED, " %5s",
					    gettext("off"));
					break;
				case 1:
					printf(" %5s", gettext("on"));
					break;
				default:
					printf(" %5s", "-");
				}
			} else {
				printf(" %5s", "-");
			}
		}
		if (VDEV_STAT_VALID(vs_dio_verify_errors, vsc) &&
		    cb->cb_print_dio_verify) {
			zfs_nicenum(vs->vs_dio_verify_errors, dbuf,
			    sizeof (dbuf));

			if (cb->cb_literal)
				printf(" %5llu",
				    (u_longlong_t)vs->vs_dio_verify_errors);
			else
				printf(" %5s", dbuf);
		}
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &notpresent) == 0) {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
		(void) printf("  %s %s", gettext("was"), path);
	} else if (vs->vs_aux != 0) {
		(void) printf("  ");
		color_start(ANSI_RED);
		switch (vs->vs_aux) {
		case VDEV_AUX_OPEN_FAILED:
			(void) printf(gettext("cannot open"));
			break;

		case VDEV_AUX_BAD_GUID_SUM:
			(void) printf(gettext("missing device"));
			break;

		case VDEV_AUX_NO_REPLICAS:
			(void) printf(gettext("insufficient replicas"));
			break;

		case VDEV_AUX_VERSION_NEWER:
			(void) printf(gettext("newer version"));
			break;

		case VDEV_AUX_UNSUP_FEAT:
			(void) printf(gettext("unsupported feature(s)"));
			break;

		case VDEV_AUX_ASHIFT_TOO_BIG:
			(void) printf(gettext("unsupported minimum blocksize"));
			break;

		case VDEV_AUX_SPARED:
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &spare_cb.cb_guid) == 0);
			if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
				if (strcmp(zpool_get_name(spare_cb.cb_zhp),
				    zpool_get_name(zhp)) == 0)
					(void) printf(gettext("currently in "
					    "use"));
				else
					(void) printf(gettext("in use by "
					    "pool '%s'"),
					    zpool_get_name(spare_cb.cb_zhp));
				zpool_close(spare_cb.cb_zhp);
			} else {
				(void) printf(gettext("currently in use"));
			}
			break;

		case VDEV_AUX_ERR_EXCEEDED:
			if (vs->vs_read_errors + vs->vs_write_errors +
			    vs->vs_checksum_errors == 0 && children == 0 &&
			    vs->vs_slow_ios > 0) {
				(void) printf(gettext("too many slow I/Os"));
			} else {
				(void) printf(gettext("too many errors"));
			}
			break;

		case VDEV_AUX_IO_FAILURE:
			(void) printf(gettext("experienced I/O failures"));
			break;

		case VDEV_AUX_BAD_LOG:
			(void) printf(gettext("bad intent log"));
			break;

		case VDEV_AUX_EXTERNAL:
			(void) printf(gettext("external device fault"));
			break;

		case VDEV_AUX_SPLIT_POOL:
			(void) printf(gettext("split into new pool"));
			break;

		case VDEV_AUX_ACTIVE:
			(void) printf(gettext("currently in use"));
			break;

		case VDEV_AUX_CHILDREN_OFFLINE:
			(void) printf(gettext("all children offline"));
			break;

		case VDEV_AUX_BAD_LABEL:
			(void) printf(gettext("invalid label"));
			break;

		default:
			(void) printf(gettext("corrupted data"));
			break;
		}
		color_end();
	} else if (children == 0 && !isspare &&
	    getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
	    VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
	    vs->vs_configured_ashift < vs->vs_physical_ashift) {
		(void) printf(
		    gettext("  block size: %dB configured, %dB native"),
		    1 << vs->vs_configured_ashift,
		    1 << vs->vs_physical_ashift);
	}
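	/*
	 * Note: the non-native block size warning above can be suppressed by
	 * setting ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE in the environment.
	 */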
	if (vs->vs_scan_removing != 0) {
		(void) printf(gettext("  (removing)"));
	} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
		(void) printf(gettext("  (non-allocating)"));
	}

	/* The root vdev has the scrub/resilver stats */
	root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE);
	(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &c);

	/*
	 * If you force fault a drive that's resilvering, its scan stats can
	 * get frozen in time, giving the false impression that it's
	 * being resilvered.  That's why we check the state to see if the vdev
	 * is healthy before reporting "resilvering" or "repairing".
	 */
	if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
	    vs->vs_state == VDEV_STATE_HEALTHY) {
		if (vs->vs_scan_processed != 0) {
			(void) printf(gettext("  (%s)"),
			    (ps->pss_func == POOL_SCAN_RESILVER) ?
			    "resilvering" : "repairing");
		} else if (vs->vs_resilver_deferred) {
			(void) printf(gettext("  (awaiting resilver)"));
		}
	}

	/* The top-level vdevs have the rebuild stats */
	if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
	    children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
		if (vs->vs_rebuild_processed != 0) {
			(void) printf(gettext("  (resilvering)"));
		}
	}

	if (cb->vcdl != NULL) {
		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
			printf("  ");
			zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
		}
	}

	/* Display vdev initialization and trim status for leaves. */
	if (children == 0) {
		print_status_initialize(vs, cb->cb_print_vdev_init);
		print_status_trim(vs, cb->cb_print_vdev_trim);
	}

	(void) printf("\n");

	for (c = 0; c < children; c++) {
		uint64_t islog = B_FALSE, ishole = B_FALSE;

		/* Don't print logs or holes here */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &islog);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &ishole);
		if (islog || ishole)
			continue;
		/* Only print normal classes here */
		if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
			continue;

		/* Provide vdev_rebuild_stats to children if available */
		if (vrs == NULL) {
			(void) nvlist_lookup_uint64_array(nv,
			    ZPOOL_CONFIG_REBUILD_STATS,
			    (uint64_t **)&vrs, &i);
		}

		vname = zpool_vdev_name(g_zfs, zhp, child[c],
		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
		print_status_config(zhp, cb, vname, child[c], depth + 2,
		    isspare, vrs);
		free(vname);
	}
}
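/*
 * Note: the recursion above covers only normal-class children; log, hole,
 * and allocation-bias vdevs are skipped there and rendered separately by
 * print_class_vdevs().
 */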
/*
 * Print the configuration of an exported pool.  Iterate over all vdevs in the
 * pool, printing out the name and status for each one.
 */
static void
print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
    int depth)
{
	nvlist_t **child;
	uint_t c, children;
	vdev_stat_t *vs;
	const char *type;
	char *vname;

	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
	    strcmp(type, VDEV_TYPE_HOLE) == 0)
		return;

	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);

	(void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
	(void) printf("  %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));

	if (vs->vs_aux != 0) {
		(void) printf("  ");

		switch (vs->vs_aux) {
		case VDEV_AUX_OPEN_FAILED:
			(void) printf(gettext("cannot open"));
			break;

		case VDEV_AUX_BAD_GUID_SUM:
			(void) printf(gettext("missing device"));
			break;

		case VDEV_AUX_NO_REPLICAS:
			(void) printf(gettext("insufficient replicas"));
			break;

		case VDEV_AUX_VERSION_NEWER:
			(void) printf(gettext("newer version"));
			break;

		case VDEV_AUX_UNSUP_FEAT:
			(void) printf(gettext("unsupported feature(s)"));
			break;

		case VDEV_AUX_ERR_EXCEEDED:
			(void) printf(gettext("too many errors"));
			break;

		case VDEV_AUX_ACTIVE:
			(void) printf(gettext("currently in use"));
			break;

		case VDEV_AUX_CHILDREN_OFFLINE:
			(void) printf(gettext("all children offline"));
			break;

		case VDEV_AUX_BAD_LABEL:
			(void) printf(gettext("invalid label"));
			break;

		default:
			(void) printf(gettext("corrupted data"));
			break;
		}
	}
	(void) printf("\n");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE;

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (is_log)
			continue;
		if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
			continue;

		vname = zpool_vdev_name(g_zfs, NULL, child[c],
		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
		print_import_config(cb, vname, child[c], depth + 2);
		free(vname);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		(void) printf(gettext("\tcache\n"));
		for (c = 0; c < children; c++) {
			vname = zpool_vdev_name(g_zfs, NULL, child[c],
			    cb->cb_name_flags);
			(void) printf("\t  %s\n", vname);
			free(vname);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		(void) printf(gettext("\tspares\n"));
		for (c = 0; c < children; c++) {
			vname = zpool_vdev_name(g_zfs, NULL, child[c],
			    cb->cb_name_flags);
			(void) printf("\t  %s\n", vname);
			free(vname);
		}
	}
}
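/*
 * Note: unlike print_status_config(), the import view above prints only each
 * vdev's name and state; per-device error counters are not part of this
 * output.
 */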
/*
 * Print specialized class vdevs.
 *
 * These are recorded as top level vdevs in the main pool child array
 * but with "is_log" set to 1 or an "alloc_bias" string.  We use either
 * print_status_config() or print_import_config() to print the top level
 * class vdevs then any of their children (eg mirrored slogs) are printed
 * recursively - which works because only the top level vdev is marked.
 */
static void
print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    const char *class)
{
	uint_t c, children;
	nvlist_t **child;
	boolean_t printed = B_FALSE;

	assert(zhp != NULL || !cb->cb_verbose);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE;
		const char *bias = NULL;
		const char *type = NULL;

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);

		if (is_log) {
			bias = (char *)VDEV_ALLOC_CLASS_LOGS;
		} else {
			(void) nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
			(void) nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_TYPE, &type);
		}

		if (bias == NULL || strcmp(bias, class) != 0)
			continue;
		if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
			continue;

		if (!printed) {
			(void) printf("\t%s\t\n", gettext(class));
			printed = B_TRUE;
		}

		char *name = zpool_vdev_name(g_zfs, zhp, child[c],
		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
		if (cb->cb_print_status)
			print_status_config(zhp, cb, name, child[c], 2,
			    B_FALSE, NULL);
		else
			print_import_config(cb, name, child[c], 2);
		free(name);
	}
}

/*
 * Display the status for the given pool.
 */
static int
show_import(nvlist_t *config, boolean_t report_error)
{
	uint64_t pool_state;
	vdev_stat_t *vs;
	const char *name;
	uint64_t guid;
	uint64_t hostid = 0;
	const char *msgid;
	const char *hostname = "unknown";
	nvlist_t *nvroot, *nvinfo;
	zpool_status_t reason;
	zpool_errata_t errata;
	const char *health;
	uint_t vsc;
	const char *comment;
	const char *indent;
	char buf[2048];
	status_cbdata_t cb = { 0 };

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &name) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &pool_state) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	health = zpool_state_to_name(vs->vs_state, vs->vs_aux);

	reason = zpool_import_status(config, &msgid, &errata);

	/*
	 * If we're importing using a cachefile, then we won't report any
	 * errors unless we are in the scan phase of the import.
	 */
	if (reason != ZPOOL_STATUS_OK && !report_error)
		return (reason);

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) {
		indent = " ";
	} else {
		comment = NULL;
		indent = "";
	}

	(void) printf(gettext("%s  pool: %s\n"), indent, name);
	(void) printf(gettext("%s    id: %llu\n"), indent, (u_longlong_t)guid);
	(void) printf(gettext("%s state: %s"), indent, health);
	if (pool_state == POOL_STATE_DESTROYED)
		(void) printf(gettext(" (DESTROYED)"));
	(void) printf("\n");

	if (reason != ZPOOL_STATUS_OK) {
		(void) printf("%s", indent);
		printf_color(ANSI_BOLD, gettext("status: "));
	}
	switch (reason) {
	case ZPOOL_STATUS_MISSING_DEV_R:
	case ZPOOL_STATUS_MISSING_DEV_NR:
	case ZPOOL_STATUS_BAD_GUID_SUM:
		printf_color(ANSI_YELLOW, gettext("One or more devices are "
		    "missing from the system.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_LABEL_R:
	case ZPOOL_STATUS_CORRUPT_LABEL_NR:
		printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "contains corrupted data.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_DATA:
		printf_color(ANSI_YELLOW, gettext("The pool data is "
		    "corrupted.\n"));
		break;

	case ZPOOL_STATUS_OFFLINE_DEV:
		printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "are offlined.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_POOL:
		printf_color(ANSI_YELLOW, gettext("The pool metadata is "
		    "corrupted.\n"));
		break;

	case ZPOOL_STATUS_VERSION_OLDER:
		printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
		    "a legacy on-disk version.\n"));
		break;

	case ZPOOL_STATUS_VERSION_NEWER:
		printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
		    "an incompatible version.\n"));
		break;

	case ZPOOL_STATUS_FEAT_DISABLED:
		printf_color(ANSI_YELLOW, gettext("Some supported "
		    "features are not enabled on the pool.\n"
		    "\t%s(Note that they may be intentionally disabled if the\n"
		    "\t%s'compatibility' property is set.)\n"), indent, indent);
		break;

	case ZPOOL_STATUS_COMPATIBILITY_ERR:
		printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
		    "the file(s) indicated by the 'compatibility'\n"
		    "\t%sproperty.\n"), indent);
		break;

	case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
		printf_color(ANSI_YELLOW, gettext("One or more features "
		    "are enabled on the pool despite not being\n"
		    "\t%srequested by the 'compatibility' property.\n"),
		    indent);
		break;

	case ZPOOL_STATUS_UNSUP_FEAT_READ:
		printf_color(ANSI_YELLOW, gettext("The pool uses the following "
		    "feature(s) not supported on this system:\n"));
		color_start(ANSI_YELLOW);
		zpool_collect_unsup_feat(config, buf, 2048);
		(void) printf("%s", buf);
		color_end();
		break;

	case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
		printf_color(ANSI_YELLOW, gettext("The pool can only be "
		    "accessed in read-only mode on this system.  It\n"
		    "\t%scannot be accessed in read-write mode because it uses "
		    "the following\n"
		    "\t%sfeature(s) not supported on this system:\n"),
		    indent, indent);
		color_start(ANSI_YELLOW);
		zpool_collect_unsup_feat(config, buf, 2048);
		(void) printf("%s", buf);
		color_end();
		break;

	case ZPOOL_STATUS_HOSTID_ACTIVE:
		printf_color(ANSI_YELLOW, gettext("The pool is currently "
		    "imported by another system.\n"));
		break;
	case ZPOOL_STATUS_HOSTID_REQUIRED:
		printf_color(ANSI_YELLOW, gettext("The pool has the "
		    "multihost property on.  It cannot\n"
		    "\t%sbe safely imported when the system hostid is not "
		    "set.\n"), indent);
		break;

	case ZPOOL_STATUS_HOSTID_MISMATCH:
		printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
		    "by another system.\n"));
		break;

	case ZPOOL_STATUS_FAULTED_DEV_R:
	case ZPOOL_STATUS_FAULTED_DEV_NR:
		printf_color(ANSI_YELLOW, gettext("One or more devices are "
		    "faulted.\n"));
		break;

	case ZPOOL_STATUS_BAD_LOG:
		printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
		    "be read.\n"));
		break;

	case ZPOOL_STATUS_RESILVERING:
	case ZPOOL_STATUS_REBUILDING:
		printf_color(ANSI_YELLOW, gettext("One or more devices were "
		    "being resilvered.\n"));
		break;

	case ZPOOL_STATUS_ERRATA:
		printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
		    errata);
		break;

	case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
		printf_color(ANSI_YELLOW, gettext("One or more devices are "
		    "configured to use a non-native block size.\n"
		    "\t%sExpect reduced performance.\n"), indent);
		break;

	default:
		/*
		 * No other status can be seen when importing pools.
		 */
		assert(reason == ZPOOL_STATUS_OK);
	}

	/*
	 * Print out an action according to the overall state of the pool.
	 */
	if (vs->vs_state != VDEV_STATE_HEALTHY ||
	    reason != ZPOOL_STATUS_ERRATA || errata != ZPOOL_ERRATA_NONE) {
		(void) printf("%s", indent);
		(void) printf(gettext("action: "));
	}
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		if (reason == ZPOOL_STATUS_VERSION_OLDER ||
		    reason == ZPOOL_STATUS_FEAT_DISABLED) {
			(void) printf(gettext("The pool can be imported using "
			    "its name or numeric identifier, though\n"
			    "\t%ssome features will not be available without "
			    "an explicit 'zpool upgrade'.\n"), indent);
		} else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
			(void) printf(gettext("The pool can be imported using "
			    "its name or numeric\n"
			    "\t%sidentifier, though the file(s) indicated by "
			    "its 'compatibility'\n"
			    "\t%sproperty cannot be parsed at this time.\n"),
			    indent, indent);
		} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
			(void) printf(gettext("The pool can be imported using "
			    "its name or numeric identifier and\n"
			    "\t%sthe '-f' flag.\n"), indent);
		} else if (reason == ZPOOL_STATUS_ERRATA) {
			switch (errata) {
			case ZPOOL_ERRATA_ZOL_2094_SCRUB:
				(void) printf(gettext("The pool can be "
				    "imported using its name or numeric "
				    "identifier,\n"
				    "\t%showever there is a compatibility "
				    "issue which should be corrected\n"
				    "\t%sby running 'zpool scrub'\n"),
				    indent, indent);
				break;

			case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
				(void) printf(gettext("The pool cannot be "
				    "imported with this version of ZFS due to\n"
				    "\t%san active asynchronous destroy.  "
				    "Revert to an earlier version\n"
				    "\t%sand allow the destroy to complete "
				    "before updating.\n"), indent, indent);
				break;

			case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
				(void) printf(gettext("Existing encrypted "
				    "datasets contain an on-disk "
				    "incompatibility, which\n"
				    "\t%sneeds to be corrected.  Backup these "
				    "datasets to new encrypted datasets\n"
				    "\t%sand destroy the old ones.\n"),
				    indent, indent);
				break;
			case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
				(void) printf(gettext("Existing encrypted "
				    "snapshots and bookmarks contain an "
				    "on-disk\n"
				    "\t%sincompatibility.  This may cause "
				    "on-disk corruption if they are used\n"
				    "\t%swith 'zfs recv'.  To correct the "
				    "issue, enable the bookmark_v2 feature.\n"
				    "\t%sNo additional action is needed if "
				    "there are no encrypted snapshots or\n"
				    "\t%sbookmarks.  If preserving the "
				    "encrypted snapshots and bookmarks is\n"
				    "\t%srequired, use a non-raw send to "
				    "backup and restore them.  Alternately,\n"
				    "\t%sthey may be removed to resolve the "
				    "incompatibility.\n"), indent, indent,
				    indent, indent, indent, indent);
				break;
			default:
				/*
				 * All errata must contain an action message.
				 */
				assert(errata == ZPOOL_ERRATA_NONE);
			}
		} else {
			(void) printf(gettext("The pool can be imported using "
			    "its name or numeric identifier.\n"));
		}
	} else if (vs->vs_state == VDEV_STATE_DEGRADED) {
		(void) printf(gettext("The pool can be imported despite "
		    "missing or damaged devices.  The\n"
		    "\t%sfault tolerance of the pool may be compromised if "
		    "imported.\n"), indent);
	} else {
		switch (reason) {
		case ZPOOL_STATUS_VERSION_NEWER:
			(void) printf(gettext("The pool cannot be imported.  "
			    "Access the pool on a system running newer\n"
			    "\t%ssoftware, or recreate the pool from "
			    "backup.\n"), indent);
			break;
		case ZPOOL_STATUS_UNSUP_FEAT_READ:
			(void) printf(gettext("The pool cannot be imported. "
			    "Access the pool on a system that supports\n"
			    "\t%sthe required feature(s), or recreate the pool "
			    "from backup.\n"), indent);
			break;
		case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
			(void) printf(gettext("The pool cannot be imported in "
			    "read-write mode.  Import the pool with\n"
			    "\t%s'-o readonly=on', access the pool on a system "
			    "that supports the\n"
			    "\t%srequired feature(s), or recreate the pool "
			    "from backup.\n"), indent, indent);
			break;
		case ZPOOL_STATUS_MISSING_DEV_R:
		case ZPOOL_STATUS_MISSING_DEV_NR:
		case ZPOOL_STATUS_BAD_GUID_SUM:
			(void) printf(gettext("The pool cannot be imported. "
			    "Attach the missing\n"
			    "\t%sdevices and try again.\n"), indent);
			break;
		case ZPOOL_STATUS_HOSTID_ACTIVE:
			VERIFY0(nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo));

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
				hostname = fnvlist_lookup_string(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME);

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
				hostid = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID);

			(void) printf(gettext("The pool must be exported from "
			    "%s (hostid=%"PRIx64")\n"
			    "\t%sbefore it can be safely imported.\n"),
			    hostname, hostid, indent);
			break;
		case ZPOOL_STATUS_HOSTID_REQUIRED:
			(void) printf(gettext("Set a unique system hostid with "
			    "the zgenhostid(8) command.\n"));
			break;
		default:
			(void) printf(gettext("The pool cannot be imported due "
			    "to damaged devices or data.\n"));
		}
	}
	/* Print the comment attached to the pool. */
	if (comment != NULL)
		(void) printf(gettext("comment: %s\n"), comment);

	/*
	 * If the state is "closed" or "can't open", and the aux state
	 * is "corrupt data":
	 */
	if ((vs->vs_state == VDEV_STATE_CLOSED ||
	    vs->vs_state == VDEV_STATE_CANT_OPEN) &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA) {
		if (pool_state == POOL_STATE_DESTROYED)
			(void) printf(gettext("\t%sThe pool was destroyed, "
			    "but can be imported using the '-Df' flags.\n"),
			    indent);
		else if (pool_state != POOL_STATE_EXPORTED)
			(void) printf(gettext("\t%sThe pool may be active on "
			    "another system, but can be imported using\n"
			    "\t%sthe '-f' flag.\n"), indent, indent);
	}

	if (msgid != NULL) {
		(void) printf(gettext("%s   see: "
		    "https://openzfs.github.io/openzfs-docs/msg/%s\n"),
		    indent, msgid);
	}

	(void) printf(gettext("%sconfig:\n\n"), indent);

	cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
	    VDEV_NAME_TYPE_ID);
	if (cb.cb_namewidth < 10)
		cb.cb_namewidth = 10;

	print_import_config(&cb, name, nvroot, 0);

	print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
	print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
	print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);

	if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
		(void) printf(gettext("\n\t%sAdditional devices are known to "
		    "be part of this pool, though their\n"
		    "\t%sexact configuration cannot be determined.\n"),
		    indent, indent);
	}
	return (0);
}

static boolean_t
zfs_force_import_required(nvlist_t *config)
{
	uint64_t state;
	uint64_t hostid = 0;
	nvlist_t *nvinfo;

	state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
	nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);

	/*
	 * The hostid on LOAD_INFO comes from the MOS label via
	 * spa_tryimport().  If it's not there then we're likely talking to an
	 * older kernel, so use the top one, which will be from the label
	 * discovered in zpool_find_import(), or if a cachefile is in use, the
	 * local hostid.
	 */
	if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)
		(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
		    &hostid);

	if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
		return (B_TRUE);

	if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
		mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
		    ZPOOL_CONFIG_MMP_STATE);

		if (mmp_state != MMP_STATE_INACTIVE)
			return (B_TRUE);
	}

	return (B_FALSE);
}
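/*
 * In other words, a forced import is required when the pool was not cleanly
 * exported by this host, or when MMP indicates it may still be active on
 * another host.
 */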
This passes the heavy3796* lifting off to zpool_import_props(), and then mounts the datasets contained3797* within the pool.3798*/3799static int3800do_import(nvlist_t *config, const char *newname, const char *mntopts,3801nvlist_t *props, int flags, uint_t mntthreads)3802{3803int ret = 0;3804int ms_status = 0;3805zpool_handle_t *zhp;3806const char *name;3807uint64_t version;38083809name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);3810version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);38113812if (!SPA_VERSION_IS_SUPPORTED(version)) {3813(void) fprintf(stderr, gettext("cannot import '%s': pool "3814"is formatted using an unsupported ZFS version\n"), name);3815return (1);3816} else if (zfs_force_import_required(config) &&3817!(flags & ZFS_IMPORT_ANY_HOST)) {3818mmp_state_t mmp_state = MMP_STATE_INACTIVE;3819nvlist_t *nvinfo;38203821nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);3822if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))3823mmp_state = fnvlist_lookup_uint64(nvinfo,3824ZPOOL_CONFIG_MMP_STATE);38253826if (mmp_state == MMP_STATE_ACTIVE) {3827const char *hostname = "<unknown>";3828uint64_t hostid = 0;38293830if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))3831hostname = fnvlist_lookup_string(nvinfo,3832ZPOOL_CONFIG_MMP_HOSTNAME);38333834if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))3835hostid = fnvlist_lookup_uint64(nvinfo,3836ZPOOL_CONFIG_MMP_HOSTID);38373838(void) fprintf(stderr, gettext("cannot import '%s': "3839"pool is imported on %s (hostid: "3840"0x%"PRIx64")\nExport the pool on the other "3841"system, then run 'zpool import'.\n"),3842name, hostname, hostid);3843} else if (mmp_state == MMP_STATE_NO_HOSTID) {3844(void) fprintf(stderr, gettext("Cannot import '%s': "3845"pool has the multihost property on and the\n"3846"system's hostid is not set. Set a unique hostid "3847"with the zgenhostid(8) command.\n"), name);3848} else {3849const char *hostname = "<unknown>";3850time_t timestamp = 0;3851uint64_t hostid = 0;38523853if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))3854hostname = fnvlist_lookup_string(nvinfo,3855ZPOOL_CONFIG_HOSTNAME);3856else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))3857hostname = fnvlist_lookup_string(config,3858ZPOOL_CONFIG_HOSTNAME);38593860if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))3861timestamp = fnvlist_lookup_uint64(config,3862ZPOOL_CONFIG_TIMESTAMP);38633864if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))3865hostid = fnvlist_lookup_uint64(nvinfo,3866ZPOOL_CONFIG_HOSTID);3867else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))3868hostid = fnvlist_lookup_uint64(config,3869ZPOOL_CONFIG_HOSTID);38703871(void) fprintf(stderr, gettext("cannot import '%s': "3872"pool was previously in use from another system.\n"3873"Last accessed by %s (hostid=%"PRIx64") at %s"3874"The pool can be imported, use 'zpool import -f' "3875"to import the pool.\n"), name, hostname,3876hostid, ctime(×tamp));3877}38783879return (1);3880}38813882if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)3883return (1);38843885if (newname != NULL)3886name = newname;38873888if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)3889return (1);38903891/*3892* Loading keys is best effort. 
	/*
	 * Loading keys is best effort.  We don't want to return immediately
	 * if it fails but we do want to give the error to the caller.
	 */
	if (flags & ZFS_IMPORT_LOAD_KEYS &&
	    zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
		ret = 1;

	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
	    !(flags & ZFS_IMPORT_ONLY)) {
		ms_status = zpool_enable_datasets(zhp, mntopts, 0, mntthreads);
		if (ms_status == EZFS_SHAREFAILED) {
			(void) fprintf(stderr, gettext("Import was "
			    "successful, but unable to share some datasets\n"));
		} else if (ms_status == EZFS_MOUNTFAILED) {
			(void) fprintf(stderr, gettext("Import was "
			    "successful, but unable to mount some datasets\n"));
		}
	}

	zpool_close(zhp);
	return (ret);
}

typedef struct import_parameters {
	nvlist_t *ip_config;
	const char *ip_mntopts;
	nvlist_t *ip_props;
	int ip_flags;
	uint_t ip_mntthreads;
	int *ip_err;
} import_parameters_t;

static void
do_import_task(void *arg)
{
	import_parameters_t *ip = arg;
	*ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts,
	    ip->ip_props, ip->ip_flags, ip->ip_mntthreads);
	free(ip);
}
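/*
 * Note: each worker ORs its result into the shared *ip_err, so a failed
 * import of any one pool leaves the aggregate error set for the caller.
 */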
If4005* we get an error, then we return that error4006* to invoke the scan phase.4007*/4008if (import->cachefile && !import->scan)4009err = show_import(config, B_FALSE);4010else4011(void) show_import(config, B_TRUE);4012}4013} else if (import->poolname != NULL) {4014const char *name;40154016/*4017* We are searching for a pool based on name.4018*/4019verify(nvlist_lookup_string(config,4020ZPOOL_CONFIG_POOL_NAME, &name) == 0);40214022if (strcmp(name, import->poolname) == 0) {4023if (found_config != NULL) {4024(void) fprintf(stderr, gettext(4025"cannot import '%s': more than "4026"one matching pool\n"),4027import->poolname);4028(void) fprintf(stderr, gettext(4029"import by numeric ID instead\n"));4030err = B_TRUE;4031}4032found_config = config;4033}4034} else {4035uint64_t guid;40364037/*4038* Search for a pool by guid.4039*/4040verify(nvlist_lookup_uint64(config,4041ZPOOL_CONFIG_POOL_GUID, &guid) == 0);40424043if (guid == import->guid)4044found_config = config;4045}4046}4047if (import->do_all) {4048tpool_wait(tp);4049tpool_destroy(tp);4050}40514052/*4053* If we were searching for a specific pool, verify that we found a4054* pool, and then do the import.4055*/4056if (pool_specified && err == 0) {4057if (found_config == NULL) {4058(void) fprintf(stderr, gettext("cannot import '%s': "4059"no such pool available\n"), orig_name);4060err = B_TRUE;4061} else {4062err |= do_import(found_config, new_name,4063mntopts, props, flags, mount_tp_nthr);4064}4065}40664067/*4068* If we were just looking for pools, report an error if none were4069* found.4070*/4071if (!pool_specified && first)4072(void) fprintf(stderr,4073gettext("no pools available to import\n"));4074return (err);4075}40764077typedef struct target_exists_args {4078const char *poolname;4079uint64_t poolguid;4080} target_exists_args_t;40814082static int4083name_or_guid_exists(zpool_handle_t *zhp, void *data)4084{4085target_exists_args_t *args = data;4086nvlist_t *config = zpool_get_config(zhp, NULL);4087int found = 0;40884089if (config == NULL)4090return (0);40914092if (args->poolname != NULL) {4093const char *pool_name;40944095verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,4096&pool_name) == 0);4097if (strcmp(pool_name, args->poolname) == 0)4098found = 1;4099} else {4100uint64_t pool_guid;41014102verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,4103&pool_guid) == 0);4104if (pool_guid == args->poolguid)4105found = 1;4106}4107zpool_close(zhp);41084109return (found);4110}4111/*4112* zpool checkpoint <pool>4113* checkpoint --discard <pool>4114*4115* -d Discard the checkpoint from a checkpointed4116* --discard pool.4117*4118* -w Wait for discarding a checkpoint to complete.4119* --wait4120*4121* Checkpoints the specified pool, by taking a "snapshot" of its4122* current state. 
/*
 * zpool checkpoint <pool>
 *       checkpoint --discard <pool>
 *
 *	-d		Discard the checkpoint from a checkpointed
 *	--discard	pool.
 *
 *	-w		Wait for discarding a checkpoint to complete.
 *	--wait
 *
 * Checkpoints the specified pool, by taking a "snapshot" of its
 * current state.  A pool can only have one checkpoint at a time.
 */
int
zpool_do_checkpoint(int argc, char **argv)
{
	boolean_t discard, wait;
	char *pool;
	zpool_handle_t *zhp;
	int c, err;

	struct option long_options[] = {
		{"discard", no_argument, NULL, 'd'},
		{"wait", no_argument, NULL, 'w'},
		{0, 0, 0, 0}
	};

	discard = B_FALSE;
	wait = B_FALSE;
	while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
		switch (c) {
		case 'd':
			discard = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	if (wait && !discard) {
		(void) fprintf(stderr, gettext("--wait only valid when "
		    "--discard also specified\n"));
		usage(B_FALSE);
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	}

	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	pool = argv[0];

	if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
		/* As a special case, check for use of '/' in the name */
		if (strchr(pool, '/') != NULL)
			(void) fprintf(stderr, gettext("'zpool checkpoint' "
			    "doesn't work on datasets. To save the state "
			    "of a dataset from a specific point in time "
			    "please use 'zfs snapshot'\n"));
		return (1);
	}

	if (discard) {
		err = (zpool_discard_checkpoint(zhp) != 0);
		if (err == 0 && wait)
			err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
	} else {
		err = (zpool_checkpoint(zhp) != 0);
	}

	zpool_close(zhp);

	return (err);
}

#define	CHECKPOINT_OPT	1024

/*
 * zpool prefetch <type> [<type opts>] <pool>
 *
 * Prefetches a particular type of data in the specified pool.
 */
int
zpool_do_prefetch(int argc, char **argv)
{
	int c;
	char *poolname;
	char *typestr = NULL;
	zpool_prefetch_type_t type;
	zpool_handle_t *zhp;
	int err = 0;

	while ((c = getopt(argc, argv, "t:")) != -1) {
		switch (c) {
		case 't':
			typestr = optarg;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	argc--;
	argv++;

	/*
	 * Guard against a missing -t option before dereferencing typestr;
	 * strcmp(NULL, ...) would crash.
	 */
	if (typestr == NULL) {
		(void) fprintf(stderr, gettext("missing prefetch type\n"));
		usage(B_FALSE);
	}

	if (strcmp(typestr, "ddt") == 0) {
		type = ZPOOL_PREFETCH_DDT;
	} else {
		(void) fprintf(stderr, gettext("unsupported prefetch type\n"));
		usage(B_FALSE);
	}

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	err = zpool_prefetch(zhp, type);

	zpool_close(zhp);

	return (err);
}
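/*
 * Example: 'zpool prefetch -t ddt tank' prefetches the dedup table (DDT)
 * for pool 'tank'.
 */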
If importing from a cachefile config fails, then4277* fallback to searching for devices only in the directories that4278* exist in the cachefile.4279*4280* -d Scan in a specific directory, other than /dev/. More than4281* one directory can be specified using multiple '-d' options.4282*4283* -D Scan for previously destroyed pools or import all or only4284* specified destroyed pools.4285*4286* -R Temporarily import the pool, with all mountpoints relative to4287* the given root. The pool will remain exported when the machine4288* is rebooted.4289*4290* -V Import even in the presence of faulted vdevs. This is an4291* intentionally undocumented option for testing purposes, and4292* treats the pool configuration as complete, leaving any bad4293* vdevs in the FAULTED state. In other words, it does verbatim4294* import.4295*4296* -f Force import, even if it appears that the pool is active.4297*4298* -F Attempt rewind if necessary.4299*4300* -n See if rewind would work, but don't actually rewind.4301*4302* -N Import the pool but don't mount datasets.4303*4304* -T Specify a starting txg to use for import. This option is4305* intentionally undocumented option for testing purposes.4306*4307* -a Import all pools found.4308*4309* -l Load encryption keys while importing.4310*4311* -o Set property=value and/or temporary mount options (without '=').4312*4313* -s Scan using the default search path, the libblkid cache will4314* not be consulted.4315*4316* --rewind-to-checkpoint4317* Import the pool and revert back to the checkpoint.4318*4319* The import command scans for pools to import, and import pools based on pool4320* name and GUID. The pool can also be renamed as part of the import process.4321*/4322int4323zpool_do_import(int argc, char **argv)4324{4325char **searchdirs = NULL;4326char *env, *envdup = NULL;4327int nsearch = 0;4328int c;4329int err = 0;4330nvlist_t *pools = NULL;4331boolean_t do_all = B_FALSE;4332boolean_t do_destroyed = B_FALSE;4333char *mntopts = NULL;4334uint64_t searchguid = 0;4335char *searchname = NULL;4336char *propval;4337nvlist_t *policy = NULL;4338nvlist_t *props = NULL;4339int flags = ZFS_IMPORT_NORMAL;4340uint32_t rewind_policy = ZPOOL_NO_REWIND;4341boolean_t dryrun = B_FALSE;4342boolean_t do_rewind = B_FALSE;4343boolean_t xtreme_rewind = B_FALSE;4344boolean_t do_scan = B_FALSE;4345boolean_t pool_exists = B_FALSE;4346uint64_t txg = -1ULL;4347char *cachefile = NULL;4348importargs_t idata = { 0 };4349char *endptr;43504351struct option long_options[] = {4352{"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},4353{0, 0, 0, 0}4354};43554356/* check options */4357while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",4358long_options, NULL)) != -1) {4359switch (c) {4360case 'a':4361do_all = B_TRUE;4362break;4363case 'c':4364cachefile = optarg;4365break;4366case 'd':4367searchdirs = safe_realloc(searchdirs,4368(nsearch + 1) * sizeof (char *));4369searchdirs[nsearch++] = optarg;4370break;4371case 'D':4372do_destroyed = B_TRUE;4373break;4374case 'f':4375flags |= ZFS_IMPORT_ANY_HOST;4376break;4377case 'F':4378do_rewind = B_TRUE;4379break;4380case 'l':4381flags |= ZFS_IMPORT_LOAD_KEYS;4382break;4383case 'm':4384flags |= ZFS_IMPORT_MISSING_LOG;4385break;4386case 'n':4387dryrun = B_TRUE;4388break;4389case 'N':4390flags |= ZFS_IMPORT_ONLY;4391break;4392case 'o':4393if ((propval = strchr(optarg, '=')) != NULL) {4394*propval = '\0';4395propval++;4396if (add_prop_list(optarg, propval,4397&props, B_TRUE))4398goto error;4399} else {4400mntopts = optarg;4401}4402break;4403case 
'R':4404if (add_prop_list(zpool_prop_to_name(4405ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))4406goto error;4407if (add_prop_list_default(zpool_prop_to_name(4408ZPOOL_PROP_CACHEFILE), "none", &props))4409goto error;4410break;4411case 's':4412do_scan = B_TRUE;4413break;4414case 't':4415flags |= ZFS_IMPORT_TEMP_NAME;4416if (add_prop_list_default(zpool_prop_to_name(4417ZPOOL_PROP_CACHEFILE), "none", &props))4418goto error;4419break;44204421case 'T':4422errno = 0;4423txg = strtoull(optarg, &endptr, 0);4424if (errno != 0 || *endptr != '\0') {4425(void) fprintf(stderr,4426gettext("invalid txg value\n"));4427usage(B_FALSE);4428}4429rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;4430break;4431case 'V':4432flags |= ZFS_IMPORT_VERBATIM;4433break;4434case 'X':4435xtreme_rewind = B_TRUE;4436break;4437case CHECKPOINT_OPT:4438flags |= ZFS_IMPORT_CHECKPOINT;4439break;4440case ':':4441(void) fprintf(stderr, gettext("missing argument for "4442"'%c' option\n"), optopt);4443usage(B_FALSE);4444break;4445case '?':4446(void) fprintf(stderr, gettext("invalid option '%c'\n"),4447optopt);4448usage(B_FALSE);4449}4450}44514452argc -= optind;4453argv += optind;44544455if (cachefile && nsearch != 0) {4456(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));4457usage(B_FALSE);4458}44594460if (cachefile && do_scan) {4461(void) fprintf(stderr, gettext("-c is incompatible with -s\n"));4462usage(B_FALSE);4463}44644465if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {4466(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));4467usage(B_FALSE);4468}44694470if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {4471(void) fprintf(stderr, gettext("-l is only meaningful during "4472"an import\n"));4473usage(B_FALSE);4474}44754476if ((dryrun || xtreme_rewind) && !do_rewind) {4477(void) fprintf(stderr,4478gettext("-n or -X only meaningful with -F\n"));4479usage(B_FALSE);4480}4481if (dryrun)4482rewind_policy = ZPOOL_TRY_REWIND;4483else if (do_rewind)4484rewind_policy = ZPOOL_DO_REWIND;4485if (xtreme_rewind)4486rewind_policy |= ZPOOL_EXTREME_REWIND;44874488/* In the future, we can capture further policy and include it here */4489if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||4490nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||4491nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,4492rewind_policy) != 0)4493goto error;44944495/* check argument count */4496if (do_all) {4497if (argc != 0) {4498(void) fprintf(stderr, gettext("too many arguments\n"));4499usage(B_FALSE);4500}4501} else {4502if (argc > 2) {4503(void) fprintf(stderr, gettext("too many arguments\n"));4504usage(B_FALSE);4505}4506}45074508/*4509* Check for the effective uid. 
We do this explicitly here because4510* otherwise any attempt to discover pools will silently fail.4511*/4512if (argc == 0 && geteuid() != 0) {4513(void) fprintf(stderr, gettext("cannot "4514"discover pools: permission denied\n"));45154516free(searchdirs);4517nvlist_free(props);4518nvlist_free(policy);4519return (1);4520}45214522/*4523* Depending on the arguments given, we do one of the following:4524*4525* <none> Iterate through all pools and display information about4526* each one.4527*4528* -a Iterate through all pools and try to import each one.4529*4530* <id> Find the pool that corresponds to the given GUID/pool4531* name and import that one.4532*4533* -D Above options applies only to destroyed pools.4534*/4535if (argc != 0) {4536char *endptr;45374538errno = 0;4539searchguid = strtoull(argv[0], &endptr, 10);4540if (errno != 0 || *endptr != '\0') {4541searchname = argv[0];4542searchguid = 0;4543}45444545/*4546* User specified a name or guid. Ensure it's unique.4547*/4548target_exists_args_t search = {searchname, searchguid};4549pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);4550}45514552/*4553* Check the environment for the preferred search path.4554*/4555if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {4556char *dir, *tmp = NULL;45574558envdup = strdup(env);45594560for (dir = strtok_r(envdup, ":", &tmp);4561dir != NULL;4562dir = strtok_r(NULL, ":", &tmp)) {4563searchdirs = safe_realloc(searchdirs,4564(nsearch + 1) * sizeof (char *));4565searchdirs[nsearch++] = dir;4566}4567}45684569idata.path = searchdirs;4570idata.paths = nsearch;4571idata.poolname = searchname;4572idata.guid = searchguid;4573idata.cachefile = cachefile;4574idata.scan = do_scan;4575idata.policy = policy;4576idata.do_destroyed = do_destroyed;4577idata.do_all = do_all;45784579libpc_handle_t lpch = {4580.lpc_lib_handle = g_zfs,4581.lpc_ops = &libzfs_config_ops,4582.lpc_printerr = B_TRUE4583};4584pools = zpool_search_import(&lpch, &idata);45854586if (pools != NULL && pool_exists &&4587(argc == 1 || strcmp(argv[0], argv[1]) == 0)) {4588(void) fprintf(stderr, gettext("cannot import '%s': "4589"a pool with that name already exists\n"),4590argv[0]);4591(void) fprintf(stderr, gettext("use the form '%s "4592"<pool | id> <newpool>' to give it a new name\n"),4593"zpool import");4594err = 1;4595} else if (pools == NULL && pool_exists) {4596(void) fprintf(stderr, gettext("cannot import '%s': "4597"a pool with that name is already created/imported,\n"),4598argv[0]);4599(void) fprintf(stderr, gettext("and no additional pools "4600"with that name were found\n"));4601err = 1;4602} else if (pools == NULL) {4603if (argc != 0) {4604(void) fprintf(stderr, gettext("cannot import '%s': "4605"no such pool available\n"), argv[0]);4606}4607err = 1;4608}46094610if (err == 1) {4611free(searchdirs);4612free(envdup);4613nvlist_free(policy);4614nvlist_free(pools);4615nvlist_free(props);4616return (1);4617}46184619err = import_pools(pools, props, mntopts, flags,4620argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata);46214622/*4623* If we're using the cachefile and we failed to import, then4624* fallback to scanning the directory for pools that match4625* those in the cachefile.4626*/4627if (err != 0 && cachefile != NULL) {4628(void) printf(gettext("cachefile import failed, retrying\n"));46294630/*4631* We use the scan flag to gather the directories that exist4632* in the cachefile. 
If we need to fallback to searching for4633* the pool config, we will only search devices in these4634* directories.4635*/4636idata.scan = B_TRUE;4637nvlist_free(pools);4638pools = zpool_search_import(&lpch, &idata);46394640err = import_pools(pools, props, mntopts, flags,4641argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL,4642&idata);4643}46444645error:4646nvlist_free(props);4647nvlist_free(pools);4648nvlist_free(policy);4649free(searchdirs);4650free(envdup);46514652return (err ? 1 : 0);4653}46544655/*4656* zpool sync [-f] [pool] ...4657*4658* -f (undocumented) force uberblock (and config including zpool cache file)4659* update.4660*4661* Sync the specified pool(s).4662* Without arguments "zpool sync" will sync all pools.4663* This command initiates TXG sync(s) and will return after the TXG(s) commit.4664*4665*/4666static int4667zpool_do_sync(int argc, char **argv)4668{4669int ret;4670boolean_t force = B_FALSE;46714672/* check options */4673while ((ret = getopt(argc, argv, "f")) != -1) {4674switch (ret) {4675case 'f':4676force = B_TRUE;4677break;4678case '?':4679(void) fprintf(stderr, gettext("invalid option '%c'\n"),4680optopt);4681usage(B_FALSE);4682}4683}46844685argc -= optind;4686argv += optind;46874688/* if argc == 0 we will execute zpool_sync_one on all pools */4689ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,4690B_FALSE, zpool_sync_one, &force);46914692return (ret);4693}46944695typedef struct iostat_cbdata {4696uint64_t cb_flags;4697int cb_namewidth;4698int cb_iteration;4699boolean_t cb_verbose;4700boolean_t cb_literal;4701boolean_t cb_scripted;4702zpool_list_t *cb_list;4703vdev_cmd_data_list_t *vcdl;4704vdev_cbdata_t cb_vdevs;4705} iostat_cbdata_t;47064707/* iostat labels */4708typedef struct name_and_columns {4709const char *name; /* Column name */4710unsigned int columns; /* Center name to this number of columns */4711} name_and_columns_t;47124713#define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */47144715static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =4716{4717[IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},4718{NULL}},4719[IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},4720{"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},4721{NULL}},4722[IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},4723{"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},4724{"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},4725[IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},4726{"asyncq_wait", 2}, {NULL}},4727[IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},4728{"async_read", 2}, {"async_write", 2}, {"scrub", 2},4729{"trim", 2}, {"rebuild", 2}, {NULL}},4730};47314732/* Shorthand - if "columns" field not set, default to 1 column */4733static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =4734{4735[IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},4736{"write"}, {NULL}},4737[IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},4738{"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},4739{NULL}},4740[IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},4741{"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},4742{"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},4743[IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},4744{"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},4745{NULL}},4746[IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, 
{"agg"},4747{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},4748{"ind"}, {"agg"}, {NULL}},4749};47504751static const char *histo_to_title[] = {4752[IOS_L_HISTO] = "latency",4753[IOS_RQ_HISTO] = "req_size",4754};47554756/*4757* Return the number of labels in a null-terminated name_and_columns_t4758* array.4759*4760*/4761static unsigned int4762label_array_len(const name_and_columns_t *labels)4763{4764int i = 0;47654766while (labels[i].name)4767i++;47684769return (i);4770}47714772/*4773* Return the number of strings in a null-terminated string array.4774* For example:4775*4776* const char foo[] = {"bar", "baz", NULL}4777*4778* returns 24779*/4780static uint64_t4781str_array_len(const char *array[])4782{4783uint64_t i = 0;4784while (array[i])4785i++;47864787return (i);4788}478947904791/*4792* Return a default column width for default/latency/queue columns. This does4793* not include histograms, which have their columns autosized.4794*/4795static unsigned int4796default_column_width(iostat_cbdata_t *cb, enum iostat_type type)4797{4798unsigned long column_width = 5; /* Normal niceprint */4799static unsigned long widths[] = {4800/*4801* Choose some sane default column sizes for printing the4802* raw numbers.4803*/4804[IOS_DEFAULT] = 15, /* 1PB capacity */4805[IOS_LATENCY] = 10, /* 1B ns = 10sec */4806[IOS_QUEUES] = 6, /* 1M queue entries */4807[IOS_L_HISTO] = 10, /* 1B ns = 10sec */4808[IOS_RQ_HISTO] = 6, /* 1M queue entries */4809};48104811if (cb->cb_literal)4812column_width = widths[type];48134814return (column_width);4815}48164817/*4818* Print the column labels, i.e:4819*4820* capacity operations bandwidth4821* alloc free read write read write ...4822*4823* If force_column_width is set, use it for the column width. If not set, use4824* the default column width.4825*/4826static void4827print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,4828const name_and_columns_t labels[][IOSTAT_MAX_LABELS])4829{4830int i, idx, s;4831int text_start, rw_column_width, spaces_to_end;4832uint64_t flags = cb->cb_flags;4833uint64_t f;4834unsigned int column_width = force_column_width;48354836/* For each bit set in flags */4837for (f = flags; f; f &= ~(1ULL << idx)) {4838idx = lowbit64(f) - 1;4839if (!force_column_width)4840column_width = default_column_width(cb, idx);4841/* Print our top labels centered over "read write" label. */4842for (i = 0; i < label_array_len(labels[idx]); i++) {4843const char *name = labels[idx][i].name;4844/*4845* We treat labels[][].columns == 0 as shorthand4846* for one column. It makes writing out the label4847* tables more concise.4848*/4849unsigned int columns = MAX(1, labels[idx][i].columns);4850unsigned int slen = strlen(name);48514852rw_column_width = (column_width * columns) +4853(2 * (columns - 1));48544855text_start = (int)((rw_column_width) / columns -4856slen / columns);4857if (text_start < 0)4858text_start = 0;48594860printf(" "); /* Two spaces between columns */48614862/* Space from beginning of column to label */4863for (s = 0; s < text_start; s++)4864printf(" ");48654866printf("%s", name);48674868/* Print space after label to end of column */4869spaces_to_end = rw_column_width - text_start - slen;4870if (spaces_to_end < 0)4871spaces_to_end = 0;48724873for (s = 0; s < spaces_to_end; s++)4874printf(" ");4875}4876}4877}487848794880/*4881* print_cmd_columns - Print custom column titles from -c4882*4883* If the user specified the "zpool status|iostat -c" then print their custom4884* column titles in the header. 
For example, print_cmd_columns() would print4885* the " col1 col2" part of this:4886*4887* $ zpool iostat -vc 'echo col1=val1; echo col2=val2'4888* ...4889* capacity operations bandwidth4890* pool alloc free read write read write col1 col24891* ---------- ----- ----- ----- ----- ----- ----- ---- ----4892* mypool 269K 1008M 0 0 107 9464893* mirror 269K 1008M 0 0 107 9464894* sdb - - 0 0 102 473 val1 val24895* sdc - - 0 0 5 473 val1 val24896* ---------- ----- ----- ----- ----- ----- ----- ---- ----4897*/4898static void4899print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)4900{4901int i, j;4902vdev_cmd_data_t *data = &vcdl->data[0];49034904if (vcdl->count == 0 || data == NULL)4905return;49064907/*4908* Each vdev cmd should have the same column names unless the user did4909* something weird with their cmd. Just take the column names from the4910* first vdev and assume it works for all of them.4911*/4912for (i = 0; i < vcdl->uniq_cols_cnt; i++) {4913printf(" ");4914if (use_dashes) {4915for (j = 0; j < vcdl->uniq_cols_width[i]; j++)4916printf("-");4917} else {4918printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],4919vcdl->uniq_cols[i]);4920}4921}4922}492349244925/*4926* Utility function to print out a line of dashes like:4927*4928* -------------------------------- ----- ----- ----- ----- -----4929*4930* ...or a dashed named-row line like:4931*4932* logs - - - - -4933*4934* @cb: iostat data4935*4936* @force_column_width If non-zero, use the value as the column width.4937* Otherwise use the default column widths.4938*4939* @name: Print a dashed named-row line starting4940* with @name. Otherwise, print a regular4941* dashed line.4942*/4943static void4944print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,4945const char *name)4946{4947int i;4948unsigned int namewidth;4949uint64_t flags = cb->cb_flags;4950uint64_t f;4951int idx;4952const name_and_columns_t *labels;4953const char *title;495449554956if (cb->cb_flags & IOS_ANYHISTO_M) {4957title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];4958} else if (cb->cb_vdevs.cb_names_count) {4959title = "vdev";4960} else {4961title = "pool";4962}49634964namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),4965name ? 
strlen(name) : 0);496649674968if (name) {4969printf("%-*s", namewidth, name);4970} else {4971for (i = 0; i < namewidth; i++)4972(void) printf("-");4973}49744975/* For each bit in flags */4976for (f = flags; f; f &= ~(1ULL << idx)) {4977unsigned int column_width;4978idx = lowbit64(f) - 1;4979if (force_column_width)4980column_width = force_column_width;4981else4982column_width = default_column_width(cb, idx);49834984labels = iostat_bottom_labels[idx];4985for (i = 0; i < label_array_len(labels); i++) {4986if (name)4987printf(" %*s-", column_width - 1, " ");4988else4989printf(" %.*s", column_width,4990"--------------------");4991}4992}4993}499449954996static void4997print_iostat_separator_impl(iostat_cbdata_t *cb,4998unsigned int force_column_width)4999{5000print_iostat_dashes(cb, force_column_width, NULL);5001}50025003static void5004print_iostat_separator(iostat_cbdata_t *cb)5005{5006print_iostat_separator_impl(cb, 0);5007}50085009static void5010print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,5011const char *histo_vdev_name)5012{5013unsigned int namewidth;5014const char *title;50155016color_start(ANSI_BOLD);50175018if (cb->cb_flags & IOS_ANYHISTO_M) {5019title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];5020} else if (cb->cb_vdevs.cb_names_count) {5021title = "vdev";5022} else {5023title = "pool";5024}50255026namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),5027histo_vdev_name ? strlen(histo_vdev_name) : 0);50285029if (histo_vdev_name)5030printf("%-*s", namewidth, histo_vdev_name);5031else5032printf("%*s", namewidth, "");503350345035print_iostat_labels(cb, force_column_width, iostat_top_labels);5036printf("\n");50375038printf("%-*s", namewidth, title);50395040print_iostat_labels(cb, force_column_width, iostat_bottom_labels);5041if (cb->vcdl != NULL)5042print_cmd_columns(cb->vcdl, 0);50435044printf("\n");50455046print_iostat_separator_impl(cb, force_column_width);50475048if (cb->vcdl != NULL)5049print_cmd_columns(cb->vcdl, 1);50505051color_end();50525053printf("\n");5054}50555056static void5057print_iostat_header(iostat_cbdata_t *cb)5058{5059print_iostat_header_impl(cb, 0, NULL);5060}50615062/*5063* Prints a size string (i.e. 120M) with the suffix ("M") colored5064* by order of magnitude. 
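 *
 * For example (illustrative): given "120M", the digits print in the
 * default color and the trailing 'M' prints in yellow; a bare "0"
 * prints in gray.
 *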
Uses column_size to add padding.5065*/5066static void5067print_stat_color(const char *statbuf, unsigned int column_size)5068{5069fputs(" ", stdout);5070size_t len = strlen(statbuf);5071while (len < column_size) {5072fputc(' ', stdout);5073column_size--;5074}5075if (*statbuf == '0') {5076color_start(ANSI_GRAY);5077fputc('0', stdout);5078} else {5079for (; *statbuf; statbuf++) {5080if (*statbuf == 'K') color_start(ANSI_GREEN);5081else if (*statbuf == 'M') color_start(ANSI_YELLOW);5082else if (*statbuf == 'G') color_start(ANSI_RED);5083else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);5084else if (*statbuf == 'P') color_start(ANSI_MAGENTA);5085else if (*statbuf == 'E') color_start(ANSI_CYAN);5086fputc(*statbuf, stdout);5087if (--column_size <= 0)5088break;5089}5090}5091color_end();5092}50935094/*5095* Display a single statistic.5096*/5097static void5098print_one_stat(uint64_t value, enum zfs_nicenum_format format,5099unsigned int column_size, boolean_t scripted)5100{5101char buf[64];51025103zfs_nicenum_format(value, buf, sizeof (buf), format);51045105if (scripted)5106printf("\t%s", buf);5107else5108print_stat_color(buf, column_size);5109}51105111/*5112* Calculate the default vdev stats5113*5114* Subtract oldvs from newvs, apply a scaling factor, and save the resulting5115* stats into calcvs.5116*/5117static void5118calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,5119vdev_stat_t *calcvs)5120{5121int i;51225123memcpy(calcvs, newvs, sizeof (*calcvs));5124for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)5125calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);51265127for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)5128calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);5129}51305131/*5132* Internal representation of the extended iostats data.5133*5134* The extended iostat stats are exported in nvlists as either uint64_t arrays5135* or single uint64_t's. We make both look like arrays to make them easier5136* to process. In order to make single uint64_t's look like arrays, we set5137* __data to the stat data, and then set *data = &__data with count = 1. Then,5138* we can just use *data and count.5139*/5140struct stat_array {5141uint64_t *data;5142uint_t count; /* Number of entries in data[] */5143uint64_t __data; /* Only used when data is a single uint64_t */5144};51455146static uint64_t5147stat_histo_max(struct stat_array *nva, unsigned int len)5148{5149uint64_t max = 0;5150int i;5151for (i = 0; i < len; i++)5152max = MAX(max, array64_max(nva[i].data, nva[i].count));51535154return (max);5155}51565157/*5158* Helper function to lookup a uint64_t array or uint64_t value and store its5159* data as a stat_array. 
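 *
 * For example, a caller can then consume both cases uniformly
 * (illustrative fragment; 'nvx', 'name', and 'sum' are placeholders):
 *
 *	struct stat_array sa;
 *	uint64_t sum = 0;
 *
 *	if (nvpair64_to_stat_array(nvx, name, &sa) == 0) {
 *		for (uint_t i = 0; i < sa.count; i++)
 *			sum += sa.data[i];
 *	}
 *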
If the nvpair is a single uint64_t value, then we make5160* it look like a one element array to make it easier to process.5161*/5162static int5163nvpair64_to_stat_array(nvlist_t *nvl, const char *name,5164struct stat_array *nva)5165{5166nvpair_t *tmp;5167int ret;51685169verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);5170switch (nvpair_type(tmp)) {5171case DATA_TYPE_UINT64_ARRAY:5172ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);5173break;5174case DATA_TYPE_UINT64:5175ret = nvpair_value_uint64(tmp, &nva->__data);5176nva->data = &nva->__data;5177nva->count = 1;5178break;5179default:5180/* Not a uint64_t */5181ret = EINVAL;5182break;5183}51845185return (ret);5186}51875188/*5189* Given a list of nvlist names, look up the extended stats in newnv and oldnv,5190* subtract them, and return the results in a newly allocated stat_array.5191* You must free the returned array after you are done with it with5192* free_calc_stats().5193*5194* Additionally, you can set "oldnv" to NULL if you simply want the newnv5195* values.5196*/5197static struct stat_array *5198calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,5199nvlist_t *newnv)5200{5201nvlist_t *oldnvx = NULL, *newnvx;5202struct stat_array *oldnva, *newnva, *calcnva;5203int i, j;5204unsigned int alloc_size = (sizeof (struct stat_array)) * len;52055206/* Extract our extended stats nvlist from the main list */5207verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,5208&newnvx) == 0);5209if (oldnv) {5210verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,5211&oldnvx) == 0);5212}52135214newnva = safe_malloc(alloc_size);5215oldnva = safe_malloc(alloc_size);5216calcnva = safe_malloc(alloc_size);52175218for (j = 0; j < len; j++) {5219verify(nvpair64_to_stat_array(newnvx, names[j],5220&newnva[j]) == 0);5221calcnva[j].count = newnva[j].count;5222alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);5223calcnva[j].data = safe_malloc(alloc_size);5224memcpy(calcnva[j].data, newnva[j].data, alloc_size);52255226if (oldnvx) {5227verify(nvpair64_to_stat_array(oldnvx, names[j],5228&oldnva[j]) == 0);5229for (i = 0; i < oldnva[j].count; i++)5230calcnva[j].data[i] -= oldnva[j].data[i];5231}5232}5233free(newnva);5234free(oldnva);5235return (calcnva);5236}52375238static void5239free_calc_stats(struct stat_array *nva, unsigned int len)5240{5241int i;5242for (i = 0; i < len; i++)5243free(nva[i].data);52445245free(nva);5246}52475248static void5249print_iostat_histo(struct stat_array *nva, unsigned int len,5250iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,5251double scale)5252{5253int i, j;5254char buf[6];5255uint64_t val;5256enum zfs_nicenum_format format;5257unsigned int buckets;5258unsigned int start_bucket;52595260if (cb->cb_literal)5261format = ZFS_NICENUM_RAW;5262else5263format = ZFS_NICENUM_1024;52645265/* All these histos are the same size, so just use nva[0].count */5266buckets = nva[0].count;52675268if (cb->cb_flags & IOS_RQ_HISTO_M) {5269/* Start at 512 - req size should never be lower than this */5270start_bucket = 9;5271} else {5272start_bucket = 0;5273}52745275for (j = start_bucket; j < buckets; j++) {5276/* Print histogram bucket label */5277if (cb->cb_flags & IOS_L_HISTO_M) {5278/* Ending range of this bucket */5279val = (1UL << (j + 1)) - 1;5280zfs_nicetime(val, buf, sizeof (buf));5281} else {5282/* Request size (starting range of bucket) */5283val = (1UL << j);5284zfs_nicenum(val, buf, sizeof (buf));5285}52865287if (cb->cb_scripted)5288printf("%llu", 
(u_longlong_t)val);5289else5290printf("%-*s", namewidth, buf);52915292/* Print the values on the line */5293for (i = 0; i < len; i++) {5294print_one_stat(nva[i].data[j] * scale, format,5295column_width, cb->cb_scripted);5296}5297printf("\n");5298}5299}53005301static void5302print_solid_separator(unsigned int length)5303{5304while (length--)5305printf("-");5306printf("\n");5307}53085309static void5310print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,5311nvlist_t *newnv, double scale, const char *name)5312{5313unsigned int column_width;5314unsigned int namewidth;5315unsigned int entire_width;5316enum iostat_type type;5317struct stat_array *nva;5318const char **names;5319unsigned int names_len;53205321/* What type of histo are we? */5322type = IOS_HISTO_IDX(cb->cb_flags);53235324/* Get NULL-terminated array of nvlist names for our histo */5325names = vsx_type_to_nvlist[type];5326names_len = str_array_len(names); /* num of names */53275328nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);53295330if (cb->cb_literal) {5331column_width = MAX(5,5332(unsigned int) log10(stat_histo_max(nva, names_len)) + 1);5333} else {5334column_width = 5;5335}53365337namewidth = MAX(cb->cb_namewidth,5338strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));53395340/*5341* Calculate the entire line width of what we're printing. The5342* +2 is for the two spaces between columns:5343*/5344/* read write */5345/* ----- ----- */5346/* |___| <---------- column_width */5347/* */5348/* |__________| <--- entire_width */5349/* */5350entire_width = namewidth + (column_width + 2) *5351label_array_len(iostat_bottom_labels[type]);53525353if (cb->cb_scripted)5354printf("%s\n", name);5355else5356print_iostat_header_impl(cb, column_width, name);53575358print_iostat_histo(nva, names_len, cb, column_width,5359namewidth, scale);53605361free_calc_stats(nva, names_len);5362if (!cb->cb_scripted)5363print_solid_separator(entire_width);5364}53655366/*5367* Calculate the average latency of a power-of-two latency histogram5368*/5369static uint64_t5370single_histo_average(uint64_t *histo, unsigned int buckets)5371{5372int i;5373uint64_t count = 0, total = 0;53745375for (i = 0; i < buckets; i++) {5376/*5377* Our buckets are power-of-two latency ranges. Use the5378* midpoint latency of each bucket to calculate the average.5379* For example:5380*5381* Bucket Midpoint5382* 8ns-15ns: 12ns5383* 16ns-31ns: 24ns5384* ...5385*/5386if (histo[i] != 0) {5387total += histo[i] * (((1UL << i) + ((1UL << i)/2)));5388count += histo[i];5389}5390}53915392/* Prevent divide by zero */5393return (count == 0 ? 
0 : total / count);5394}53955396static void5397print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)5398{5399const char *names[] = {5400ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,5401ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,5402ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,5403ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,5404ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,5405ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,5406ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,5407ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,5408ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,5409ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,5410ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,5411ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,5412ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,5413ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,5414};54155416struct stat_array *nva;54175418unsigned int column_width = default_column_width(cb, IOS_QUEUES);5419enum zfs_nicenum_format format;54205421nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);54225423if (cb->cb_literal)5424format = ZFS_NICENUM_RAW;5425else5426format = ZFS_NICENUM_1024;54275428for (int i = 0; i < ARRAY_SIZE(names); i++) {5429uint64_t val = nva[i].data[0];5430print_one_stat(val, format, column_width, cb->cb_scripted);5431}54325433free_calc_stats(nva, ARRAY_SIZE(names));5434}54355436static void5437print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,5438nvlist_t *newnv)5439{5440int i;5441uint64_t val;5442const char *names[] = {5443ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,5444ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,5445ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,5446ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,5447ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,5448ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,5449ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,5450ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,5451ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,5452ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,5453ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,5454};5455struct stat_array *nva;54565457unsigned int column_width = default_column_width(cb, IOS_LATENCY);5458enum zfs_nicenum_format format;54595460nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);54615462if (cb->cb_literal)5463format = ZFS_NICENUM_RAWTIME;5464else5465format = ZFS_NICENUM_TIME;54665467/* Print our avg latencies on the line */5468for (i = 0; i < ARRAY_SIZE(names); i++) {5469/* Compute average latency for a latency histo */5470val = single_histo_average(nva[i].data, nva[i].count);5471print_one_stat(val, format, column_width, cb->cb_scripted);5472}5473free_calc_stats(nva, ARRAY_SIZE(names));5474}54755476/*5477* Print default statistics (capacity/operations/bandwidth)5478*/5479static void5480print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)5481{5482unsigned int column_width = default_column_width(cb, IOS_DEFAULT);5483enum zfs_nicenum_format format;5484char na; /* char to print for "not applicable" values */54855486if (cb->cb_literal) {5487format = ZFS_NICENUM_RAW;5488na = '0';5489} else {5490format = ZFS_NICENUM_1024;5491na = '-';5492}54935494/* only toplevel vdevs have capacity stats */5495if (vs->vs_space == 0) {5496if (cb->cb_scripted)5497printf("\t%c\t%c", na, na);5498else5499printf(" %*c %*c", column_width, na, column_width,5500na);5501} else {5502print_one_stat(vs->vs_alloc, format, column_width,5503cb->cb_scripted);5504print_one_stat(vs->vs_space - vs->vs_alloc, format,5505column_width, cb->cb_scripted);5506}55075508print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),5509format, column_width, cb->cb_scripted);5510print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),5511format, column_width, 
	    cb->cb_scripted);
	print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
	    format, column_width, cb->cb_scripted);
	print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
	    format, column_width, cb->cb_scripted);
}

static const char *const class_name[] = {
	VDEV_ALLOC_BIAS_DEDUP,
	VDEV_ALLOC_BIAS_SPECIAL,
	VDEV_ALLOC_CLASS_LOGS
};

/*
 * Print out all the statistics for the given vdev. This can either be the
 * toplevel configuration, or called recursively. If 'name' is NULL, then this
 * is a verbose output, and we don't want to display the toplevel pool stats.
 *
 * Returns the number of stat lines printed.
 */
static unsigned int
print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
    nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
{
	nvlist_t **oldchild, **newchild;
	uint_t c, children, oldchildren;
	vdev_stat_t *oldvs, *newvs, *calcvs;
	vdev_stat_t zerovs = { 0 };
	char *vname;
	int i;
	int ret = 0;
	uint64_t tdelta;
	double scale;

	if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
		return (ret);

	calcvs = safe_malloc(sizeof (*calcvs));

	if (oldnv != NULL) {
		verify(nvlist_lookup_uint64_array(oldnv,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
	} else {
		oldvs = &zerovs;
	}

	/* Do we only want to see a specific vdev? */
	for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
		/* Yes we do. Is this the vdev? */
		if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
			/*
			 * This is our vdev. Since it is the only vdev we
			 * will be displaying, make depth = 0 so that it
			 * doesn't get indented.
			 */
			depth = 0;
			break;
		}
	}

	if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
		/* Couldn't match the name */
		goto children;
	}

	verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&newvs, &c) == 0);

	/*
	 * Print the vdev name unless it is a histogram. 
Histograms5582* display the vdev name in the header itself.5583*/5584if (!(cb->cb_flags & IOS_ANYHISTO_M)) {5585if (cb->cb_scripted) {5586printf("%s", name);5587} else {5588if (strlen(name) + depth > cb->cb_namewidth)5589(void) printf("%*s%s", depth, "", name);5590else5591(void) printf("%*s%s%*s", depth, "", name,5592(int)(cb->cb_namewidth - strlen(name) -5593depth), "");5594}5595}55965597/* Calculate our scaling factor */5598tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;5599if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {5600/*5601* If we specify printing histograms with no time interval, then5602* print the histogram numbers over the entire lifetime of the5603* vdev.5604*/5605scale = 1;5606} else {5607if (tdelta == 0)5608scale = 1.0;5609else5610scale = (double)NANOSEC / tdelta;5611}56125613if (cb->cb_flags & IOS_DEFAULT_M) {5614calc_default_iostats(oldvs, newvs, calcvs);5615print_iostat_default(calcvs, cb, scale);5616}5617if (cb->cb_flags & IOS_LATENCY_M)5618print_iostat_latency(cb, oldnv, newnv);5619if (cb->cb_flags & IOS_QUEUES_M)5620print_iostat_queues(cb, newnv);5621if (cb->cb_flags & IOS_ANYHISTO_M) {5622printf("\n");5623print_iostat_histos(cb, oldnv, newnv, scale, name);5624}56255626if (cb->vcdl != NULL) {5627const char *path;5628if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,5629&path) == 0) {5630printf(" ");5631zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);5632}5633}56345635if (!(cb->cb_flags & IOS_ANYHISTO_M))5636printf("\n");56375638ret++;56395640children:56415642free(calcvs);56435644if (!cb->cb_verbose)5645return (ret);56465647if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,5648&newchild, &children) != 0)5649return (ret);56505651if (oldnv) {5652if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,5653&oldchild, &oldchildren) != 0)5654return (ret);56555656children = MIN(oldchildren, children);5657}56585659/*5660* print normal top-level devices5661*/5662for (c = 0; c < children; c++) {5663uint64_t ishole = B_FALSE, islog = B_FALSE;56645665(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,5666&ishole);56675668(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,5669&islog);56705671if (ishole || islog)5672continue;56735674if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))5675continue;56765677vname = zpool_vdev_name(g_zfs, zhp, newchild[c],5678cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);5679ret += print_vdev_stats(zhp, vname, oldnv ? 
oldchild[c] : NULL,5680newchild[c], cb, depth + 2);5681free(vname);5682}56835684/*5685* print all other top-level devices5686*/5687for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {5688boolean_t printed = B_FALSE;56895690for (c = 0; c < children; c++) {5691uint64_t islog = B_FALSE;5692const char *bias = NULL;5693const char *type = NULL;56945695(void) nvlist_lookup_uint64(newchild[c],5696ZPOOL_CONFIG_IS_LOG, &islog);5697if (islog) {5698bias = VDEV_ALLOC_CLASS_LOGS;5699} else {5700(void) nvlist_lookup_string(newchild[c],5701ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);5702(void) nvlist_lookup_string(newchild[c],5703ZPOOL_CONFIG_TYPE, &type);5704}5705if (bias == NULL || strcmp(bias, class_name[n]) != 0)5706continue;5707if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)5708continue;57095710if (!printed) {5711if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&5712!cb->cb_scripted &&5713!cb->cb_vdevs.cb_names) {5714print_iostat_dashes(cb, 0,5715class_name[n]);5716}5717printf("\n");5718printed = B_TRUE;5719}57205721vname = zpool_vdev_name(g_zfs, zhp, newchild[c],5722cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);5723ret += print_vdev_stats(zhp, vname, oldnv ?5724oldchild[c] : NULL, newchild[c], cb, depth + 2);5725free(vname);5726}5727}57285729/*5730* Include level 2 ARC devices in iostat output5731*/5732if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,5733&newchild, &children) != 0)5734return (ret);57355736if (oldnv) {5737if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,5738&oldchild, &oldchildren) != 0)5739return (ret);57405741children = MIN(oldchildren, children);5742}57435744if (children > 0) {5745if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&5746!cb->cb_vdevs.cb_names) {5747print_iostat_dashes(cb, 0, "cache");5748}5749printf("\n");57505751for (c = 0; c < children; c++) {5752vname = zpool_vdev_name(g_zfs, zhp, newchild[c],5753cb->cb_vdevs.cb_name_flags);5754ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]5755: NULL, newchild[c], cb, depth + 2);5756free(vname);5757}5758}57595760return (ret);5761}57625763/*5764* Callback to print out the iostats for the given pool.5765*/5766static int5767print_iostat(zpool_handle_t *zhp, void *data)5768{5769iostat_cbdata_t *cb = data;5770nvlist_t *oldconfig, *newconfig;5771nvlist_t *oldnvroot, *newnvroot;5772int ret;57735774newconfig = zpool_get_config(zhp, &oldconfig);57755776if (cb->cb_iteration == 1)5777oldconfig = NULL;57785779verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,5780&newnvroot) == 0);57815782if (oldconfig == NULL)5783oldnvroot = NULL;5784else5785verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,5786&oldnvroot) == 0);57875788ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,5789cb, 0);5790if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&5791!cb->cb_scripted && cb->cb_verbose &&5792!cb->cb_vdevs.cb_names_count) {5793print_iostat_separator(cb);5794if (cb->vcdl != NULL) {5795print_cmd_columns(cb->vcdl, 1);5796}5797printf("\n");5798}57995800return (ret);5801}58025803static int5804get_columns(void)5805{5806struct winsize ws;5807int columns = 80;5808int error;58095810if (isatty(STDOUT_FILENO)) {5811error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);5812if (error == 0)5813columns = ws.ws_col;5814} else {5815columns = 999;5816}58175818return (columns);5819}58205821/*5822* Return the required length of the pool/vdev name column. 
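 *
 * For example (illustrative): with min_width = 10 and a pool named
 * "tank", the non-verbose result is MAX(strlen("tank"), 10) = 10.
 *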
The minimum5823* allowed width and output formatting flags must be provided.5824*/5825static int5826get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)5827{5828nvlist_t *config, *nvroot;5829int width = min_width;58305831if ((config = zpool_get_config(zhp, NULL)) != NULL) {5832verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,5833&nvroot) == 0);5834size_t poolname_len = strlen(zpool_get_name(zhp));5835if (verbose == B_FALSE) {5836width = MAX(poolname_len, min_width);5837} else {5838width = MAX(poolname_len,5839max_width(zhp, nvroot, 0, min_width, flags));5840}5841}58425843return (width);5844}58455846/*5847* Parse the input string, get the 'interval' and 'count' value if there is one.5848*/5849static void5850get_interval_count(int *argcp, char **argv, float *iv,5851unsigned long *cnt)5852{5853float interval = 0;5854unsigned long count = 0;5855int argc = *argcp;58565857/*5858* Determine if the last argument is an integer or a pool name5859*/5860if (argc > 0 && zfs_isnumber(argv[argc - 1])) {5861char *end;58625863errno = 0;5864interval = strtof(argv[argc - 1], &end);58655866if (*end == '\0' && errno == 0) {5867if (interval == 0) {5868(void) fprintf(stderr, gettext(5869"interval cannot be zero\n"));5870usage(B_FALSE);5871}5872/*5873* Ignore the last parameter5874*/5875argc--;5876} else {5877/*5878* If this is not a valid number, just plow on. The5879* user will get a more informative error message later5880* on.5881*/5882interval = 0;5883}5884}58855886/*5887* If the last argument is also an integer, then we have both a count5888* and an interval.5889*/5890if (argc > 0 && zfs_isnumber(argv[argc - 1])) {5891char *end;58925893errno = 0;5894count = interval;5895interval = strtof(argv[argc - 1], &end);58965897if (*end == '\0' && errno == 0) {5898if (interval == 0) {5899(void) fprintf(stderr, gettext(5900"interval cannot be zero\n"));5901usage(B_FALSE);5902}59035904/*5905* Ignore the last parameter5906*/5907argc--;5908} else {5909interval = 0;5910}5911}59125913*iv = interval;5914*cnt = count;5915*argcp = argc;5916}59175918static void5919get_timestamp_arg(char c)5920{5921if (c == 'u')5922timestamp_fmt = UDATE;5923else if (c == 'd')5924timestamp_fmt = DDATE;5925else5926usage(B_FALSE);5927}59285929/*5930* Return stat flags that are supported by all pools by both the module and5931* zpool iostat. "*data" should be initialized to all 0xFFs before running.5932* It will get ANDed down until only the flags that are supported on all pools5933* remain.5934*/5935static int5936get_stat_flags_cb(zpool_handle_t *zhp, void *data)5937{5938uint64_t *mask = data;5939nvlist_t *config, *nvroot, *nvx;5940uint64_t flags = 0;5941int i, j;59425943config = zpool_get_config(zhp, NULL);5944verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,5945&nvroot) == 0);59465947/* Default stats are always supported, but for completeness.. */5948if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))5949flags |= IOS_DEFAULT_M;59505951/* Get our extended stats nvlist from the main list */5952if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,5953&nvx) != 0) {5954/*5955* No extended stats; they're probably running an older5956* module. 
No big deal, we support that too.5957*/5958goto end;5959}59605961/* For each extended stat, make sure all its nvpairs are supported */5962for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {5963if (!vsx_type_to_nvlist[j][0])5964continue;59655966/* Start off by assuming the flag is supported, then check */5967flags |= (1ULL << j);5968for (i = 0; vsx_type_to_nvlist[j][i]; i++) {5969if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {5970/* flag isn't supported */5971flags = flags & ~(1ULL << j);5972break;5973}5974}5975}5976end:5977*mask = *mask & flags;5978return (0);5979}59805981/*5982* Return a bitmask of stats that are supported on all pools by both the module5983* and zpool iostat.5984*/5985static uint64_t5986get_stat_flags(zpool_list_t *list)5987{5988uint64_t mask = -1;59895990/*5991* get_stat_flags_cb() will lop off bits from "mask" until only the5992* flags that are supported on all pools remain.5993*/5994pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);5995return (mask);5996}59975998/*5999* Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.6000*/6001static int6002is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)6003{6004uint64_t guid;6005vdev_cbdata_t *cb = cb_data;6006zpool_handle_t *zhp = zhp_data;60076008if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)6009return (0);60106011return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));6012}60136014/*6015* Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.6016*/6017static int6018is_vdev(zpool_handle_t *zhp, void *cb_data)6019{6020return (for_each_vdev(zhp, is_vdev_cb, cb_data));6021}60226023/*6024* Check if vdevs are in a pool6025*6026* Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise6027* return 0. If pool_name is NULL, then search all pools.6028*/6029static int6030are_vdevs_in_pool(int argc, char **argv, char *pool_name,6031vdev_cbdata_t *cb)6032{6033char **tmp_name;6034int ret = 0;6035int i;6036int pool_count = 0;60376038if ((argc == 0) || !*argv)6039return (0);60406041if (pool_name)6042pool_count = 1;60436044/* Temporarily hijack cb_names for a second... */6045tmp_name = cb->cb_names;60466047/* Go though our list of prospective vdev names */6048for (i = 0; i < argc; i++) {6049cb->cb_names = argv + i;60506051/* Is this name a vdev in our pools? */6052ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,6053ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);6054if (!ret) {6055/* No match */6056break;6057}6058}60596060cb->cb_names = tmp_name;60616062return (ret);6063}60646065static int6066is_pool_cb(zpool_handle_t *zhp, void *data)6067{6068char *name = data;6069if (strcmp(name, zpool_get_name(zhp)) == 0)6070return (1);60716072return (0);6073}60746075/*6076* Do we have a pool named *name? If so, return 1, otherwise 0.6077*/6078static int6079is_pool(char *name)6080{6081return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,6082is_pool_cb, name));6083}60846085/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */6086static int6087are_all_pools(int argc, char **argv)6088{6089if ((argc == 0) || !*argv)6090return (0);60916092while (--argc >= 0)6093if (!is_pool(argv[argc]))6094return (0);60956096return (1);6097}60986099/*6100* Helper function to print out vdev/pool names we can't resolve. 
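 *
 * For example, the report printed underneath the error might read
 * (names are illustrative):
 *
 *	tank	(pool)
 *	sdb	(vdev in this pool)
 *	bogus	(unknown)
 *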
 * Used for an error message.
 */
static void
error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
    vdev_cbdata_t *cb)
{
	int i;
	char *name;
	char *str;
	for (i = 0; i < argc; i++) {
		name = argv[i];

		if (is_pool(name))
			str = gettext("pool");
		else if (are_vdevs_in_pool(1, &name, pool_name, cb))
			str = gettext("vdev in this pool");
		else if (are_vdevs_in_pool(1, &name, NULL, cb))
			str = gettext("vdev in another pool");
		else
			str = gettext("unknown");

		fprintf(stderr, "\t%s (%s)\n", name, str);
	}
}

/*
 * Same as get_interval_count(), but with additional checks to not misinterpret
 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in
 * cb.cb_vdevs.cb_name_flags.
 */
static void
get_interval_count_filter_guids(int *argc, char **argv, float *interval,
    unsigned long *count, iostat_cbdata_t *cb)
{
	int argc_for_interval = 0;

	/* Is the last arg an interval value? Or a guid? */
	if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
	    &cb->cb_vdevs)) {
		/*
		 * The last arg is not a guid, so it's probably an
		 * interval value.
		 */
		argc_for_interval++;

		if (*argc >= 2 &&
		    !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
		    &cb->cb_vdevs)) {
			/*
			 * The 2nd to last arg is not a guid, so it's probably
			 * an interval value.
			 */
			argc_for_interval++;
		}
	}

	/* Point to our list of possible intervals */
	char **tmpargv = &argv[*argc - argc_for_interval];

	*argc = *argc - argc_for_interval;
	get_interval_count(&argc_for_interval, tmpargv,
	    interval, count);
}

/*
 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
 * if we were unable to determine its size.
 */
static int
terminal_height(void)
{
	struct winsize win;

	if (isatty(STDOUT_FILENO) == 0)
		return (-1);

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
		return (win.ws_row);

	return (-1);
}

/*
 * Run one of the zpool status/iostat -c scripts with the help (-h) option and
 * print the result.
 *
 * name: Short name of the script ('iostat').
 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
 */
static void
print_zpool_script_help(char *name, char *path)
{
	char *argv[] = {path, (char *)"-h", NULL};
	char **lines = NULL;
	int lines_cnt = 0;
	int rc;

	rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
	    &lines_cnt);
	if (rc != 0 || lines == NULL || lines_cnt <= 0) {
		if (lines != NULL)
			libzfs_free_str_array(lines, lines_cnt);
		return;
	}

	for (int i = 0; i < lines_cnt; i++)
		if (!is_blank_str(lines[i]))
			printf(" %-14s %s\n", name, lines[i]);

	libzfs_free_str_array(lines, lines_cnt);
}

/*
 * Go through the zpool status/iostat -c scripts in the user's path, run their
 * help option (-h), and print out the results.
 */
static void
print_zpool_dir_scripts(char *dirpath)
{
	DIR *dir;
	struct dirent *ent;
	char fullpath[MAXPATHLEN];
	struct stat dir_stat;

	if ((dir = opendir(dirpath)) != NULL) {
		/* print all the files and directories within directory */
		while ((ent = readdir(dir)) != NULL) {
			if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
			    dirpath, ent->d_name) >= sizeof (fullpath)) {
				(void) fprintf(stderr,
				    gettext("internal error: "
				    "ZPOOL_SCRIPTS_PATH too large.\n"));
				exit(1);
			}

			/* Print the scripts 
*/6237if (stat(fullpath, &dir_stat) == 0)6238if (dir_stat.st_mode & S_IXUSR &&6239S_ISREG(dir_stat.st_mode))6240print_zpool_script_help(ent->d_name,6241fullpath);6242}6243closedir(dir);6244}6245}62466247/*6248* Print out help text for all zpool status/iostat -c scripts.6249*/6250static void6251print_zpool_script_list(const char *subcommand)6252{6253char *dir, *sp, *tmp;62546255printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);62566257sp = zpool_get_cmd_search_path();6258if (sp == NULL)6259return;62606261for (dir = strtok_r(sp, ":", &tmp);6262dir != NULL;6263dir = strtok_r(NULL, ":", &tmp))6264print_zpool_dir_scripts(dir);62656266free(sp);6267}62686269/*6270* Set the minimum pool/vdev name column width. The width must be at least 10,6271* but may be as large as the column width - 42 so it still fits on one line.6272* NOTE: 42 is the width of the default capacity/operations/bandwidth output6273*/6274static int6275get_namewidth_iostat(zpool_handle_t *zhp, void *data)6276{6277iostat_cbdata_t *cb = data;6278int width, available_width;62796280/*6281* get_namewidth() returns the maximum width of any name in that column6282* for any pool/vdev/device line that will be output.6283*/6284width = get_namewidth(zhp, cb->cb_namewidth,6285cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);62866287/*6288* The width we are calculating is the width of the header and also the6289* padding width for names that are less than maximum width. The stats6290* take up 42 characters, so the width available for names is:6291*/6292available_width = get_columns() - 42;62936294/*6295* If the maximum width fits on a screen, then great! Make everything6296* line up by justifying all lines to the same width. If that max6297* width is larger than what's available, the name plus stats won't fit6298* on one line, and justifying to that width would cause every line to6299* wrap on the screen. We only want lines with long names to wrap.6300* Limit the padding to what won't wrap.6301*/6302if (width > available_width)6303width = available_width;63046305/*6306* And regardless of whatever the screen width is (get_columns can6307* return 0 if the width is not known or less than 42 for a narrow6308* terminal) have the width be a minimum of 10.6309*/6310if (width < 10)6311width = 10;63126313/* Save the calculated width */6314cb->cb_namewidth = width;63156316return (0);6317}63186319/*6320* zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n name]6321* [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]6322* [interval [count]]6323*6324* -c CMD For each vdev, run command CMD6325* -g Display guid for individual vdev name.6326* -L Follow links when resolving vdev path name.6327* -P Display full path for vdev name.6328* -v Display statistics for individual vdevs6329* -h Display help6330* -p Display values in parsable (exact) format.6331* -H Scripted mode. Don't display headers, and separate properties6332* by a single tab.6333* -l Display average latency6334* -q Display queue depths6335* -w Display latency histograms6336* -r Display request size histogram6337* -T Display a timestamp in date(1) or Unix format6338* -n Only print headers once6339*6340* This command can be tricky because we want to be able to deal with pool6341* creation/destruction as well as vdev configuration changes. The bulk of this6342* processing is handled by the pool_list_* routines in zpool_iter.c. 
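 *
 * For example (illustrative invocation):
 *
 *	# zpool iostat -v tank 5 3
 *
 * prints per-vdev statistics for the pool 'tank' every five seconds,
 * three times in total.
 *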
We rely6343* on pool_list_refresh() to detect the addition and removal of pools.6344* Configuration changes are all handled within libzfs.6345*/6346int6347zpool_do_iostat(int argc, char **argv)6348{6349int c;6350int ret;6351float interval = 0;6352unsigned long count = 0;6353zpool_list_t *list;6354boolean_t verbose = B_FALSE;6355boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;6356boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;6357boolean_t omit_since_boot = B_FALSE;6358boolean_t guid = B_FALSE;6359boolean_t follow_links = B_FALSE;6360boolean_t full_name = B_FALSE;6361boolean_t headers_once = B_FALSE;6362iostat_cbdata_t cb = { 0 };6363char *cmd = NULL;63646365/* Used for printing error message */6366const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',6367[IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};63686369uint64_t unsupported_flags;63706371/* check options */6372while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {6373switch (c) {6374case 'c':6375if (cmd != NULL) {6376fprintf(stderr,6377gettext("Can't set -c flag twice\n"));6378exit(1);6379}63806381if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&6382!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {6383fprintf(stderr, gettext(6384"Can't run -c, disabled by "6385"ZPOOL_SCRIPTS_ENABLED.\n"));6386exit(1);6387}63886389if ((getuid() <= 0 || geteuid() <= 0) &&6390!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {6391fprintf(stderr, gettext(6392"Can't run -c with root privileges "6393"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));6394exit(1);6395}6396cmd = optarg;6397verbose = B_TRUE;6398break;6399case 'g':6400guid = B_TRUE;6401break;6402case 'L':6403follow_links = B_TRUE;6404break;6405case 'P':6406full_name = B_TRUE;6407break;6408case 'T':6409get_timestamp_arg(*optarg);6410break;6411case 'v':6412verbose = B_TRUE;6413break;6414case 'p':6415parsable = B_TRUE;6416break;6417case 'l':6418latency = B_TRUE;6419break;6420case 'q':6421queues = B_TRUE;6422break;6423case 'H':6424scripted = B_TRUE;6425break;6426case 'w':6427l_histo = B_TRUE;6428break;6429case 'r':6430rq_histo = B_TRUE;6431break;6432case 'y':6433omit_since_boot = B_TRUE;6434break;6435case 'n':6436headers_once = B_TRUE;6437break;6438case 'h':6439usage(B_FALSE);6440break;6441case '?':6442if (optopt == 'c') {6443print_zpool_script_list("iostat");6444exit(0);6445} else {6446fprintf(stderr,6447gettext("invalid option '%c'\n"), optopt);6448}6449usage(B_FALSE);6450}6451}64526453argc -= optind;6454argv += optind;64556456cb.cb_literal = parsable;6457cb.cb_scripted = scripted;64586459if (guid)6460cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;6461if (follow_links)6462cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;6463if (full_name)6464cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;6465cb.cb_iteration = 0;6466cb.cb_namewidth = 0;6467cb.cb_verbose = verbose;64686469/* Get our interval and count values (if any) */6470if (guid) {6471get_interval_count_filter_guids(&argc, argv, &interval,6472&count, &cb);6473} else {6474get_interval_count(&argc, argv, &interval, &count);6475}64766477if (argc == 0) {6478/* No args, so just print the defaults. 
int
zpool_do_iostat(int argc, char **argv)
{
    int c;
    int ret;
    float interval = 0;
    unsigned long count = 0;
    zpool_list_t *list;
    boolean_t verbose = B_FALSE;
    boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
    boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
    boolean_t omit_since_boot = B_FALSE;
    boolean_t guid = B_FALSE;
    boolean_t follow_links = B_FALSE;
    boolean_t full_name = B_FALSE;
    boolean_t headers_once = B_FALSE;
    iostat_cbdata_t cb = { 0 };
    char *cmd = NULL;

    /* Used for printing error message */
    const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
        [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};

    uint64_t unsupported_flags;

    /* check options */
    while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
        switch (c) {
        case 'c':
            if (cmd != NULL) {
                fprintf(stderr,
                    gettext("Can't set -c flag twice\n"));
                exit(1);
            }

            if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
                !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
                fprintf(stderr, gettext(
                    "Can't run -c, disabled by "
                    "ZPOOL_SCRIPTS_ENABLED.\n"));
                exit(1);
            }

            if ((getuid() <= 0 || geteuid() <= 0) &&
                !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
                fprintf(stderr, gettext(
                    "Can't run -c with root privileges "
                    "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
                exit(1);
            }
            cmd = optarg;
            verbose = B_TRUE;
            break;
        case 'g':
            guid = B_TRUE;
            break;
        case 'L':
            follow_links = B_TRUE;
            break;
        case 'P':
            full_name = B_TRUE;
            break;
        case 'T':
            get_timestamp_arg(*optarg);
            break;
        case 'v':
            verbose = B_TRUE;
            break;
        case 'p':
            parsable = B_TRUE;
            break;
        case 'l':
            latency = B_TRUE;
            break;
        case 'q':
            queues = B_TRUE;
            break;
        case 'H':
            scripted = B_TRUE;
            break;
        case 'w':
            l_histo = B_TRUE;
            break;
        case 'r':
            rq_histo = B_TRUE;
            break;
        case 'y':
            omit_since_boot = B_TRUE;
            break;
        case 'n':
            headers_once = B_TRUE;
            break;
        case 'h':
            usage(B_FALSE);
            break;
        case '?':
            if (optopt == 'c') {
                print_zpool_script_list("iostat");
                exit(0);
            } else {
                fprintf(stderr,
                    gettext("invalid option '%c'\n"), optopt);
            }
            usage(B_FALSE);
        }
    }

    argc -= optind;
    argv += optind;

    cb.cb_literal = parsable;
    cb.cb_scripted = scripted;

    if (guid)
        cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
    if (follow_links)
        cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
    if (full_name)
        cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
    cb.cb_iteration = 0;
    cb.cb_namewidth = 0;
    cb.cb_verbose = verbose;

    /* Get our interval and count values (if any) */
    if (guid) {
        get_interval_count_filter_guids(&argc, argv, &interval,
            &count, &cb);
    } else {
        get_interval_count(&argc, argv, &interval, &count);
    }

    if (argc == 0) {
        /* No args, so just print the defaults. */
    } else if (are_all_pools(argc, argv)) {
        /* All the args are pool names */
    } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
        /* All the args are vdevs */
        cb.cb_vdevs.cb_names = argv;
        cb.cb_vdevs.cb_names_count = argc;
        argc = 0; /* No pools to process */
    } else if (are_all_pools(1, argv)) {
        /* The first arg is a pool name */
        if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
            &cb.cb_vdevs)) {
            /* ...and the rest are vdev names */
            cb.cb_vdevs.cb_names = argv + 1;
            cb.cb_vdevs.cb_names_count = argc - 1;
            argc = 1; /* One pool to process */
        } else {
            fprintf(stderr, gettext("Expected either a list of "));
            fprintf(stderr, gettext("pools, or list of vdevs in"));
            fprintf(stderr, " \"%s\", ", argv[0]);
            fprintf(stderr, gettext("but got:\n"));
            error_list_unresolved_vdevs(argc - 1, argv + 1,
                argv[0], &cb.cb_vdevs);
            fprintf(stderr, "\n");
            usage(B_FALSE);
            return (1);
        }
    } else {
        /*
         * The args don't make sense.  The first arg isn't a pool name,
         * nor are all the args vdevs.
         */
        fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
        fprintf(stderr, "\n");
        return (1);
    }

    if (cb.cb_vdevs.cb_names_count != 0) {
        /*
         * If user specified vdevs, it implies verbose.
         */
        cb.cb_verbose = B_TRUE;
    }

    /*
     * Construct the list of all interesting pools.
     */
    ret = 0;
    if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
        &ret)) == NULL)
        return (1);

    if (pool_list_count(list) == 0 && argc != 0) {
        pool_list_free(list);
        return (1);
    }

    if (pool_list_count(list) == 0 && interval == 0) {
        pool_list_free(list);
        (void) fprintf(stderr, gettext("no pools available\n"));
        return (1);
    }

    if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
        pool_list_free(list);
        (void) fprintf(stderr,
            gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
        usage(B_FALSE);
        return (1);
    }

    if (l_histo && rq_histo) {
        pool_list_free(list);
        (void) fprintf(stderr,
            gettext("Only one of [-r|-w] can be passed at a time\n"));
        usage(B_FALSE);
        return (1);
    }

    /*
     * Enter the main iostat loop.
     */
    cb.cb_list = list;

    if (l_histo) {
        /*
         * Histogram tables look out of place when you try to display
         * them with the other stats, so make a rule that you can only
         * print histograms by themselves.
         */
        cb.cb_flags = IOS_L_HISTO_M;
    } else if (rq_histo) {
        cb.cb_flags = IOS_RQ_HISTO_M;
    } else {
        cb.cb_flags = IOS_DEFAULT_M;
        if (latency)
            cb.cb_flags |= IOS_LATENCY_M;
        if (queues)
            cb.cb_flags |= IOS_QUEUES_M;
    }
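    /*
     * Worked example of the capability check below (hypothetical module):
     * if the loaded module predates latency and queue reporting, then
     * IOS_LATENCY_M and IOS_QUEUES_M remain set in unsupported_flags and
     * the error loop prints " -l -q", clearing one low-order bit per
     * iteration (idx = lowbit64(f) - 1 indexes flag_to_arg).
     */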
    /*
     * See if the module supports all the stats we want to display.
     */
    unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
    if (unsupported_flags) {
        uint64_t f;
        int idx;
        fprintf(stderr,
            gettext("The loaded zfs module doesn't support:"));

        /* for each bit set in unsupported_flags */
        for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
            idx = lowbit64(f) - 1;
            fprintf(stderr, " -%c", flag_to_arg[idx]);
        }

        fprintf(stderr, ". Try running a newer module.\n");
        pool_list_free(list);

        return (1);
    }

    int last_npools = 0;
    for (;;) {
        /*
         * Refresh all pools in list, adding or removing pools as
         * necessary.
         */
        int npools = pool_list_refresh(list);
        if (npools == 0) {
            (void) fprintf(stderr, gettext("no pools available\n"));
        } else {
            /*
             * If the list of pools has changed since last time
             * around, reset the iteration count to force the
             * header to be redisplayed.
             */
            if (last_npools != npools)
                cb.cb_iteration = 0;

            /*
             * If this is the first iteration and -y was supplied
             * we skip any printing.
             */
            boolean_t skip = (omit_since_boot &&
                cb.cb_iteration == 0);

            /*
             * Iterate over all pools to determine the maximum width
             * for the pool / device name column across all pools.
             */
            cb.cb_namewidth = 0;
            (void) pool_list_iter(list, B_FALSE,
                get_namewidth_iostat, &cb);

            if (timestamp_fmt != NODATE)
                print_timestamp(timestamp_fmt);

            if (cmd != NULL && cb.cb_verbose &&
                !(cb.cb_flags & IOS_ANYHISTO_M)) {
                cb.vcdl = all_pools_for_each_vdev_run(argc,
                    argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
                    cb.cb_vdevs.cb_names_count,
                    cb.cb_vdevs.cb_name_flags);
            } else {
                cb.vcdl = NULL;
            }

            /*
             * Check the terminal size so we can reprint the
             * headers even after the terminal window height has
             * changed.
             */
            int winheight = terminal_height();
            /*
             * Are we connected to a TTY?  If not, headers_once
             * should be true, to avoid breaking scripts.
             */
            if (winheight < 0)
                headers_once = B_TRUE;

            /*
             * Print the header if this is the first pass and we're
             * not skipping it, if exactly one of skip and verbose
             * is set, or periodically when headers_once isn't set.
             *
             * The histogram code explicitly prints its header on
             * every vdev, so skip this for histograms.
             */
            if (((++cb.cb_iteration == 1 && !skip) ||
                (skip != verbose) ||
                (!headers_once &&
                (cb.cb_iteration % winheight) == 0)) &&
                (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
                !cb.cb_scripted)
                print_iostat_header(&cb);

            if (skip) {
                (void) fflush(stdout);
                (void) fsleep(interval);
                last_npools = npools;
                continue;
            }

            pool_list_iter(list, B_FALSE, print_iostat, &cb);

            /*
             * If there's more than one pool, and we're not in
             * verbose mode (which prints a separator for us),
             * then print a separator.
             *
             * In addition, if we're printing specific vdevs then
             * we also want an ending separator.
             */
            if (((npools > 1 && !verbose &&
                !(cb.cb_flags & IOS_ANYHISTO_M)) ||
                (!(cb.cb_flags & IOS_ANYHISTO_M) &&
                cb.cb_vdevs.cb_names_count)) &&
                !cb.cb_scripted) {
                print_iostat_separator(&cb);
                if (cb.vcdl != NULL)
                    print_cmd_columns(cb.vcdl, 1);
                printf("\n");
            }

            if (cb.vcdl != NULL)
                free_vdev_cmd_data_list(cb.vcdl);

        }

        if (interval == 0)
            break;

        if (count != 0 && --count == 0)
            break;

        (void) fflush(stdout);
        (void) fsleep(interval);

        last_npools = npools;
    }

    pool_list_free(list);

    return (ret);
}
typedef struct list_cbdata {
    boolean_t	cb_verbose;
    int		cb_name_flags;
    int		cb_namewidth;
    boolean_t	cb_json;
    boolean_t	cb_scripted;
    zprop_list_t	*cb_proplist;
    boolean_t	cb_literal;
    nvlist_t	*cb_jsobj;
    boolean_t	cb_json_as_int;
    boolean_t	cb_json_pool_key_guid;
} list_cbdata_t;


/*
 * Given a list of columns to display, output appropriate headers for each one.
 */
static void
print_header(list_cbdata_t *cb)
{
    zprop_list_t *pl = cb->cb_proplist;
    char headerbuf[ZPOOL_MAXPROPLEN];
    const char *header;
    boolean_t first = B_TRUE;
    boolean_t right_justify;
    size_t width = 0;

    for (; pl != NULL; pl = pl->pl_next) {
        width = pl->pl_width;
        if (first && cb->cb_verbose) {
            /*
             * Reset the width to accommodate the verbose listing
             * of devices.
             */
            width = cb->cb_namewidth;
        }

        if (!first)
            (void) fputs("  ", stdout);
        else
            first = B_FALSE;

        right_justify = B_FALSE;
        if (pl->pl_prop != ZPROP_USERPROP) {
            header = zpool_prop_column_name(pl->pl_prop);
            right_justify = zpool_prop_align_right(pl->pl_prop);
        } else {
            int i;

            for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
                headerbuf[i] = toupper(pl->pl_user_prop[i]);
            headerbuf[i] = '\0';
            header = headerbuf;
        }

        if (pl->pl_next == NULL && !right_justify)
            (void) fputs(header, stdout);
        else if (right_justify)
            (void) printf("%*s", (int)width, header);
        else
            (void) printf("%-*s", (int)width, header);
    }

    (void) fputc('\n', stdout);
}
/*
 * Given a pool and a list of properties, print out all the properties
 * according to the described layout.  Used by zpool_do_list().
 */
static void
collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
{
    zprop_list_t *pl = cb->cb_proplist;
    boolean_t first = B_TRUE;
    char property[ZPOOL_MAXPROPLEN];
    const char *propstr;
    boolean_t right_justify;
    size_t width;
    zprop_source_t sourcetype = ZPROP_SRC_NONE;
    nvlist_t *item, *d, *props;
    item = d = props = NULL;

    if (cb->cb_json) {
        item = fnvlist_alloc();
        props = fnvlist_alloc();
        d = fnvlist_lookup_nvlist(cb->cb_jsobj, "pools");
        if (d == NULL) {
            fprintf(stderr, "pools obj not found.\n");
            exit(1);
        }
        fill_pool_info(item, zhp, B_TRUE, cb->cb_json_as_int);
    }

    for (; pl != NULL; pl = pl->pl_next) {

        width = pl->pl_width;
        if (first && cb->cb_verbose) {
            /*
             * Reset the width to accommodate the verbose listing
             * of devices.
             */
            width = cb->cb_namewidth;
        }

        if (!cb->cb_json && !first) {
            if (cb->cb_scripted)
                (void) fputc('\t', stdout);
            else
                (void) fputs("  ", stdout);
        } else {
            first = B_FALSE;
        }

        right_justify = B_FALSE;
        if (pl->pl_prop != ZPROP_USERPROP) {
            if (zpool_get_prop(zhp, pl->pl_prop, property,
                sizeof (property), &sourcetype,
                cb->cb_literal) != 0)
                propstr = "-";
            else
                propstr = property;

            right_justify = zpool_prop_align_right(pl->pl_prop);
        } else if ((zpool_prop_feature(pl->pl_user_prop) ||
            zpool_prop_unsupported(pl->pl_user_prop)) &&
            zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
            sizeof (property)) == 0) {
            propstr = property;
            sourcetype = ZPROP_SRC_LOCAL;
        } else if (zfs_prop_user(pl->pl_user_prop) &&
            zpool_get_userprop(zhp, pl->pl_user_prop, property,
            sizeof (property), &sourcetype) == 0) {
            propstr = property;
        } else {
            propstr = "-";
        }

        if (cb->cb_json) {
            if (pl->pl_prop == ZPOOL_PROP_NAME)
                continue;
            const char *prop_name;
            if (pl->pl_prop != ZPROP_USERPROP)
                prop_name = zpool_prop_to_name(pl->pl_prop);
            else
                prop_name = pl->pl_user_prop;
            (void) zprop_nvlist_one_property(
                prop_name, propstr,
                sourcetype, NULL, NULL, props, cb->cb_json_as_int);
        } else {
            /*
             * If this is being called in scripted mode, or if this
             * is the last column and it is left-justified, don't
             * include a width format specifier.
             */
            if (cb->cb_scripted || (pl->pl_next == NULL &&
                !right_justify))
                (void) fputs(propstr, stdout);
            else if (right_justify)
                (void) printf("%*s", (int)width, propstr);
            else
                (void) printf("%-*s", (int)width, propstr);
        }
    }

    if (cb->cb_json) {
        fnvlist_add_nvlist(item, "properties", props);
        if (cb->cb_json_pool_key_guid) {
            char pool_guid[256];
            uint64_t guid = fnvlist_lookup_uint64(
                zpool_get_config(zhp, NULL),
                ZPOOL_CONFIG_POOL_GUID);
            snprintf(pool_guid, 256, "%llu",
                (u_longlong_t)guid);
            fnvlist_add_nvlist(d, pool_guid, item);
        } else {
            fnvlist_add_nvlist(d, zpool_get_name(zhp),
                item);
        }
        fnvlist_free(props);
        fnvlist_free(item);
    } else
        (void) fputc('\n', stdout);
}
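/*
 * Sketch of the JSON shape collect_pool() builds (hypothetical pool "tank",
 * property set abridged).  The entry is keyed by pool name, or by GUID when
 * --json-pool-key-guid is in effect:
 *
 *	"pools": {
 *		"tank": {
 *			...pool info...,
 *			"properties": { "size": ..., "health": ... }
 *		}
 *	}
 */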
"%1.2f%%" : value < 10000 ?6951"%2.1f%%" : "%3.0f%%", value / 100.0);6952break;6953case ZPOOL_PROP_HEALTH:6954width = 8;6955(void) strlcpy(propval, str, sizeof (propval));6956break;6957default:6958zfs_nicenum_format(value, propval, sizeof (propval), format);6959}69606961if (!valid)6962(void) strlcpy(propval, "-", sizeof (propval));69636964if (json) {6965zprop_nvlist_one_property(zpool_prop_to_name(prop), propval,6966ZPROP_SRC_NONE, NULL, NULL, nvl, as_int);6967} else {6968if (scripted)6969(void) printf("\t%s", propval);6970else6971(void) printf(" %*s", (int)width, propval);6972}6973}69746975/*6976* print static default line per vdev6977*/6978static void6979collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,6980list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item)6981{6982nvlist_t **child;6983vdev_stat_t *vs;6984uint_t c, children = 0;6985char *vname;6986boolean_t scripted = cb->cb_scripted;6987uint64_t islog = B_FALSE;6988nvlist_t *props, *ent, *ch, *obj, *l2c, *sp;6989props = ent = ch = obj = sp = l2c = NULL;6990const char *dashes = "%-*s - - - - "6991"- - - - -\n";69926993verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,6994(uint64_t **)&vs, &c) == 0);69956996if (name != NULL) {6997boolean_t toplevel = (vs->vs_space != 0);6998uint64_t cap;6999enum zfs_nicenum_format format;7000const char *state;70017002if (cb->cb_literal)7003format = ZFS_NICENUM_RAW;7004else7005format = ZFS_NICENUM_1024;70067007if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)7008return;70097010if (cb->cb_json) {7011props = fnvlist_alloc();7012ent = fnvlist_alloc();7013fill_vdev_info(ent, zhp, (char *)name, B_FALSE,7014cb->cb_json_as_int);7015} else {7016if (scripted)7017(void) printf("\t%s", name);7018else if (strlen(name) + depth > cb->cb_namewidth)7019(void) printf("%*s%s", depth, "", name);7020else7021(void) printf("%*s%s%*s", depth, "", name,7022(int)(cb->cb_namewidth - strlen(name) -7023depth), "");7024}70257026/*7027* Print the properties for the individual vdevs. Some7028* properties are only applicable to toplevel vdevs. 
/*
 * print static default line per vdev
 */
static void
collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
    list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item)
{
    nvlist_t **child;
    vdev_stat_t *vs;
    uint_t c, children = 0;
    char *vname;
    boolean_t scripted = cb->cb_scripted;
    uint64_t islog = B_FALSE;
    nvlist_t *props, *ent, *ch, *obj, *l2c, *sp;
    props = ent = ch = obj = sp = l2c = NULL;
    const char *dashes = "%-*s      -      -      -        -         "
        "-      -      -      -         -\n";

    verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
        (uint64_t **)&vs, &c) == 0);

    if (name != NULL) {
        boolean_t toplevel = (vs->vs_space != 0);
        uint64_t cap;
        enum zfs_nicenum_format format;
        const char *state;

        if (cb->cb_literal)
            format = ZFS_NICENUM_RAW;
        else
            format = ZFS_NICENUM_1024;

        if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
            return;

        if (cb->cb_json) {
            props = fnvlist_alloc();
            ent = fnvlist_alloc();
            fill_vdev_info(ent, zhp, (char *)name, B_FALSE,
                cb->cb_json_as_int);
        } else {
            if (scripted)
                (void) printf("\t%s", name);
            else if (strlen(name) + depth > cb->cb_namewidth)
                (void) printf("%*s%s", depth, "", name);
            else
                (void) printf("%*s%s%*s", depth, "", name,
                    (int)(cb->cb_namewidth - strlen(name) -
                    depth), "");
        }

        /*
         * Print the properties for the individual vdevs.  Some
         * properties are only applicable to toplevel vdevs.  The
         * 'toplevel' boolean value is passed to collect_vdev_prop()
         * to indicate that the value is valid.
         */
        for (zprop_list_t *pl = cb->cb_proplist; pl != NULL;
            pl = pl->pl_next) {
            switch (pl->pl_prop) {
            case ZPOOL_PROP_SIZE:
                if (VDEV_STAT_VALID(vs_pspace, c) &&
                    vs->vs_pspace) {
                    collect_vdev_prop(
                        ZPOOL_PROP_SIZE, vs->vs_pspace,
                        NULL, scripted, B_TRUE, format,
                        cb->cb_json, props,
                        cb->cb_json_as_int);
                } else {
                    collect_vdev_prop(
                        ZPOOL_PROP_SIZE, vs->vs_space, NULL,
                        scripted, toplevel, format,
                        cb->cb_json, props,
                        cb->cb_json_as_int);
                }
                break;
            case ZPOOL_PROP_ALLOCATED:
                collect_vdev_prop(ZPOOL_PROP_ALLOCATED,
                    vs->vs_alloc, NULL, scripted, toplevel,
                    format, cb->cb_json, props,
                    cb->cb_json_as_int);
                break;

            case ZPOOL_PROP_FREE:
                collect_vdev_prop(ZPOOL_PROP_FREE,
                    vs->vs_space - vs->vs_alloc, NULL, scripted,
                    toplevel, format, cb->cb_json, props,
                    cb->cb_json_as_int);
                break;

            case ZPOOL_PROP_CHECKPOINT:
                collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,
                    vs->vs_checkpoint_space, NULL, scripted,
                    toplevel, format, cb->cb_json, props,
                    cb->cb_json_as_int);
                break;

            case ZPOOL_PROP_EXPANDSZ:
                collect_vdev_prop(ZPOOL_PROP_EXPANDSZ,
                    vs->vs_esize, NULL, scripted, B_TRUE,
                    format, cb->cb_json, props,
                    cb->cb_json_as_int);
                break;

            case ZPOOL_PROP_FRAGMENTATION:
                collect_vdev_prop(
                    ZPOOL_PROP_FRAGMENTATION,
                    vs->vs_fragmentation, NULL, scripted,
                    (vs->vs_fragmentation != ZFS_FRAG_INVALID &&
                    toplevel),
                    format, cb->cb_json, props,
                    cb->cb_json_as_int);
                break;

            case ZPOOL_PROP_CAPACITY:
                cap = (vs->vs_space == 0) ?
                    0 : (vs->vs_alloc * 10000 / vs->vs_space);
                collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap,
                    NULL, scripted, toplevel, format,
                    cb->cb_json, props, cb->cb_json_as_int);
                break;

            case ZPOOL_PROP_HEALTH:
                state = zpool_state_to_name(vs->vs_state,
                    vs->vs_aux);
                if (isspare) {
                    if (vs->vs_aux == VDEV_AUX_SPARED)
                        state = "INUSE";
                    else if (vs->vs_state ==
                        VDEV_STATE_HEALTHY)
                        state = "AVAIL";
                }
                collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state,
                    scripted, B_TRUE, format, cb->cb_json,
                    props, cb->cb_json_as_int);
                break;

            case ZPOOL_PROP_NAME:
                break;

            default:
                collect_vdev_prop(pl->pl_prop, 0,
                    NULL, scripted, B_FALSE, format,
                    cb->cb_json, props, cb->cb_json_as_int);
            }
        }

        if (cb->cb_json) {
            fnvlist_add_nvlist(ent, "properties", props);
            fnvlist_free(props);
        } else
            (void) fputc('\n', stdout);
    }
    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0) {
        if (cb->cb_json) {
            fnvlist_add_nvlist(item, name, ent);
            fnvlist_free(ent);
        }
        return;
    }

    if (cb->cb_json) {
        ch = fnvlist_alloc();
    }

    /* list the normal vdevs first */
    for (c = 0; c < children; c++) {
        uint64_t ishole = B_FALSE;

        if (nvlist_lookup_uint64(child[c],
            ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
            continue;

        if (nvlist_lookup_uint64(child[c],
            ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
            continue;

        if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
            continue;

        vname = zpool_vdev_name(g_zfs, zhp, child[c],
            cb->cb_name_flags | VDEV_NAME_TYPE_ID);

        if (name == NULL || cb->cb_json != B_TRUE)
            collect_list_stats(zhp, vname, child[c], cb, depth + 2,
                B_FALSE, item);
        else if (cb->cb_json) {
            collect_list_stats(zhp, vname, child[c], cb, depth + 2,
                B_FALSE, ch);
        }
        free(vname);
    }

    if (cb->cb_json) {
        if (!nvlist_empty(ch))
            fnvlist_add_nvlist(ent, "vdevs", ch);
        fnvlist_free(ch);
    }

    /* list the classes: 'logs', 'dedup', and 'special' */
    for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
        boolean_t printed = B_FALSE;
        if (cb->cb_json)
            obj = fnvlist_alloc();
        for (c = 0; c < children; c++) {
            const char *bias = NULL;
            const char *type = NULL;

            if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
                &islog) == 0 && islog) {
                bias = VDEV_ALLOC_CLASS_LOGS;
            } else {
                (void) nvlist_lookup_string(child[c],
                    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
                (void) nvlist_lookup_string(child[c],
                    ZPOOL_CONFIG_TYPE, &type);
            }
            if (bias == NULL || strcmp(bias, class_name[n]) != 0)
                continue;
            if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
                continue;

            if (!printed && !cb->cb_json) {
                /* LINTED E_SEC_PRINTF_VAR_FMT */
                (void) printf(dashes, cb->cb_namewidth,
                    class_name[n]);
                printed = B_TRUE;
            }
            vname = zpool_vdev_name(g_zfs, zhp, child[c],
                cb->cb_name_flags | VDEV_NAME_TYPE_ID);
            collect_list_stats(zhp, vname, child[c], cb, depth + 2,
                B_FALSE, obj);
            free(vname);
        }
        if (cb->cb_json) {
            if (!nvlist_empty(obj))
                fnvlist_add_nvlist(item, class_name[n], obj);
            fnvlist_free(obj);
        }
    }

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
        &child, &children) == 0 && children > 0) {
        if (cb->cb_json) {
            l2c = fnvlist_alloc();
        } else {
            /* LINTED E_SEC_PRINTF_VAR_FMT */
            (void) printf(dashes, cb->cb_namewidth, "cache");
        }
        for (c = 0; c < children; c++) {
            vname = zpool_vdev_name(g_zfs, zhp, child[c],
                cb->cb_name_flags);
            collect_list_stats(zhp, vname, child[c], cb, depth + 2,
                B_FALSE, l2c);
            free(vname);
        }
        if (cb->cb_json) {
            if (!nvlist_empty(l2c))
                fnvlist_add_nvlist(item, "l2cache", l2c);
            fnvlist_free(l2c);
        }
    }

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
        &children) == 0 && children > 0) {
        if (cb->cb_json) {
            sp = fnvlist_alloc();
        } else {
            /* LINTED E_SEC_PRINTF_VAR_FMT */
            (void) printf(dashes, cb->cb_namewidth, "spare");
        }
        for (c = 0; c < children; c++) {
            vname = zpool_vdev_name(g_zfs, zhp, child[c],
                cb->cb_name_flags);
            collect_list_stats(zhp, vname, child[c], cb, depth + 2,
                B_TRUE, sp);
            free(vname);
        }
        if (cb->cb_json) {
            if (!nvlist_empty(sp))
                fnvlist_add_nvlist(item, "spares", sp);
            fnvlist_free(sp);
        }
    }

    if (name != NULL && cb->cb_json) {
        fnvlist_add_nvlist(item, name, ent);
        fnvlist_free(ent);
    }
}
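/*
 * In text mode the class sections emitted by collect_list_stats() above
 * ('logs', 'dedup', 'special', plus 'cache' and 'spare') each begin with a
 * separator row produced by the 'dashes' format string, e.g. (illustrative,
 * column widths abridged):
 *
 *	logs          -      -      -      -      -      -
 *	  mirror-1    ...
 */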
/*
 * Generic callback function to list a pool.
 */
static int
list_callback(zpool_handle_t *zhp, void *data)
{
    nvlist_t *p, *d, *nvdevs;
    uint64_t guid;
    char pool_guid[256];
    const char *pool_name = zpool_get_name(zhp);
    list_cbdata_t *cbp = data;
    p = d = nvdevs = NULL;

    collect_pool(zhp, cbp);

    if (cbp->cb_verbose) {
        nvlist_t *config, *nvroot;
        config = zpool_get_config(zhp, NULL);
        verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);
        if (cbp->cb_json) {
            d = fnvlist_lookup_nvlist(cbp->cb_jsobj,
                "pools");
            if (cbp->cb_json_pool_key_guid) {
                guid = fnvlist_lookup_uint64(config,
                    ZPOOL_CONFIG_POOL_GUID);
                snprintf(pool_guid, 256, "%llu",
                    (u_longlong_t)guid);
                p = fnvlist_lookup_nvlist(d, pool_guid);
            } else {
                p = fnvlist_lookup_nvlist(d, pool_name);
            }
            nvdevs = fnvlist_alloc();
        }
        collect_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE, nvdevs);
        if (cbp->cb_json) {
            fnvlist_add_nvlist(p, "vdevs", nvdevs);
            if (cbp->cb_json_pool_key_guid)
                fnvlist_add_nvlist(d, pool_guid, p);
            else
                fnvlist_add_nvlist(d, pool_name, p);
            fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
            fnvlist_free(nvdevs);
        }
    }

    return (0);
}

/*
 * Set the minimum pool/vdev name column width.  The width must be at least 9,
 * but may be as large as needed.
 */
static int
get_namewidth_list(zpool_handle_t *zhp, void *data)
{
    list_cbdata_t *cb = data;
    int width;

    width = get_namewidth(zhp, cb->cb_namewidth,
        cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);

    if (width < 9)
        width = 9;

    cb->cb_namewidth = width;

    return (0);
}

/*
 * zpool list [-gHLpPjv] [-o prop[,prop]*] [-T d|u] [pool] ...
 *	[interval [count]]
 *
 *	-g	Display guid for individual vdev name.
 *	-H	Scripted mode.  Don't display headers, and separate properties
 *		by a single tab.
 *	-L	Follow links when resolving vdev path name.
 *	-o	List of properties to display.  Defaults to
 *		"name,size,allocated,free,expandsize,fragmentation,capacity,"
 *		"dedupratio,health,altroot"
 *	-p	Display values in parsable (exact) format.
 *	-P	Display full path for vdev name.
 *	-T	Display a timestamp in date(1) or Unix format
 *	-v	List individual vdevs within each pool, along with their
 *		space statistics.
 *	-j	Display the output in JSON format
 *	--json-int	Display the numbers as integers instead of strings.
 *	--json-pool-key-guid	Set pool GUID as key for pool objects.
 *
 * List all pools in the system, whether or not they're healthy.  Output space
 * statistics for each one, as well as a health status summary.
 */
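/*
 * Illustrative invocations (hypothetical pool name):
 *
 *	zpool list				# summary of every pool
 *	zpool list -v tank			# include per-vdev space usage
 *	zpool list -Hp -o name,size,free	# script-friendly exact values
 *	zpool list -j --json-int 5		# JSON output every 5 seconds
 */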
'%c'\n"),7432optopt);7433usage(B_FALSE);7434}7435}74367437argc -= optind;7438argv += optind;74397440if (!cb.cb_json && cb.cb_json_as_int) {7441(void) fprintf(stderr, gettext("'--json-int' only works with"7442" '-j' option\n"));7443usage(B_FALSE);7444}74457446if (!cb.cb_json && cb.cb_json_pool_key_guid) {7447(void) fprintf(stderr, gettext("'json-pool-key-guid' only"7448" works with '-j' option\n"));7449usage(B_FALSE);7450}74517452get_interval_count(&argc, argv, &interval, &count);74537454if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)7455usage(B_FALSE);74567457for (;;) {7458if ((list = pool_list_get(argc, argv, &cb.cb_proplist,7459ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)7460return (1);74617462if (pool_list_count(list) == 0)7463break;74647465if (cb.cb_json) {7466cb.cb_jsobj = zpool_json_schema(0, 1);7467data = fnvlist_alloc();7468fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);7469fnvlist_free(data);7470}74717472cb.cb_namewidth = 0;7473(void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);74747475if (timestamp_fmt != NODATE) {7476if (cb.cb_json) {7477if (cb.cb_json_as_int) {7478fnvlist_add_uint64(cb.cb_jsobj, "time",7479time(NULL));7480} else {7481char ts[128];7482get_timestamp(timestamp_fmt, ts, 128);7483fnvlist_add_string(cb.cb_jsobj, "time",7484ts);7485}7486} else7487print_timestamp(timestamp_fmt);7488}74897490if (!cb.cb_scripted && (first || cb.cb_verbose) &&7491!cb.cb_json) {7492print_header(&cb);7493first = B_FALSE;7494}7495ret = pool_list_iter(list, B_TRUE, list_callback, &cb);74967497if (ret == 0 && cb.cb_json)7498zcmd_print_json(cb.cb_jsobj);7499else if (ret != 0 && cb.cb_json)7500nvlist_free(cb.cb_jsobj);75017502if (interval == 0)7503break;75047505if (count != 0 && --count == 0)7506break;75077508pool_list_free(list);75097510(void) fflush(stdout);7511(void) fsleep(interval);7512}75137514if (argc == 0 && !cb.cb_scripted && !cb.cb_json &&7515pool_list_count(list) == 0) {7516(void) printf(gettext("no pools available\n"));7517ret = 0;7518}75197520pool_list_free(list);7521zprop_free_list(cb.cb_proplist);7522return (ret);7523}75247525static int7526zpool_do_attach_or_replace(int argc, char **argv, int replacing)7527{7528boolean_t force = B_FALSE;7529boolean_t rebuild = B_FALSE;7530boolean_t wait = B_FALSE;7531int c;7532nvlist_t *nvroot;7533char *poolname, *old_disk, *new_disk;7534zpool_handle_t *zhp;7535nvlist_t *props = NULL;7536char *propval;7537int ret;75387539/* check options */7540while ((c = getopt(argc, argv, "fo:sw")) != -1) {7541switch (c) {7542case 'f':7543force = B_TRUE;7544break;7545case 'o':7546if ((propval = strchr(optarg, '=')) == NULL) {7547(void) fprintf(stderr, gettext("missing "7548"'=' for -o option\n"));7549usage(B_FALSE);7550}7551*propval = '\0';7552propval++;75537554if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||7555(add_prop_list(optarg, propval, &props, B_TRUE)))7556usage(B_FALSE);7557break;7558case 's':7559rebuild = B_TRUE;7560break;7561case 'w':7562wait = B_TRUE;7563break;7564case '?':7565(void) fprintf(stderr, gettext("invalid option '%c'\n"),7566optopt);7567usage(B_FALSE);7568}7569}75707571argc -= optind;7572argv += optind;75737574/* get pool name and check number of arguments */7575if (argc < 1) {7576(void) fprintf(stderr, gettext("missing pool name argument\n"));7577usage(B_FALSE);7578}75797580poolname = argv[0];75817582if (argc < 2) {7583(void) fprintf(stderr,7584gettext("missing <device> specification\n"));7585usage(B_FALSE);7586}75877588old_disk = argv[1];75897590if (argc < 3) {7591if (!replacing) {7592(void) 
fprintf(stderr,7593gettext("missing <new_device> specification\n"));7594usage(B_FALSE);7595}7596new_disk = old_disk;7597argc -= 1;7598argv += 1;7599} else {7600new_disk = argv[2];7601argc -= 2;7602argv += 2;7603}76047605if (argc > 1) {7606(void) fprintf(stderr, gettext("too many arguments\n"));7607usage(B_FALSE);7608}76097610if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {7611nvlist_free(props);7612return (1);7613}76147615if (zpool_get_config(zhp, NULL) == NULL) {7616(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),7617poolname);7618zpool_close(zhp);7619nvlist_free(props);7620return (1);7621}76227623/* unless manually specified use "ashift" pool property (if set) */7624if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {7625int intval;7626zprop_source_t src;7627char strval[ZPOOL_MAXPROPLEN];76287629intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);7630if (src != ZPROP_SRC_DEFAULT) {7631(void) sprintf(strval, "%" PRId32, intval);7632verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,7633&props, B_TRUE) == 0);7634}7635}76367637nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,7638argc, argv);7639if (nvroot == NULL) {7640zpool_close(zhp);7641nvlist_free(props);7642return (1);7643}76447645ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,7646rebuild);76477648if (ret == 0 && wait) {7649zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;7650char raidz_prefix[] = "raidz";7651if (replacing) {7652activity = ZPOOL_WAIT_REPLACE;7653} else if (strncmp(old_disk,7654raidz_prefix, strlen(raidz_prefix)) == 0) {7655activity = ZPOOL_WAIT_RAIDZ_EXPAND;7656}7657ret = zpool_wait(zhp, activity);7658}76597660nvlist_free(props);7661nvlist_free(nvroot);7662zpool_close(zhp);76637664return (ret);7665}76667667/*7668* zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>7669*7670* -f Force attach, even if <new_device> appears to be in use.7671* -s Use sequential instead of healing reconstruction for resilver.7672* -o Set property=value.7673* -w Wait for replacing to complete before returning7674*7675* Replace <device> with <new_device>.7676*/7677int7678zpool_do_replace(int argc, char **argv)7679{7680return (zpool_do_attach_or_replace(argc, argv, B_TRUE));7681}76827683/*7684* zpool attach [-fsw] [-o property=value] <pool> <vdev> <new_device>7685*7686* -f Force attach, even if <new_device> appears to be in use.7687* -s Use sequential instead of healing reconstruction for resilver.7688* -o Set property=value.7689* -w Wait for resilvering (mirror) or expansion (raidz) to complete7690* before returning.7691*7692* Attach <new_device> to a <vdev>, where the vdev can be of type7693* device, mirror or raidz. If <vdev> is not part of a mirror, then <vdev> will7694* be transformed into a mirror of <vdev> and <new_device>. When a mirror7695* is involved, <new_device> will begin life with a DTL of [0, now], and will7696* immediately begin to resilver itself. For the raidz case, a expansion will7697* commence and reflow the raidz data across all the disks including the7698* <new_device>.7699*/7700int7701zpool_do_attach(int argc, char **argv)7702{7703return (zpool_do_attach_or_replace(argc, argv, B_FALSE));7704}77057706/*7707* zpool detach [-f] <pool> <device>7708*7709* -f Force detach of <device>, even if DTLs argue against it7710* (not supported yet)7711*7712* Detach a device from a mirror. 
/*
 * zpool detach [-f] <pool> <device>
 *
 *	-f	Force detach of <device>, even if DTLs argue against it
 *		(not supported yet)
 *
 * Detach a device from a mirror.  The operation will be refused if <device>
 * is the last device in the mirror, or if the DTLs indicate that this device
 * has the only valid copy of some data.
 */
int
zpool_do_detach(int argc, char **argv)
{
    int c;
    char *poolname, *path;
    zpool_handle_t *zhp;
    int ret;

    /* check options */
    while ((c = getopt(argc, argv, "")) != -1) {
        switch (c) {
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    argc -= optind;
    argv += optind;

    /* get pool name and check number of arguments */
    if (argc < 1) {
        (void) fprintf(stderr, gettext("missing pool name argument\n"));
        usage(B_FALSE);
    }

    if (argc < 2) {
        (void) fprintf(stderr,
            gettext("missing <device> specification\n"));
        usage(B_FALSE);
    }

    poolname = argv[0];
    path = argv[1];

    if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
        return (1);

    ret = zpool_vdev_detach(zhp, path);

    zpool_close(zhp);

    return (ret);
}

/*
 * zpool split [-gLnP] [-o prop=val] ...
 *		[-o mntopt] ...
 *		[-R altroot] <pool> <newpool> [<device> ...]
 *
 *	-g	Display guid for individual vdev name.
 *	-L	Follow links when resolving vdev path name.
 *	-n	Do not split the pool, but display the resulting layout if
 *		it were to be split.
 *	-o	Set property=value, or set mount options.
 *	-P	Display full path for vdev name.
 *	-R	Mount the split-off pool under an alternate root.
 *	-l	Load encryption keys while importing.
 *
 * Splits the named pool and gives it the new pool name.  Devices to be split
 * off may be listed, provided that no more than one device is specified
 * per top-level vdev mirror.  The newly split pool is left in an exported
 * state unless -R is specified.
 *
 * Restrictions: the top-level of the pool must only be made up of
 * mirrors; all devices in the pool must be healthy; no device may be
 * undergoing a resilvering operation.
 */
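/*
 * Illustrative invocations (hypothetical pool names):
 *
 *	zpool split tank tank2		# split tank2 off, leave it exported
 *	zpool split -R /mnt tank tank2	# split and import tank2 under /mnt
 *	zpool split -n tank tank2	# dry run: show the resulting layout
 */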
gettext("setting mntopts is only "7856"valid when importing the pool\n"));7857usage(B_FALSE);7858}78597860if (!flags.import && loadkeys) {7861(void) fprintf(stderr, gettext("loading keys is only "7862"valid when importing the pool\n"));7863usage(B_FALSE);7864}78657866argc -= optind;7867argv += optind;78687869if (argc < 1) {7870(void) fprintf(stderr, gettext("Missing pool name\n"));7871usage(B_FALSE);7872}7873if (argc < 2) {7874(void) fprintf(stderr, gettext("Missing new pool name\n"));7875usage(B_FALSE);7876}78777878srcpool = argv[0];7879newpool = argv[1];78807881argc -= 2;7882argv += 2;78837884if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {7885nvlist_free(props);7886return (1);7887}78887889config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);7890if (config == NULL) {7891ret = 1;7892} else {7893if (flags.dryrun) {7894(void) printf(gettext("would create '%s' with the "7895"following layout:\n\n"), newpool);7896print_vdev_tree(NULL, newpool, config, 0, "",7897flags.name_flags);7898print_vdev_tree(NULL, "dedup", config, 0,7899VDEV_ALLOC_BIAS_DEDUP, 0);7900print_vdev_tree(NULL, "special", config, 0,7901VDEV_ALLOC_BIAS_SPECIAL, 0);7902}7903}79047905zpool_close(zhp);79067907if (ret != 0 || flags.dryrun || !flags.import) {7908nvlist_free(config);7909nvlist_free(props);7910return (ret);7911}79127913/*7914* The split was successful. Now we need to open the new7915* pool and import it.7916*/7917if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {7918nvlist_free(config);7919nvlist_free(props);7920return (1);7921}79227923if (loadkeys) {7924ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);7925if (ret != 0)7926ret = 1;7927}79287929if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {7930ms_status = zpool_enable_datasets(zhp, mntopts, 0,7931mount_tp_nthr);7932if (ms_status == EZFS_SHAREFAILED) {7933(void) fprintf(stderr, gettext("Split was successful, "7934"datasets are mounted but sharing of some datasets "7935"has failed\n"));7936} else if (ms_status == EZFS_MOUNTFAILED) {7937(void) fprintf(stderr, gettext("Split was successful"7938", but some datasets could not be mounted\n"));7939(void) fprintf(stderr, gettext("Try doing '%s' with a "7940"different altroot\n"), "zpool import");7941}7942}7943zpool_close(zhp);7944nvlist_free(config);7945nvlist_free(props);79467947return (ret);7948}794979507951/*7952* zpool online [--power] <pool> <device> ...7953*7954* --power: Power on the enclosure slot to the drive (if possible)7955*/7956int7957zpool_do_online(int argc, char **argv)7958{7959int c, i;7960char *poolname;7961zpool_handle_t *zhp;7962int ret = 0;7963vdev_state_t newstate;7964int flags = 0;7965boolean_t is_power_on = B_FALSE;7966struct option long_options[] = {7967{"power", no_argument, NULL, ZPOOL_OPTION_POWER},7968{0, 0, 0, 0}7969};79707971/* check options */7972while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {7973switch (c) {7974case 'e':7975flags |= ZFS_ONLINE_EXPAND;7976break;7977case ZPOOL_OPTION_POWER:7978is_power_on = B_TRUE;7979break;7980case '?':7981(void) fprintf(stderr, gettext("invalid option '%c'\n"),7982optopt);7983usage(B_FALSE);7984}7985}79867987if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))7988is_power_on = B_TRUE;79897990argc -= optind;7991argv += optind;79927993/* get pool name and check number of arguments */7994if (argc < 1) {7995(void) fprintf(stderr, gettext("missing pool name\n"));7996usage(B_FALSE);7997}7998if (argc < 2) {7999(void) fprintf(stderr, gettext("missing device name\n"));8000usage(B_FALSE);8001}80028003poolname = 
int
zpool_do_online(int argc, char **argv)
{
    int c, i;
    char *poolname;
    zpool_handle_t *zhp;
    int ret = 0;
    vdev_state_t newstate;
    int flags = 0;
    boolean_t is_power_on = B_FALSE;
    struct option long_options[] = {
        {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
        {0, 0, 0, 0}
    };

    /* check options */
    while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
        switch (c) {
        case 'e':
            flags |= ZFS_ONLINE_EXPAND;
            break;
        case ZPOOL_OPTION_POWER:
            is_power_on = B_TRUE;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
        is_power_on = B_TRUE;

    argc -= optind;
    argv += optind;

    /* get pool name and check number of arguments */
    if (argc < 1) {
        (void) fprintf(stderr, gettext("missing pool name\n"));
        usage(B_FALSE);
    }
    if (argc < 2) {
        (void) fprintf(stderr, gettext("missing device name\n"));
        usage(B_FALSE);
    }

    poolname = argv[0];

    if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
        (void) fprintf(stderr, gettext("failed to open pool "
            "\"%s\""), poolname);
        return (1);
    }

    for (i = 1; i < argc; i++) {
        vdev_state_t oldstate;
        boolean_t avail_spare, l2cache;
        int rc;

        if (is_power_on) {
            rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
            if (rc == ENOTSUP) {
                (void) fprintf(stderr,
                    gettext("Power control not supported\n"));
            }
            if (rc != 0)
                return (rc);
        }

        nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
            &l2cache, NULL);
        if (tgt == NULL) {
            ret = 1;
            (void) fprintf(stderr, gettext("couldn't find device "
                "\"%s\" in pool \"%s\"\n"), argv[i], poolname);
            continue;
        }
        uint_t vsc;
        oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
            ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
        if ((rc = zpool_vdev_online(zhp, argv[i], flags,
            &newstate)) == 0) {
            if (newstate != VDEV_STATE_HEALTHY) {
                (void) printf(gettext("warning: device '%s' "
                    "onlined, but remains in faulted state\n"),
                    argv[i]);
                if (newstate == VDEV_STATE_FAULTED)
                    (void) printf(gettext("use 'zpool "
                        "clear' to restore a faulted "
                        "device\n"));
                else
                    (void) printf(gettext("use 'zpool "
                        "replace' to replace devices "
                        "that are no longer present\n"));
                if ((flags & ZFS_ONLINE_EXPAND)) {
                    (void) printf(gettext("%s: failed "
                        "to expand usable space on "
                        "unhealthy device '%s'\n"),
                        (oldstate >= VDEV_STATE_DEGRADED ?
                        "error" : "warning"), argv[i]);
                    if (oldstate >= VDEV_STATE_DEGRADED) {
                        ret = 1;
                        break;
                    }
                }
            }
        } else {
            (void) fprintf(stderr, gettext("Failed to online "
                "\"%s\" in pool \"%s\": %d\n"),
                argv[i], poolname, rc);
            ret = 1;
        }
    }

    zpool_close(zhp);

    return (ret);
}
/*
 * zpool offline [-ft]|[--power] <pool> <device> ...
 *
 *	-f	Force the device into a faulted state.
 *
 *	-t	Only take the device off-line temporarily.  The offline/faulted
 *		state will not be persistent across reboots.
 *
 *	--power	Power off the enclosure slot to the drive (if possible)
 */
int
zpool_do_offline(int argc, char **argv)
{
    int c, i;
    char *poolname;
    zpool_handle_t *zhp;
    int ret = 0;
    boolean_t istmp = B_FALSE;
    boolean_t fault = B_FALSE;
    boolean_t is_power_off = B_FALSE;

    struct option long_options[] = {
        {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
        {0, 0, 0, 0}
    };

    /* check options */
    while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {
        switch (c) {
        case 'f':
            fault = B_TRUE;
            break;
        case 't':
            istmp = B_TRUE;
            break;
        case ZPOOL_OPTION_POWER:
            is_power_off = B_TRUE;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    if (is_power_off && fault) {
        (void) fprintf(stderr,
            gettext("--power and -f cannot be used together\n"));
        usage(B_FALSE);
        return (1);
    }

    if (is_power_off && istmp) {
        (void) fprintf(stderr,
            gettext("--power and -t cannot be used together\n"));
        usage(B_FALSE);
        return (1);
    }

    argc -= optind;
    argv += optind;

    /* get pool name and check number of arguments */
    if (argc < 1) {
        (void) fprintf(stderr, gettext("missing pool name\n"));
        usage(B_FALSE);
    }
    if (argc < 2) {
        (void) fprintf(stderr, gettext("missing device name\n"));
        usage(B_FALSE);
    }

    poolname = argv[0];

    if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
        (void) fprintf(stderr, gettext("failed to open pool "
            "\"%s\""), poolname);
        return (1);
    }

    for (i = 1; i < argc; i++) {
        uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
        if (is_power_off) {
            /*
             * Note: we have to power off first, then set REMOVED,
             * or else zpool_vdev_set_removed_state() returns
             * EAGAIN.
             */
            ret = zpool_power_off(zhp, argv[i]);
            if (ret != 0) {
                (void) fprintf(stderr, "%s %s %d\n",
                    gettext("unable to power off slot for"),
                    argv[i], ret);
            }
            zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE);

        } else if (fault) {
            vdev_aux_t aux;
            if (istmp == B_FALSE) {
                /* Force the fault to persist across imports */
                aux = VDEV_AUX_EXTERNAL_PERSIST;
            } else {
                aux = VDEV_AUX_EXTERNAL;
            }

            if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
                ret = 1;
        } else {
            if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
                ret = 1;
        }
    }

    zpool_close(zhp);

    return (ret);
}

/*
 * zpool clear [-nF]|[--power] <pool> [device]
 *
 *	-F	Attempt rewind recovery, discarding the last few
 *		transactions, if the errors cannot be cleared normally.
 *	-n	With -F, check whether the rewind would succeed, but don't
 *		actually rewind.
 *	--power	Power on any powered-off devices first.
 *
 * Clear all errors associated with a pool or a particular device.
 */
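/*
 * Option-to-policy mapping in zpool_do_clear() below: -Fn selects
 * ZPOOL_TRY_REWIND (dry run), -F alone selects ZPOOL_DO_REWIND, and -X
 * (with -F) adds ZPOOL_EXTREME_REWIND.  Illustrative invocations
 * (hypothetical names):
 *
 *	zpool clear tank		# clear pool-wide error counts
 *	zpool clear tank sda		# clear errors on one device
 *	zpool clear -F tank		# also attempt rewind recovery
 */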
'%c'\n"),8237optopt);8238usage(B_FALSE);8239}8240}82418242if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))8243is_power_on = B_TRUE;82448245argc -= optind;8246argv += optind;82478248if (argc < 1) {8249(void) fprintf(stderr, gettext("missing pool name\n"));8250usage(B_FALSE);8251}82528253if (argc > 2) {8254(void) fprintf(stderr, gettext("too many arguments\n"));8255usage(B_FALSE);8256}82578258if ((dryrun || xtreme_rewind) && !do_rewind) {8259(void) fprintf(stderr,8260gettext("-n or -X only meaningful with -F\n"));8261usage(B_FALSE);8262}8263if (dryrun)8264rewind_policy = ZPOOL_TRY_REWIND;8265else if (do_rewind)8266rewind_policy = ZPOOL_DO_REWIND;8267if (xtreme_rewind)8268rewind_policy |= ZPOOL_EXTREME_REWIND;82698270/* In future, further rewind policy choices can be passed along here */8271if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||8272nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,8273rewind_policy) != 0) {8274return (1);8275}82768277pool = argv[0];8278device = argc == 2 ? argv[1] : NULL;82798280if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {8281nvlist_free(policy);8282return (1);8283}82848285if (is_power_on) {8286if (device == NULL) {8287zpool_power_on_pool_and_wait_for_devices(zhp);8288} else {8289zpool_power_on_and_disk_wait(zhp, device);8290}8291}82928293if (zpool_clear(zhp, device, policy) != 0)8294ret = 1;82958296zpool_close(zhp);82978298nvlist_free(policy);82998300return (ret);8301}83028303/*8304* zpool reguid [-g <guid>] <pool>8305*/8306int8307zpool_do_reguid(int argc, char **argv)8308{8309uint64_t guid;8310uint64_t *guidp = NULL;8311int c;8312char *endptr;8313char *poolname;8314zpool_handle_t *zhp;8315int ret = 0;83168317/* check options */8318while ((c = getopt(argc, argv, "g:")) != -1) {8319switch (c) {8320case 'g':8321errno = 0;8322guid = strtoull(optarg, &endptr, 10);8323if (errno != 0 || *endptr != '\0') {8324(void) fprintf(stderr,8325gettext("invalid GUID: %s\n"), optarg);8326usage(B_FALSE);8327}8328guidp = &guid;8329break;8330case '?':8331(void) fprintf(stderr, gettext("invalid option '%c'\n"),8332optopt);8333usage(B_FALSE);8334}8335}83368337argc -= optind;8338argv += optind;83398340/* get pool name and check number of arguments */8341if (argc < 1) {8342(void) fprintf(stderr, gettext("missing pool name\n"));8343usage(B_FALSE);8344}83458346if (argc > 1) {8347(void) fprintf(stderr, gettext("too many arguments\n"));8348usage(B_FALSE);8349}83508351poolname = argv[0];8352if ((zhp = zpool_open(g_zfs, poolname)) == NULL)8353return (1);83548355ret = zpool_set_guid(zhp, guidp);83568357zpool_close(zhp);8358return (ret);8359}836083618362/*8363* zpool reopen <pool>8364*8365* Reopen the pool so that the kernel can update the sizes of all vdevs.8366*/8367int8368zpool_do_reopen(int argc, char **argv)8369{8370int c;8371int ret = 0;8372boolean_t scrub_restart = B_TRUE;83738374/* check options */8375while ((c = getopt(argc, argv, "n")) != -1) {8376switch (c) {8377case 'n':8378scrub_restart = B_FALSE;8379break;8380case '?':8381(void) fprintf(stderr, gettext("invalid option '%c'\n"),8382optopt);8383usage(B_FALSE);8384}8385}83868387argc -= optind;8388argv += optind;83898390/* if argc == 0 we will execute zpool_reopen_one on all pools */8391ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,8392B_FALSE, zpool_reopen_one, &scrub_restart);83938394return (ret);8395}83968397typedef struct scrub_cbdata {8398int cb_type;8399pool_scrub_cmd_t cb_scrub_cmd;8400time_t cb_date_start;8401time_t cb_date_end;8402} scrub_cbdata_t;84038404static 
int
zpool_do_reguid(int argc, char **argv)
{
    uint64_t guid;
    uint64_t *guidp = NULL;
    int c;
    char *endptr;
    char *poolname;
    zpool_handle_t *zhp;
    int ret = 0;

    /* check options */
    while ((c = getopt(argc, argv, "g:")) != -1) {
        switch (c) {
        case 'g':
            errno = 0;
            guid = strtoull(optarg, &endptr, 10);
            if (errno != 0 || *endptr != '\0') {
                (void) fprintf(stderr,
                    gettext("invalid GUID: %s\n"), optarg);
                usage(B_FALSE);
            }
            guidp = &guid;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    argc -= optind;
    argv += optind;

    /* get pool name and check number of arguments */
    if (argc < 1) {
        (void) fprintf(stderr, gettext("missing pool name\n"));
        usage(B_FALSE);
    }

    if (argc > 1) {
        (void) fprintf(stderr, gettext("too many arguments\n"));
        usage(B_FALSE);
    }

    poolname = argv[0];
    if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
        return (1);

    ret = zpool_set_guid(zhp, guidp);

    zpool_close(zhp);
    return (ret);
}


/*
 * zpool reopen <pool>
 *
 * Reopen the pool so that the kernel can update the sizes of all vdevs.
 */
int
zpool_do_reopen(int argc, char **argv)
{
    int c;
    int ret = 0;
    boolean_t scrub_restart = B_TRUE;

    /* check options */
    while ((c = getopt(argc, argv, "n")) != -1) {
        switch (c) {
        case 'n':
            scrub_restart = B_FALSE;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    argc -= optind;
    argv += optind;

    /* if argc == 0 we will execute zpool_reopen_one on all pools */
    ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
        B_FALSE, zpool_reopen_one, &scrub_restart);

    return (ret);
}

typedef struct scrub_cbdata {
    int cb_type;
    pool_scrub_cmd_t cb_scrub_cmd;
    time_t cb_date_start;
    time_t cb_date_end;
} scrub_cbdata_t;

static boolean_t
zpool_has_checkpoint(zpool_handle_t *zhp)
{
    nvlist_t *config, *nvroot;

    config = zpool_get_config(zhp, NULL);

    if (config != NULL) {
        pool_checkpoint_stat_t *pcs = NULL;
        uint_t c;

        nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
        (void) nvlist_lookup_uint64_array(nvroot,
            ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);

        if (pcs == NULL || pcs->pcs_state == CS_NONE)
            return (B_FALSE);

        assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
            pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
        return (B_TRUE);
    }

    return (B_FALSE);
}

static int
scrub_callback(zpool_handle_t *zhp, void *data)
{
    scrub_cbdata_t *cb = data;
    int err;

    /*
     * Ignore faulted pools.
     */
    if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
        (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
            "currently unavailable\n"), zpool_get_name(zhp));
        return (1);
    }

    err = zpool_scan_range(zhp, cb->cb_type, cb->cb_scrub_cmd,
        cb->cb_date_start, cb->cb_date_end);
    if (err == 0 && zpool_has_checkpoint(zhp) &&
        cb->cb_type == POOL_SCAN_SCRUB) {
        (void) printf(gettext("warning: will not scrub state that "
            "belongs to the checkpoint of pool '%s'\n"),
            zpool_get_name(zhp));
    }

    return (err != 0);
}

static int
wait_callback(zpool_handle_t *zhp, void *data)
{
    zpool_wait_activity_t *act = data;
    return (zpool_wait(zhp, *act));
}

static time_t
date_string_to_sec(const char *timestr, boolean_t rounding)
{
    struct tm tm = {0};
    int adjustment = rounding ? 1 : 0;

    /* Allow mktime to determine timezone. */
    tm.tm_isdst = -1;

    if (strptime(timestr, "%Y-%m-%d %H:%M", &tm) == NULL) {
        if (strptime(timestr, "%Y-%m-%d", &tm) == NULL) {
            fprintf(stderr, gettext("Failed to parse the date.\n"));
            usage(B_FALSE);
        }
        adjustment *= 24 * 60 * 60;
    } else {
        adjustment *= 60;
    }

    return (mktime(&tm) + adjustment);
}

/*
 * zpool scrub [-e | -s | -p | -C | -E | -S] [-w] [-a | <pool> ...]
 *
 *	-a	Scrub all pools.
 *	-e	Only scrub blocks in the error log.
 *	-E	End date of the block-birth range to scrub.
 *	-S	Start date of the block-birth range to scrub.
 *	-s	Stop.  Stops any in-progress scrub.
 *	-p	Pause.  Pause in-progress scrub.
 *	-w	Wait.  Blocks until scrub has completed.
 *	-C	Scrub from last saved txg.
 */
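/*
 * Illustrative invocations (hypothetical pool name and dates):
 *
 *	zpool scrub tank			# start (or restart) a scrub
 *	zpool scrub -p tank			# pause it
 *	zpool scrub -w tank			# start and wait for completion
 *	zpool scrub -S 2024-01-01 -E 2024-02-01 tank
 *						# scrub blocks born in January
 *
 * The date arguments accept "%Y-%m-%d %H:%M" or "%Y-%m-%d"; a bare end date
 * is rounded up by a day in date_string_to_sec() above, which is what makes
 * the range inclusive.
 */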
int
zpool_do_scrub(int argc, char **argv)
{
    int c;
    scrub_cbdata_t cb;
    boolean_t wait = B_FALSE;
    int error;

    cb.cb_type = POOL_SCAN_SCRUB;
    cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
    cb.cb_date_start = cb.cb_date_end = 0;

    boolean_t is_error_scrub = B_FALSE;
    boolean_t is_pause = B_FALSE;
    boolean_t is_stop = B_FALSE;
    boolean_t is_txg_continue = B_FALSE;
    boolean_t scrub_all = B_FALSE;

    /* check options */
    while ((c = getopt(argc, argv, "aspweCE:S:")) != -1) {
        switch (c) {
        case 'a':
            scrub_all = B_TRUE;
            break;
        case 'e':
            is_error_scrub = B_TRUE;
            break;
        case 'E':
            /*
             * Round the date up.  It's better to scrub more data
             * than less.  This also makes the end date inclusive.
             */
            cb.cb_date_end = date_string_to_sec(optarg, B_TRUE);
            break;
        case 's':
            is_stop = B_TRUE;
            break;
        case 'S':
            cb.cb_date_start = date_string_to_sec(optarg, B_FALSE);
            break;
        case 'p':
            is_pause = B_TRUE;
            break;
        case 'w':
            wait = B_TRUE;
            break;
        case 'C':
            is_txg_continue = B_TRUE;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    if (is_pause && is_stop) {
        (void) fprintf(stderr, gettext("invalid option "
            "combination: -s and -p are mutually exclusive\n"));
        usage(B_FALSE);
    } else if (is_pause && is_txg_continue) {
        (void) fprintf(stderr, gettext("invalid option "
            "combination: -p and -C are mutually exclusive\n"));
        usage(B_FALSE);
    } else if (is_stop && is_txg_continue) {
        (void) fprintf(stderr, gettext("invalid option "
            "combination: -s and -C are mutually exclusive\n"));
        usage(B_FALSE);
    } else if (is_error_scrub && is_txg_continue) {
        (void) fprintf(stderr, gettext("invalid option "
            "combination: -e and -C are mutually exclusive\n"));
        usage(B_FALSE);
    } else {
        if (is_error_scrub)
            cb.cb_type = POOL_SCAN_ERRORSCRUB;

        if (is_pause) {
            cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
        } else if (is_stop) {
            cb.cb_type = POOL_SCAN_NONE;
        } else if (is_txg_continue) {
            cb.cb_scrub_cmd = POOL_SCRUB_FROM_LAST_TXG;
        } else {
            cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
        }
    }

    if ((cb.cb_date_start != 0 || cb.cb_date_end != 0) &&
        cb.cb_scrub_cmd != POOL_SCRUB_NORMAL) {
        (void) fprintf(stderr, gettext("invalid option combination: "
            "start/end date is available only with normal scrub\n"));
        usage(B_FALSE);
    }
    if (cb.cb_date_start != 0 && cb.cb_date_end != 0 &&
        cb.cb_date_start > cb.cb_date_end) {
        (void) fprintf(stderr, gettext("invalid arguments: "
            "end date has to be later than start date\n"));
        usage(B_FALSE);
    }

    if (wait && (cb.cb_type == POOL_SCAN_NONE ||
        cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
        (void) fprintf(stderr, gettext("invalid option combination: "
            "-w cannot be used with -p or -s\n"));
        usage(B_FALSE);
    }

    argc -= optind;
    argv += optind;

    if (argc < 1 && !scrub_all) {
        (void) fprintf(stderr, gettext("missing pool name argument\n"));
        usage(B_FALSE);
    }

    error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
        B_FALSE, scrub_callback, &cb);

    if (wait && !error) {
        zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
        error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
            B_FALSE, wait_callback, &act);
    }

    return (error);
}

/*
 * zpool resilver <pool> ...
 *
 * Restarts any in-progress resilver
 */
int
zpool_do_resilver(int argc, char **argv)
{
    int c;
    scrub_cbdata_t cb;

    cb.cb_type = POOL_SCAN_RESILVER;
    cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
    cb.cb_date_start = cb.cb_date_end = 0;

    /* check options */
    while ((c = getopt(argc, argv, "")) != -1) {
        switch (c) {
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    argc -= optind;
    argv += optind;

    if (argc < 1) {
        (void) fprintf(stderr, gettext("missing pool name argument\n"));
        usage(B_FALSE);
    }

    return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
        B_FALSE, scrub_callback, &cb));
}
/*
 * zpool trim [-dw] [-r <rate>] [-c | -s] <-a | pool> [<device> ...]
 *
 *	-a	Trim all pools.
 *	-c	Cancel.  Ends any in-progress trim.
 *	-d	Secure trim.  Requires kernel and device support.
 *	-r <rate> Sets the TRIM rate in bytes (per second).  Supports
 *		adding a multiplier suffix such as 'k' or 'm'.
 *	-s	Suspend.  TRIM can then be restarted with no flags.
 *	-w	Wait.  Blocks until trimming has completed.
 */
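/*
 * Illustrative invocations (hypothetical names):
 *
 *	zpool trim tank			# start trimming every eligible vdev
 *	zpool trim -w -r 64m tank	# rate-limit to 64M/s and wait
 *	zpool trim -s tank		# suspend; restart later with no flags
 *	zpool trim -a			# trim all pools
 */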
&cbdata);8799zpool_close(zhp);8800} else {8801char *poolname = argv[0];8802zpool_handle_t *zhp = zpool_open(g_zfs, poolname);8803if (zhp == NULL)8804return (-1);8805/* leaf vdevs specified, trim only those */8806cbdata.trim_flags.fullpool = B_FALSE;8807nvlist_t *vdevs = fnvlist_alloc();8808for (int i = 1; i < argc; i++) {8809fnvlist_add_boolean(vdevs, argv[i]);8810}8811error = zpool_trim(zhp, cbdata.cmd_type, vdevs,8812&cbdata.trim_flags);8813fnvlist_free(vdevs);8814zpool_close(zhp);8815}88168817return (error);8818}88198820/*8821* Converts a total number of seconds to a human readable string broken8822* down in to days/hours/minutes/seconds.8823*/8824static void8825secs_to_dhms(uint64_t total, char *buf)8826{8827uint64_t days = total / 60 / 60 / 24;8828uint64_t hours = (total / 60 / 60) % 24;8829uint64_t mins = (total / 60) % 60;8830uint64_t secs = (total % 60);88318832if (days > 0) {8833(void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",8834(u_longlong_t)days, (u_longlong_t)hours,8835(u_longlong_t)mins, (u_longlong_t)secs);8836} else {8837(void) sprintf(buf, "%02llu:%02llu:%02llu",8838(u_longlong_t)hours, (u_longlong_t)mins,8839(u_longlong_t)secs);8840}8841}88428843/*8844* Print out detailed error scrub status.8845*/8846static void8847print_err_scrub_status(pool_scan_stat_t *ps)8848{8849time_t start, end, pause;8850uint64_t total_secs_left;8851uint64_t secs_left, mins_left, hours_left, days_left;8852uint64_t examined, to_be_examined;88538854if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {8855return;8856}88578858(void) printf(gettext(" scrub: "));88598860start = ps->pss_error_scrub_start;8861end = ps->pss_error_scrub_end;8862pause = ps->pss_pass_error_scrub_pause;8863examined = ps->pss_error_scrub_examined;8864to_be_examined = ps->pss_error_scrub_to_be_examined;88658866assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);88678868if (ps->pss_error_scrub_state == DSS_FINISHED) {8869total_secs_left = end - start;8870days_left = total_secs_left / 60 / 60 / 24;8871hours_left = (total_secs_left / 60 / 60) % 24;8872mins_left = (total_secs_left / 60) % 60;8873secs_left = (total_secs_left % 60);88748875(void) printf(gettext("scrubbed %llu error blocks in %llu days "8876"%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,8877(u_longlong_t)days_left, (u_longlong_t)hours_left,8878(u_longlong_t)mins_left, (u_longlong_t)secs_left,8879ctime(&end));88808881return;8882} else if (ps->pss_error_scrub_state == DSS_CANCELED) {8883(void) printf(gettext("error scrub canceled on %s"),8884ctime(&end));8885return;8886}8887assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);88888889/* Error scrub is in progress. 

/*
 * Print out detailed scrub status.
 */
static void
print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
{
	time_t start, end, pause;
	uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
	uint64_t elapsed, scan_rate, issue_rate;
	double fraction_done;
	char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
	char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];

	printf(" ");
	printf_color(ANSI_BOLD, gettext("scan:"));
	printf(" ");

	/* If there's never been a scan, there's not much to say. */
	if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
	    ps->pss_func >= POOL_SCAN_FUNCS) {
		(void) printf(gettext("none requested\n"));
		return;
	}

	start = ps->pss_start_time;
	end = ps->pss_end_time;
	pause = ps->pss_pass_scrub_pause;

	zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));

	int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
	int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
	assert(is_resilver || is_scrub);

	/* Scan is finished or canceled. */
	if (ps->pss_state == DSS_FINISHED) {
		secs_to_dhms(end - start, time_buf);

		if (is_scrub) {
			(void) printf(gettext("scrub repaired %s "
			    "in %s with %llu errors on %s"), processed_buf,
			    time_buf, (u_longlong_t)ps->pss_errors,
			    ctime(&end));
		} else if (is_resilver) {
			(void) printf(gettext("resilvered %s "
			    "in %s with %llu errors on %s"), processed_buf,
			    time_buf, (u_longlong_t)ps->pss_errors,
			    ctime(&end));
		}
		return;
	} else if (ps->pss_state == DSS_CANCELED) {
		if (is_scrub) {
			(void) printf(gettext("scrub canceled on %s"),
			    ctime(&end));
		} else if (is_resilver) {
			(void) printf(gettext("resilver canceled on %s"),
			    ctime(&end));
		}
		return;
	}

	assert(ps->pss_state == DSS_SCANNING);

	/* Scan is in progress. Resilvers can't be paused. */
	if (is_scrub) {
		if (pause == 0) {
			(void) printf(gettext("scrub in progress since %s"),
			    ctime(&start));
		} else {
			(void) printf(gettext("scrub paused since %s"),
			    ctime(&pause));
			(void) printf(gettext("\tscrub started on %s"),
			    ctime(&start));
		}
	} else if (is_resilver) {
		(void) printf(gettext("resilver in progress since %s"),
		    ctime(&start));
	}

	scanned = ps->pss_examined;
	pass_scanned = ps->pss_pass_exam;
	issued = ps->pss_issued;
	pass_issued = ps->pss_pass_issued;
	total_s = ps->pss_to_examine;
	total_i = ps->pss_to_examine - ps->pss_skipped;

	/* we are only done with a block once we have issued the IO for it */
	fraction_done = (double)issued / total_i;

	/* elapsed time for this pass, rounding up to 1 if it's 0 */
	elapsed = time(NULL) - ps->pss_pass_start;
	elapsed -= ps->pss_pass_scrub_spent_paused;
	elapsed = (elapsed != 0) ? elapsed : 1;

	scan_rate = pass_scanned / elapsed;
	issue_rate = pass_issued / elapsed;

	/* format all of the numbers we will be reporting */
	zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
	zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
	zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
	zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));

	/* do not print estimated time if we have a paused scrub */
	(void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
	if (pause == 0 && scan_rate > 0) {
		zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
		(void) printf(gettext(" at %s/s"), srate_buf);
	}
	(void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
	if (pause == 0 && issue_rate > 0) {
		zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
		(void) printf(gettext(" at %s/s"), irate_buf);
	}
	(void) printf(gettext("\n"));

	if (is_resilver) {
		(void) printf(gettext("\t%s resilvered, %.2f%% done"),
		    processed_buf, 100 * fraction_done);
	} else if (is_scrub) {
		(void) printf(gettext("\t%s repaired, %.2f%% done"),
		    processed_buf, 100 * fraction_done);
	}

	if (pause == 0) {
		/*
		 * Only provide an estimate iff:
		 * 1) we haven't yet issued all we expected, and
		 * 2) the issue rate exceeds 10 MB/s, and
		 * 3) it's either:
		 *    a) a resilver which has started repairs, or
		 *    b) a scrub which has entered the issue phase.
		 */
		if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
		    ((is_resilver && ps->pss_processed > 0) ||
		    (is_scrub && issued > 0))) {
			secs_to_dhms((total_i - issued) / issue_rate, time_buf);
			(void) printf(gettext(", %s to go\n"), time_buf);
		} else {
			(void) printf(gettext(", no estimated "
			    "completion time\n"));
		}
	} else {
		(void) printf(gettext("\n"));
	}
}
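
/*
 * Print the scan status line for a single top-level vdev that is being
 * (or has been) sequentially rebuilt, in the same format as the
 * scrub/resilver output above.
 */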
static void
print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
{
	if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
		return;

	printf(" ");
	printf_color(ANSI_BOLD, gettext("scan:"));
	printf(" ");

	uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
	uint64_t bytes_issued = vrs->vrs_bytes_issued;
	uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
	uint64_t bytes_est_s = vrs->vrs_bytes_est;
	uint64_t bytes_est_i = vrs->vrs_bytes_est;
	if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
		bytes_est_i -= vrs->vrs_pass_bytes_skipped;
	uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
	    (vrs->vrs_pass_time_ms + 1)) * 1000;
	uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
	    (vrs->vrs_pass_time_ms + 1)) * 1000;
	double scan_pct = MIN((double)bytes_scanned * 100 /
	    (bytes_est_s + 1), 100);

	/* Format all of the numbers we will be reporting */
	char bytes_scanned_buf[7], bytes_issued_buf[7];
	char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
	char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
	zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
	    sizeof (bytes_scanned_buf));
	zfs_nicebytes(bytes_issued, bytes_issued_buf,
	    sizeof (bytes_issued_buf));
	zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
	    sizeof (bytes_rebuilt_buf));
	zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
	zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));

	time_t start = vrs->vrs_start_time;
	time_t end = vrs->vrs_end_time;

	/* Rebuild is finished or canceled. */
	if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
		secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
		(void) printf(gettext("resilvered (%s) %s in %s "
		    "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
		    time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
		return;
	} else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
		(void) printf(gettext("resilver (%s) canceled on %s"),
		    vdev_name, ctime(&end));
		return;
	} else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
		(void) printf(gettext("resilver (%s) in progress since %s"),
		    vdev_name, ctime(&start));
	}

	assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);

	(void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
	    bytes_est_s_buf);
	if (scan_rate > 0) {
		zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
		(void) printf(gettext(" at %s/s"), scan_rate_buf);
	}
	(void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
	    bytes_est_i_buf);
	if (issue_rate > 0) {
		zfs_nicebytes(issue_rate, issue_rate_buf,
		    sizeof (issue_rate_buf));
		(void) printf(gettext(" at %s/s"), issue_rate_buf);
	}
	(void) printf(gettext("\n"));

	(void) printf(gettext("\t%s resilvered, %.2f%% done"),
	    bytes_rebuilt_buf, scan_pct);

	if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
		if (bytes_est_s >= bytes_scanned &&
		    scan_rate >= 10 * 1024 * 1024) {
			secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
			    time_buf);
			(void) printf(gettext(", %s to go\n"), time_buf);
		} else {
			(void) printf(gettext(", no estimated "
			    "completion time\n"));
		}
	} else {
		(void) printf(gettext("\n"));
	}
}

/*
 * Print rebuild status for top-level vdevs.
 */
static void
print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	for (uint_t c = 0; c < children; c++) {
		vdev_rebuild_stat_t *vrs;
		uint_t i;

		if (nvlist_lookup_uint64_array(child[c],
		    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
			char *name = zpool_vdev_name(g_zfs, zhp,
			    child[c], VDEV_NAME_TYPE_ID);
			print_rebuild_status_impl(vrs, i, name);
			free(name);
		}
	}
}

/*
 * As we don't scrub checkpointed blocks, we want to warn the user that we
 * skipped scanning some blocks if a checkpoint exists or existed at any
 * time during the scan. If a sequential instead of healing reconstruction
 * was performed then the blocks were reconstructed. However, their checksums
 * have not been verified so we still print the warning.
 */
static void
print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
{
	if (ps == NULL || pcs == NULL)
		return;

	if (pcs->pcs_state == CS_NONE ||
	    pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
		return;

	assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);

	if (ps->pss_state == DSS_NONE)
		return;

	if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
	    ps->pss_end_time < pcs->pcs_start_time)
		return;

	if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
		(void) printf(gettext(" scan warning: skipped blocks "
		    "that are only referenced by the checkpoint.\n"));
	} else {
		assert(ps->pss_state == DSS_SCANNING);
		(void) printf(gettext(" scan warning: skipping blocks "
		    "that are only referenced by the checkpoint.\n"));
	}
}

/*
 * Returns B_TRUE if there is an active rebuild in progress. Otherwise,
 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for
 * the last completed (or cancelled) rebuild.
 */
static boolean_t
check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
{
	nvlist_t **child;
	uint_t children;
	boolean_t rebuilding = B_FALSE;
	uint64_t end_time = 0;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	for (uint_t c = 0; c < children; c++) {
		vdev_rebuild_stat_t *vrs;
		uint_t i;

		if (nvlist_lookup_uint64_array(child[c],
		    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {

			if (vrs->vrs_end_time > end_time)
				end_time = vrs->vrs_end_time;

			if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
				rebuilding = B_TRUE;
				end_time = 0;
				break;
			}
		}
	}

	if (rebuild_end_time != NULL)
		*rebuild_end_time = end_time;

	return (rebuilding);
}
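
/*
 * Collect the status of one vdev (state, error counts, space usage, and
 * any requested initialize/trim/power details) into an nvlist under
 * 'item', recursing into its children; this feeds the nvlist-based
 * (e.g. JSON) form of 'zpool status'.
 */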
static void
vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    int depth, boolean_t isspare, char *parent, nvlist_t *item)
{
	nvlist_t *vds, **child, *ch = NULL;
	uint_t vsc, children;
	vdev_stat_t *vs;
	char *vname;
	uint64_t notpresent;
	const char *type, *path;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;
	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
		return;

	if (cb->cb_print_unhealthy && depth > 0 &&
	    for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
		return;
	}
	vname = zpool_vdev_name(g_zfs, zhp, nv,
	    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
	vds = fnvlist_alloc();
	fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int);
	if (cb->cb_flat_vdevs && parent != NULL) {
		fnvlist_add_string(vds, "parent", parent);
	}

	if (isspare) {
		if (vs->vs_aux == VDEV_AUX_SPARED) {
			fnvlist_add_string(vds, "state", "INUSE");
			used_by_other(zhp, nv, vds);
		} else if (vs->vs_state == VDEV_STATE_HEALTHY)
			fnvlist_add_string(vds, "state", "AVAIL");
	} else {
		if (vs->vs_alloc) {
			nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_space) {
			nice_num_str_nvlist(vds, "total_space", vs->vs_space,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_dspace) {
			nice_num_str_nvlist(vds, "def_space", vs->vs_dspace,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_rsize) {
			nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_esize) {
			nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		if (vs->vs_self_healed) {
			nice_num_str_nvlist(vds, "self_healed",
			    vs->vs_self_healed, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		}
		if (vs->vs_pspace) {
			nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
		}
		nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(vds, "checksum_errors",
		    vs->vs_checksum_errors, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICENUM_1024);
		if (vs->vs_scan_processed) {
			nice_num_str_nvlist(vds, "scan_processed",
			    vs->vs_scan_processed, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		}
		if (vs->vs_checkpoint_space) {
			nice_num_str_nvlist(vds, "checkpoint_space",
			    vs->vs_checkpoint_space, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		}
		if (vs->vs_resilver_deferred) {
			nice_num_str_nvlist(vds, "resilver_deferred",
			    vs->vs_resilver_deferred, B_TRUE,
			    cb->cb_json_as_int, ZFS_NICENUM_1024);
		}
		if (children == 0) {
			nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
		}
		if (cb->cb_print_power) {
			if (children == 0) {
				/* Only leaf vdevs have physical slots */
				switch (zpool_power_current_state(zhp, (char *)
				    fnvlist_lookup_string(nv,
				    ZPOOL_CONFIG_PATH))) {
				case 0:
					fnvlist_add_string(vds, "power_state",
					    "off");
					break;
				case 1:
					fnvlist_add_string(vds, "power_state",
					    "on");
					break;
				default:
					fnvlist_add_string(vds, "power_state",
					    "-");
				}
			} else {
				fnvlist_add_string(vds, "power_state", "-");
			}
		}
	}

	if (cb->cb_print_dio_verify) {
		nice_num_str_nvlist(vds, "dio_verify_errors",
		    vs->vs_dio_verify_errors, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICENUM_1024);
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &notpresent) == 0) {
		nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,
		    1, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		fnvlist_add_string(vds, "was",
		    fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH));
	} else if (vs->vs_aux != VDEV_AUX_NONE) {
		fnvlist_add_string(vds, "aux", vdev_aux_str[vs->vs_aux]);
	} else if (children == 0 && !isspare &&
	    getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
	    VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
	    vs->vs_configured_ashift < vs->vs_physical_ashift) {
		nice_num_str_nvlist(vds, "configured_ashift",
		    vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int,
		    ZFS_NICENUM_1024);
		nice_num_str_nvlist(vds, "physical_ashift",
		    vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int,
		    ZFS_NICENUM_1024);
	}
	if (vs->vs_scan_removing != 0) {
		nice_num_str_nvlist(vds, "removing", vs->vs_scan_removing,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
	} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
		nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
	}

	if (cb->vcdl != NULL) {
		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
			zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp),
			    path, vds);
		}
	}

	if (children == 0) {
		if (cb->cb_print_vdev_init) {
			if (vs->vs_initialize_state != 0) {
				uint64_t st = vs->vs_initialize_state;
				fnvlist_add_string(vds, "init_state",
				    vdev_init_state_str[st]);
				nice_num_str_nvlist(vds, "initialized",
				    vs->vs_initialize_bytes_done,
				    cb->cb_literal, cb->cb_json_as_int,
				    ZFS_NICENUM_BYTES);
				nice_num_str_nvlist(vds, "to_initialize",
				    vs->vs_initialize_bytes_est,
				    cb->cb_literal, cb->cb_json_as_int,
				    ZFS_NICENUM_BYTES);
				nice_num_str_nvlist(vds, "init_time",
				    vs->vs_initialize_action_time,
				    cb->cb_literal, cb->cb_json_as_int,
				    ZFS_NICE_TIMESTAMP);
				nice_num_str_nvlist(vds, "init_errors",
				    vs->vs_initialize_errors,
				    cb->cb_literal, cb->cb_json_as_int,
				    ZFS_NICENUM_1024);
			} else {
				fnvlist_add_string(vds, "init_state",
				    "UNINITIALIZED");
			}
		}
		if (cb->cb_print_vdev_trim) {
			if (vs->vs_trim_notsup == 0) {
				if (vs->vs_trim_state != 0) {
					uint64_t st = vs->vs_trim_state;
					fnvlist_add_string(vds, "trim_state",
					    vdev_trim_state_str[st]);
					nice_num_str_nvlist(vds, "trimmed",
					    vs->vs_trim_bytes_done,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(vds, "to_trim",
					    vs->vs_trim_bytes_est,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(vds, "trim_time",
					    vs->vs_trim_action_time,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICE_TIMESTAMP);
					nice_num_str_nvlist(vds, "trim_errors",
					    vs->vs_trim_errors,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_1024);
				} else
					fnvlist_add_string(vds, "trim_state",
					    "UNTRIMMED");
			}
			nice_num_str_nvlist(vds, "trim_notsup",
			    vs->vs_trim_notsup, B_TRUE,
			    cb->cb_json_as_int, ZFS_NICENUM_1024);
		}
	} else {
		ch = fnvlist_alloc();
	}

	if (cb->cb_flat_vdevs && children == 0) {
		fnvlist_add_nvlist(item, vname, vds);
	}

	for (int c = 0; c < children; c++) {
		uint64_t islog = B_FALSE, ishole = B_FALSE;
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &islog);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &ishole);
		if (islog || ishole)
			continue;
		if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
			continue;
		if (cb->cb_flat_vdevs) {
			vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
			    vname, item);
		}
		vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
		    vname, ch);
	}

	if (ch != NULL) {
		if (!nvlist_empty(ch))
			fnvlist_add_nvlist(vds, "vdevs", ch);
		fnvlist_free(ch);
	}
	fnvlist_add_nvlist(item, vname, vds);
	fnvlist_free(vds);
	free(vname);
}
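
/*
 * Add the vdevs belonging to the given allocation class (for example the
 * log class) under 'item', emitting one entry per matching child of the
 * root vdev via vdev_stats_nvlist().
 */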
static void
class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    const char *class, nvlist_t *item)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *class_obj = NULL;

	if (!cb->cb_flat_vdevs)
		class_obj = fnvlist_alloc();

	assert(zhp != NULL || !cb->cb_verbose);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE;
		const char *bias = NULL;
		const char *type = NULL;
		char *name = zpool_vdev_name(g_zfs, zhp, child[c],
		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);

		if (is_log) {
			bias = (char *)VDEV_ALLOC_CLASS_LOGS;
		} else {
			(void) nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
			(void) nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_TYPE, &type);
		}

		if (bias == NULL || strcmp(bias, class) != 0)
			continue;
		if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
			continue;

		if (cb->cb_flat_vdevs) {
			vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
			    NULL, item);
		} else {
			vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
			    NULL, class_obj);
		}
		free(name);
	}
	if (!cb->cb_flat_vdevs) {
		if (!nvlist_empty(class_obj))
			fnvlist_add_nvlist(item, class, class_obj);
		fnvlist_free(class_obj);
	}
}
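
/*
 * Add the pool's L2ARC (cache) devices, if any, under 'item'.
 */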
static void
l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    nvlist_t *item)
{
	nvlist_t *l2c = NULL, **l2cache;
	uint_t nl2cache;
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		if (nl2cache == 0)
			return;
		if (!cb->cb_flat_vdevs)
			l2c = fnvlist_alloc();
		for (int i = 0; i < nl2cache; i++) {
			if (cb->cb_flat_vdevs) {
				vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
				    B_FALSE, NULL, item);
			} else {
				vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
				    B_FALSE, NULL, l2c);
			}
		}
	}
	if (!cb->cb_flat_vdevs) {
		if (!nvlist_empty(l2c))
			fnvlist_add_nvlist(item, "l2cache", l2c);
		fnvlist_free(l2c);
	}
}
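
/*
 * Add the pool's hot spares, if any, under 'item'; each spare is tagged
 * with its AVAIL/INUSE state by vdev_stats_nvlist().
 */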
static void
spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    nvlist_t *item)
{
	nvlist_t *sp = NULL, **spares;
	uint_t nspares;
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		if (nspares == 0)
			return;
		if (!cb->cb_flat_vdevs)
			sp = fnvlist_alloc();
		for (int i = 0; i < nspares; i++) {
			if (cb->cb_flat_vdevs) {
				vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
				    NULL, item);
			} else {
				vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
				    NULL, sp);
			}
		}
	}
	if (!cb->cb_flat_vdevs) {
		if (!nvlist_empty(sp))
			fnvlist_add_nvlist(item, "spares", sp);
		fnvlist_free(sp);
	}
}
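
/*
 * Record the pool's persistent error count under 'item' and, in verbose
 * mode, the list of file paths affected by those errors.
 */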
static void
errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
{
	uint64_t nerr;
	nvlist_t *config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
	    &nerr) == 0) {
		nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		if (nerr != 0 && cb->cb_verbose) {
			nvlist_t *nverrlist = NULL;
			if (zpool_get_errlog(zhp, &nverrlist) == 0) {
				int i = 0;
				int count = 0;
				size_t len = MAXPATHLEN * 2;
				nvpair_t *elem = NULL;

				for (nvpair_t *pair =
				    nvlist_next_nvpair(nverrlist, NULL);
				    pair != NULL;
				    pair = nvlist_next_nvpair(nverrlist, pair))
					count++;
				char **errl = (char **)malloc(
				    count * sizeof (char *));

				while ((elem = nvlist_next_nvpair(nverrlist,
				    elem)) != NULL) {
					nvlist_t *nv;
					uint64_t dsobj, obj;

					verify(nvpair_value_nvlist(elem,
					    &nv) == 0);
					verify(nvlist_lookup_uint64(nv,
					    ZPOOL_ERR_DATASET, &dsobj) == 0);
					verify(nvlist_lookup_uint64(nv,
					    ZPOOL_ERR_OBJECT, &obj) == 0);
					errl[i] = safe_malloc(len);
					zpool_obj_to_path(zhp, dsobj, obj,
					    errl[i++], len);
				}
				nvlist_free(nverrlist);
				fnvlist_add_string_array(item, "errlist",
				    (const char **)errl, count);
				for (int i = 0; i < count; ++i)
					free(errl[i]);
				free(errl);
			} else
				fnvlist_add_string(item, "errlist",
				    strerror(errno));
		}
	}
}

/*
 * Convert a single ddt_stat_t (block counts plus logical, physical, and
 * deflated sizes) into string entries on 'item'.
 */
static void
ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item)
{
	nice_num_str_nvlist(item, "blocks", dds->dds_blocks,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
	nice_num_str_nvlist(item, "logical_size", dds->dds_lsize,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
	nice_num_str_nvlist(item, "physical_size", dds->dds_psize,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
	nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
	nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
	nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
	nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
	nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize,
	    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
}

/*
 * When dedup stats were requested, add the DDT object totals, the summary
 * ddt_stat_t, and the per-bucket histogram under 'item'.
 */
static void
dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
{
	nvlist_t *config;
	if (cb->cb_dedup_stats) {
		ddt_histogram_t *ddh;
		ddt_stat_t *dds;
		ddt_object_t *ddo;
		nvlist_t *ddt_stat, *ddt_obj, *dedup;
		uint_t c;
		uint64_t cspace_prop;

		config = zpool_get_config(zhp, NULL);
		if (nvlist_lookup_uint64_array(config,
		    ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0)
			return;

		dedup = fnvlist_alloc();
		ddt_obj = fnvlist_alloc();
		nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		if (ddo->ddo_count == 0) {
			fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
			    ddt_obj);
			fnvlist_add_nvlist(item, "dedup_stats", dedup);
			fnvlist_free(ddt_obj);
			fnvlist_free(dedup);
			return;
		} else {
			nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			/*
			 * Squash cached size into in-core size to handle race.
			 * Only include cached size if it is available.
			 */
			cspace_prop = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_DEDUPCACHED, NULL);
			cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
			nice_num_str_nvlist(dedup, "cspace", cspace_prop,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
		}

		ddt_stat = fnvlist_alloc();
		if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
		    (uint64_t **)&dds, &c) == 0) {
			nvlist_t *total = fnvlist_alloc();
			if (dds->dds_blocks == 0)
				fnvlist_add_string(total, "blocks", "0");
			else
				ddt_stats_nvlist(dds, cb, total);
			fnvlist_add_nvlist(ddt_stat, "total", total);
			fnvlist_free(total);
		}
		if (nvlist_lookup_uint64_array(config,
		    ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) {
			nvlist_t *hist = fnvlist_alloc();
			nvlist_t *entry = NULL;
			char buf[16];
			for (int h = 0; h < 64; h++) {
				if (ddh->ddh_stat[h].dds_blocks != 0) {
					entry = fnvlist_alloc();
					ddt_stats_nvlist(&ddh->ddh_stat[h], cb,
					    entry);
					snprintf(buf, 16, "%d", h);
					fnvlist_add_nvlist(hist, buf, entry);
					fnvlist_free(entry);
				}
			}
			if (!nvlist_empty(hist))
				fnvlist_add_nvlist(ddt_stat, "histogram", hist);
			fnvlist_free(hist);
		}

		if (!nvlist_empty(ddt_obj)) {
			fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
			    ddt_obj);
		}
		fnvlist_free(ddt_obj);
		if (!nvlist_empty(ddt_stat)) {
			fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS,
			    ddt_stat);
		}
		fnvlist_free(ddt_stat);
		if (!nvlist_empty(dedup))
			fnvlist_add_nvlist(item, "dedup_stats", dedup);
		fnvlist_free(dedup);
	}
}
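
/*
 * If a raidz expansion is or was in progress, record its state, times,
 * and reflow progress under 'item'.
 */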
static void
raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
    nvlist_t *nvroot, nvlist_t *item)
{
	uint_t c;
	pool_raidz_expand_stat_t *pres = NULL;
	if (nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) {
		nvlist_t **child;
		uint_t children;
		nvlist_t *nv = fnvlist_alloc();
		verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    &child, &children) == 0);
		assert(pres->pres_expanding_vdev < children);
		char *name =
		    zpool_vdev_name(g_zfs, zhp,
		    child[pres->pres_expanding_vdev], 0);
		fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int);
		fnvlist_add_string(nv, "state",
		    pool_scan_state_str[pres->pres_state]);
		nice_num_str_nvlist(nv, "expanding_vdev",
		    pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int,
		    ZFS_NICENUM_1024);
		nice_num_str_nvlist(nv, "start_time", pres->pres_start_time,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(nv, "end_time", pres->pres_end_time,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(nv, "waiting_for_resilver",
		    pres->pres_waiting_for_resilver, B_TRUE,
		    cb->cb_json_as_int, ZFS_NICENUM_1024);
		fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv);
		fnvlist_free(nv);
		free(name);
	}
}
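
/*
 * If a checkpoint exists or is being discarded, record its state, start
 * time, and space usage under 'item'.
 */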
static void
checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb,
    nvlist_t *item)
{
	uint_t c;
	pool_checkpoint_stat_t *pcs = NULL;
	if (nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) {
		nvlist_t *nv = fnvlist_alloc();
		fnvlist_add_string(nv, "state",
		    checkpoint_state_str[pcs->pcs_state]);
		nice_num_str_nvlist(nv, "start_time",
		    pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int,
		    ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(nv, "space",
		    pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int,
		    ZFS_NICENUM_BYTES);
		fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv);
		fnvlist_free(nv);
	}
}
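
/*
 * If a device removal is or was in progress, record the removing vdev
 * and the copy progress under 'item'.
 */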
static void
removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
    nvlist_t *nvroot, nvlist_t *item)
{
	uint_t c;
	pool_removal_stat_t *prs = NULL;
	if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS,
	    (uint64_t **)&prs, &c) == 0) {
		if (prs->prs_state != DSS_NONE) {
			nvlist_t **child;
			uint_t children;
			verify(nvlist_lookup_nvlist_array(nvroot,
			    ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
			assert(prs->prs_removing_vdev < children);
			char *vdev_name = zpool_vdev_name(g_zfs, zhp,
			    child[prs->prs_removing_vdev], B_TRUE);
			nvlist_t *nv = fnvlist_alloc();
			fill_vdev_info(nv, zhp, vdev_name, B_FALSE,
			    cb->cb_json_as_int);
			fnvlist_add_string(nv, "state",
			    pool_scan_state_str[prs->prs_state]);
			nice_num_str_nvlist(nv, "removing_vdev",
			    prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			nice_num_str_nvlist(nv, "start_time",
			    prs->prs_start_time, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(nv, "end_time", prs->prs_end_time,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
			nice_num_str_nvlist(nv, "copied", prs->prs_copied,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
			nice_num_str_nvlist(nv, "mapping_memory",
			    prs->prs_mapping_memory, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
			fnvlist_add_nvlist(item,
			    ZPOOL_CONFIG_REMOVAL_STATS, nv);
			fnvlist_free(nv);
			free(vdev_name);
		}
	}
}
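
/*
 * Collect the scrub/resilver scan statistics, any error-scrub details,
 * and the per-vdev rebuild statistics under 'item'.
 */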
static void
scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
    nvlist_t *nvroot, nvlist_t *item)
{
	pool_scan_stat_t *ps = NULL;
	uint_t c;
	nvlist_t *scan = fnvlist_alloc();
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &c) == 0) {
		fnvlist_add_string(scan, "function",
		    pool_scan_func_str[ps->pss_func]);
		fnvlist_add_string(scan, "state",
		    pool_scan_state_str[ps->pss_state]);
		nice_num_str_nvlist(scan, "start_time", ps->pss_start_time,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(scan, "end_time", ps->pss_end_time,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "examined", ps->pss_examined,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "skipped", ps->pss_skipped,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "processed", ps->pss_processed,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "errors", ps->pss_errors,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(scan, "scrub_pause",
		    ps->pss_pass_scrub_pause, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(scan, "scrub_spent_paused",
		    ps->pss_pass_scrub_spent_paused,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(scan, "issued_bytes_per_scan",
		    ps->pss_pass_issued, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "issued", ps->pss_issued,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
		    ps->pss_error_scrub_start > ps->pss_start_time) {
			fnvlist_add_string(scan, "err_scrub_func",
			    pool_scan_func_str[ps->pss_error_scrub_func]);
			fnvlist_add_string(scan, "err_scrub_state",
			    pool_scan_state_str[ps->pss_error_scrub_state]);
			nice_num_str_nvlist(scan, "err_scrub_start_time",
			    ps->pss_error_scrub_start,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(scan, "err_scrub_end_time",
			    ps->pss_error_scrub_end,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(scan, "err_scrub_examined",
			    ps->pss_error_scrub_examined,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			nice_num_str_nvlist(scan, "err_scrub_to_examine",
			    ps->pss_error_scrub_to_be_examined,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			nice_num_str_nvlist(scan, "err_scrub_pause",
			    ps->pss_pass_error_scrub_pause,
			    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
		}
	}

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		vdev_rebuild_stat_t *vrs;
		uint_t i;
		char *name;
		nvlist_t *nv;
		nvlist_t *rebuild = fnvlist_alloc();
		uint64_t st;
		for (uint_t c = 0; c < children; c++) {
			if (nvlist_lookup_uint64_array(child[c],
			    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs,
			    &i) == 0) {
				if (vrs->vrs_state != VDEV_REBUILD_NONE) {
					nv = fnvlist_alloc();
					name = zpool_vdev_name(g_zfs, zhp,
					    child[c], VDEV_NAME_TYPE_ID);
					fill_vdev_info(nv, zhp, name, B_FALSE,
					    cb->cb_json_as_int);
					st = vrs->vrs_state;
					fnvlist_add_string(nv, "state",
					    vdev_rebuild_state_str[st]);
					nice_num_str_nvlist(nv, "start_time",
					    vrs->vrs_start_time, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICE_TIMESTAMP);
					nice_num_str_nvlist(nv, "end_time",
					    vrs->vrs_end_time, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICE_TIMESTAMP);
					nice_num_str_nvlist(nv, "scan_time",
					    vrs->vrs_scan_time_ms * 1000000,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_TIME);
					nice_num_str_nvlist(nv, "scanned",
					    vrs->vrs_bytes_scanned,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "issued",
					    vrs->vrs_bytes_issued,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "rebuilt",
					    vrs->vrs_bytes_rebuilt,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "to_scan",
					    vrs->vrs_bytes_est, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "errors",
					    vrs->vrs_errors, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICENUM_1024);
					nice_num_str_nvlist(nv, "pass_time",
					    vrs->vrs_pass_time_ms * 1000000,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_TIME);
					nice_num_str_nvlist(nv, "pass_scanned",
					    vrs->vrs_pass_bytes_scanned,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "pass_issued",
					    vrs->vrs_pass_bytes_issued,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "pass_skipped",
					    vrs->vrs_pass_bytes_skipped,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					fnvlist_add_nvlist(rebuild, name, nv);
					free(name);
				}
			}
		}
		if (!nvlist_empty(rebuild))
			fnvlist_add_nvlist(scan, "rebuild_stats", rebuild);
		fnvlist_free(rebuild);
	}

	if (!nvlist_empty(scan))
		fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan);
	fnvlist_free(scan);
}

/*
 * Print the scan status.
 */
static void
print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	uint64_t rebuild_end_time = 0, resilver_end_time = 0;
	boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
	boolean_t have_errorscrub = B_FALSE;
	boolean_t active_resilver = B_FALSE;
	pool_checkpoint_stat_t *pcs = NULL;
	pool_scan_stat_t *ps = NULL;
	uint_t c;
	time_t scrub_start = 0, errorscrub_start = 0;

	if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &c) == 0) {
		if (ps->pss_func == POOL_SCAN_RESILVER) {
			resilver_end_time = ps->pss_end_time;
			active_resilver = (ps->pss_state == DSS_SCANNING);
		}

		have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
		have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
		scrub_start = ps->pss_start_time;
		if (c > offsetof(pool_scan_stat_t,
		    pss_pass_error_scrub_pause) / 8) {
			have_errorscrub = (ps->pss_error_scrub_func ==
			    POOL_SCAN_ERRORSCRUB);
			errorscrub_start = ps->pss_error_scrub_start;
		}
	}

	boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
	boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));

	/* Always print the scrub status when available. */
	if (have_scrub && scrub_start > errorscrub_start)
		print_scan_scrub_resilver_status(ps);
	else if (have_errorscrub && errorscrub_start >= scrub_start)
		print_err_scrub_status(ps);

	/*
	 * When there is an active resilver or rebuild print its status.
	 * Otherwise print the status of the last resilver or rebuild.
	 */
	if (active_resilver || (!active_rebuild && have_resilver &&
	    resilver_end_time && resilver_end_time > rebuild_end_time)) {
		print_scan_scrub_resilver_status(ps);
	} else if (active_rebuild || (!active_resilver && have_rebuild &&
	    rebuild_end_time && rebuild_end_time > resilver_end_time)) {
		print_rebuild_status(zhp, nvroot);
	}

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
	print_checkpoint_scan_warning(ps, pcs);
}

/*
 * Print out detailed removal status.
 */
static void
print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
{
	char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
	time_t start, end;
	nvlist_t *config, *nvroot;
	nvlist_t **child;
	uint_t children;
	char *vdev_name;

	if (prs == NULL || prs->prs_state == DSS_NONE)
		return;

	/*
	 * Determine name of vdev.
	 */
	config = zpool_get_config(zhp, NULL);
	nvroot = fnvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE);
	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);
	assert(prs->prs_removing_vdev < children);
	vdev_name = zpool_vdev_name(g_zfs, zhp,
	    child[prs->prs_removing_vdev], B_TRUE);

	printf_color(ANSI_BOLD, gettext("remove: "));

	start = prs->prs_start_time;
	end = prs->prs_end_time;
	zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));

	/*
	 * Removal is finished or canceled.
	 */
	if (prs->prs_state == DSS_FINISHED) {
		uint64_t minutes_taken = (end - start) / 60;

		(void) printf(gettext("Removal of vdev %llu copied %s "
		    "in %lluh%um, completed on %s"),
		    (longlong_t)prs->prs_removing_vdev,
		    copied_buf,
		    (u_longlong_t)(minutes_taken / 60),
		    (uint_t)(minutes_taken % 60),
		    ctime((time_t *)&end));
	} else if (prs->prs_state == DSS_CANCELED) {
		(void) printf(gettext("Removal of %s canceled on %s"),
		    vdev_name, ctime(&end));
	} else {
		uint64_t copied, total, elapsed, rate, mins_left, hours_left;
		double fraction_done;

		assert(prs->prs_state == DSS_SCANNING);

		/*
		 * Removal is in progress.
		 */
		(void) printf(gettext(
		    "Evacuation of %s in progress since %s"),
		    vdev_name, ctime(&start));

		copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
		total = prs->prs_to_copy;
		fraction_done = (double)copied / total;

		/* elapsed time for this pass */
		elapsed = time(NULL) - prs->prs_start_time;
		elapsed = elapsed > 0 ? elapsed : 1;
		rate = copied / elapsed;
		rate = rate > 0 ? rate : 1;
		mins_left = ((total - copied) / rate) / 60;
		hours_left = mins_left / 60;

		zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
		zfs_nicenum(total, total_buf, sizeof (total_buf));
		zfs_nicenum(rate, rate_buf, sizeof (rate_buf));

		/*
		 * do not print estimated time if hours_left is more than
		 * 30 days
		 */
		(void) printf(gettext(
		    "\t%s copied out of %s at %s/s, %.2f%% done"),
		    examined_buf, total_buf, rate_buf, 100 * fraction_done);
		if (hours_left < (30 * 24)) {
			(void) printf(gettext(", %lluh%um to go\n"),
			    (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
		} else {
			(void) printf(gettext(
			    ", (copy is slow, no estimated time)\n"));
		}
	}
	free(vdev_name);

	if (prs->prs_mapping_memory > 0) {
		char mem_buf[7];
		zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
		(void) printf(gettext(
		    "\t%s memory used for removed device mappings\n"),
		    mem_buf);
	}
}

/*
 * Print out detailed raidz expansion status.
 */
static void
print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
{
	char copied_buf[7];

	if (pres == NULL || pres->pres_state == DSS_NONE)
		return;

	/*
	 * Determine name of vdev.
	 */
	nvlist_t *config = zpool_get_config(zhp, NULL);
	nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE);
	nvlist_t **child;
	uint_t children;
	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);
	assert(pres->pres_expanding_vdev < children);

	printf_color(ANSI_BOLD, gettext("expand: "));

	time_t start = pres->pres_start_time;
	time_t end = pres->pres_end_time;
	char *vname =
	    zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);
	zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));

	/*
	 * Expansion is finished or canceled.
	 */
	if (pres->pres_state == DSS_FINISHED) {
		char time_buf[32];
		secs_to_dhms(end - start, time_buf);

		(void) printf(gettext("expanded %s-%u copied %s in %s, "
		    "on %s"), vname, (int)pres->pres_expanding_vdev,
		    copied_buf, time_buf, ctime((time_t *)&end));
	} else {
		char examined_buf[7], total_buf[7], rate_buf[7];
		uint64_t copied, total, elapsed, rate, secs_left;
		double fraction_done;

		assert(pres->pres_state == DSS_SCANNING);

		/*
		 * Expansion is in progress.
		 */
		(void) printf(gettext(
		    "expansion of %s-%u in progress since %s"),
		    vname, (int)pres->pres_expanding_vdev, ctime(&start));

		copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;
		total = pres->pres_to_reflow;
		fraction_done = (double)copied / total;

		/* elapsed time for this pass */
		elapsed = time(NULL) - pres->pres_start_time;
		elapsed = elapsed > 0 ? elapsed : 1;
		rate = copied / elapsed;
		rate = rate > 0 ? rate : 1;
		secs_left = (total - copied) / rate;

		zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
		zfs_nicenum(total, total_buf, sizeof (total_buf));
		zfs_nicenum(rate, rate_buf, sizeof (rate_buf));

		/*
		 * do not print estimated time if secs_left is more than
		 * 30 days
		 */
		(void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),
		    examined_buf, total_buf, rate_buf, 100 * fraction_done);
		if (pres->pres_waiting_for_resilver) {
			(void) printf(gettext(", paused for resilver or "
			    "clear\n"));
		} else if (secs_left < (30 * 24 * 3600)) {
			char time_buf[32];
			secs_to_dhms(secs_left, time_buf);
			(void) printf(gettext(", %s to go\n"), time_buf);
		} else {
			(void) printf(gettext(
			    ", (copy is slow, no estimated time)\n"));
		}
	}
	free(vname);
}

/*
 * Print the pool checkpoint line: creation time and space consumed, or
 * the discard progress.
 */
static void
print_checkpoint_status(pool_checkpoint_stat_t *pcs)
{
	time_t start;
	char space_buf[7];

	if (pcs == NULL || pcs->pcs_state == CS_NONE)
		return;

	(void) printf(gettext("checkpoint: "));

	start = pcs->pcs_start_time;
	zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));

	if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
		char *date = ctime(&start);

		/*
		 * ctime() adds a newline at the end of the generated
		 * string, thus the weird format specifier and the
		 * strlen() call used to chop it off from the output.
		 */
		(void) printf(gettext("created %.*s, consumes %s\n"),
		    (int)(strlen(date) - 1), date, space_buf);
		return;
	}

	assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);

	(void) printf(gettext("discarding, %s remaining.\n"),
	    space_buf);
}
static void
print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
    zpool_status_t reason, zpool_errata_t errata, nvlist_t *item)
{
    char status[ST_SIZE];
    char action[AC_SIZE];
    memset(status, 0, ST_SIZE);
    memset(action, 0, AC_SIZE);

    switch (reason) {
    case ZPOOL_STATUS_MISSING_DEV_R:
        snprintf(status, ST_SIZE, gettext("One or more devices could "
            "not be opened. Sufficient replicas exist for\n\tthe pool "
            "to continue functioning in a degraded state.\n"));
        snprintf(action, AC_SIZE, gettext("Attach the missing device "
            "and online it using 'zpool online'.\n"));
        break;

    case ZPOOL_STATUS_MISSING_DEV_NR:
        snprintf(status, ST_SIZE, gettext("One or more devices could "
            "not be opened. There are insufficient\n\treplicas for the"
            " pool to continue functioning.\n"));
        snprintf(action, AC_SIZE, gettext("Attach the missing device "
            "and online it using 'zpool online'.\n"));
        break;

    case ZPOOL_STATUS_CORRUPT_LABEL_R:
        snprintf(status, ST_SIZE, gettext("One or more devices could "
            "not be used because the label is missing or\n\tinvalid. "
            "Sufficient replicas exist for the pool to continue\n\t"
            "functioning in a degraded state.\n"));
        snprintf(action, AC_SIZE, gettext("Replace the device using "
            "'zpool replace'.\n"));
        break;

    case ZPOOL_STATUS_CORRUPT_LABEL_NR:
        snprintf(status, ST_SIZE, gettext("One or more devices could "
            "not be used because the label is missing \n\tor invalid. "
            "There are insufficient replicas for the pool to "
            "continue\n\tfunctioning.\n"));
        zpool_explain_recover(zpool_get_handle(zhp),
            zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
            action, AC_SIZE);
        break;

    case ZPOOL_STATUS_FAILING_DEV:
        snprintf(status, ST_SIZE, gettext("One or more devices have "
            "experienced an unrecoverable error. An\n\tattempt was "
            "made to correct the error. Applications are "
            "unaffected.\n"));
        snprintf(action, AC_SIZE, gettext("Determine if the "
            "device needs to be replaced, and clear the errors\n\tusing"
            " 'zpool clear' or replace the device with 'zpool "
            "replace'.\n"));
        break;

    case ZPOOL_STATUS_OFFLINE_DEV:
        snprintf(status, ST_SIZE, gettext("One or more devices have "
            "been taken offline by the administrator.\n\tSufficient "
            "replicas exist for the pool to continue functioning in "
            "a\n\tdegraded state.\n"));
        snprintf(action, AC_SIZE, gettext("Online the device "
            "using 'zpool online' or replace the device with\n\t'zpool "
            "replace'.\n"));
        break;

    case ZPOOL_STATUS_REMOVED_DEV:
        snprintf(status, ST_SIZE, gettext("One or more devices have "
            "been removed.\n\tSufficient replicas exist for the pool "
            "to continue functioning in a\n\tdegraded state.\n"));
        snprintf(action, AC_SIZE, gettext("Online the device "
            "using 'zpool online' or replace the device with\n\t'zpool "
            "replace'.\n"));
        break;

    case ZPOOL_STATUS_RESILVERING:
    case ZPOOL_STATUS_REBUILDING:
        snprintf(status, ST_SIZE, gettext("One or more devices are "
            "currently being resilvered. The pool will\n\tcontinue "
            "to function, possibly in a degraded state.\n"));
        snprintf(action, AC_SIZE, gettext("Wait for the resilver to "
            "complete.\n"));
        break;

    case ZPOOL_STATUS_REBUILD_SCRUB:
        snprintf(status, ST_SIZE, gettext("One or more devices have "
            "been sequentially resilvered, scrubbing\n\tthe pool "
            "is recommended.\n"));
        snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to "
            "verify all data checksums.\n"));
        break;

    case ZPOOL_STATUS_CORRUPT_DATA:
        snprintf(status, ST_SIZE, gettext("One or more devices have "
            "experienced an error resulting in data\n\tcorruption. "
            "Applications may be affected.\n"));
        snprintf(action, AC_SIZE, gettext("Restore the file in question"
            " if possible. Otherwise restore the\n\tentire pool from "
            "backup.\n"));
        break;

    case ZPOOL_STATUS_CORRUPT_POOL:
        snprintf(status, ST_SIZE, gettext("The pool metadata is "
            "corrupted and the pool cannot be opened.\n"));
        zpool_explain_recover(zpool_get_handle(zhp),
            zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
            action, AC_SIZE);
        break;

    case ZPOOL_STATUS_VERSION_OLDER:
        snprintf(status, ST_SIZE, gettext("The pool is formatted using "
            "a legacy on-disk format. The pool can\n\tstill be used, "
            "but some features are unavailable.\n"));
        snprintf(action, AC_SIZE, gettext("Upgrade the pool using "
            "'zpool upgrade'. Once this is done, the\n\tpool will no "
            "longer be accessible on software that does not support\n\t"
            "feature flags.\n"));
        break;

    case ZPOOL_STATUS_VERSION_NEWER:
        snprintf(status, ST_SIZE, gettext("The pool has been upgraded "
            "to a newer, incompatible on-disk version.\n\tThe pool "
            "cannot be accessed on this system.\n"));
        snprintf(action, AC_SIZE, gettext("Access the pool from a "
            "system running more recent software, or\n\trestore the "
            "pool from backup.\n"));
        break;

    case ZPOOL_STATUS_FEAT_DISABLED:
        snprintf(status, ST_SIZE, gettext("Some supported and "
            "requested features are not enabled on the pool.\n\t"
            "The pool can still be used, but some features are "
            "unavailable.\n"));
        snprintf(action, AC_SIZE, gettext("Enable all features using "
            "'zpool upgrade'. Once this is done,\n\tthe pool may no "
            "longer be accessible by software that does not support\n\t"
            "the features. See zpool-features(7) for details.\n"));
        break;

    case ZPOOL_STATUS_COMPATIBILITY_ERR:
        snprintf(status, ST_SIZE, gettext("This pool has a "
            "compatibility list specified, but it could not be\n\t"
            "read/parsed at this time. The pool can still be used, "
            "but this\n\tshould be investigated.\n"));
        snprintf(action, AC_SIZE, gettext("Check the value of the "
            "'compatibility' property against the\n\t"
            "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
            ZPOOL_DATA_COMPAT_D ".\n"));
        break;

    case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
        snprintf(status, ST_SIZE, gettext("One or more features "
            "are enabled on the pool despite not being\n\t"
            "requested by the 'compatibility' property.\n"));
        snprintf(action, AC_SIZE, gettext("Consider setting "
            "'compatibility' to an appropriate value, or\n\t"
            "adding needed features to the relevant file in\n\t"
            ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
        break;

    case ZPOOL_STATUS_UNSUP_FEAT_READ:
        snprintf(status, ST_SIZE, gettext("The pool cannot be accessed "
            "on this system because it uses the\n\tfollowing feature(s)"
            " not supported on this system:\n"));
        zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
            1024);
        snprintf(action, AC_SIZE, gettext("Access the pool from a "
            "system that supports the required feature(s),\n\tor "
            "restore the pool from backup.\n"));
        break;

    case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
        snprintf(status, ST_SIZE, gettext("The pool can only be "
            "accessed in read-only mode on this system. It\n\tcannot be"
            " accessed in read-write mode because it uses the "
            "following\n\tfeature(s) not supported on this system:\n"));
        zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
            1024);
        snprintf(action, AC_SIZE, gettext("The pool cannot be accessed "
            "in read-write mode. Import the pool with\n"
            "\t\"-o readonly=on\", access the pool from a system that "
            "supports the\n\trequired feature(s), or restore the "
            "pool from backup.\n"));
        break;

    case ZPOOL_STATUS_FAULTED_DEV_R:
        snprintf(status, ST_SIZE, gettext("One or more devices are "
            "faulted in response to persistent errors.\n\tSufficient "
            "replicas exist for the pool to continue functioning "
            "in a\n\tdegraded state.\n"));
        snprintf(action, AC_SIZE, gettext("Replace the faulted device, "
            "or use 'zpool clear' to mark the device\n\trepaired.\n"));
        break;

    case ZPOOL_STATUS_FAULTED_DEV_NR:
        snprintf(status, ST_SIZE, gettext("One or more devices are "
            "faulted in response to persistent errors. There are "
            "insufficient replicas for the pool to\n\tcontinue "
            "functioning.\n"));
        snprintf(action, AC_SIZE, gettext("Destroy and re-create the "
            "pool from a backup source. Manually marking the device\n"
            "\trepaired using 'zpool clear' may allow some data "
            "to be recovered.\n"));
        break;

    case ZPOOL_STATUS_IO_FAILURE_MMP:
        snprintf(status, ST_SIZE, gettext("The pool is suspended "
            "because multihost writes failed or were delayed;\n\t"
            "another system could import the pool undetected.\n"));
        snprintf(action, AC_SIZE, gettext("Make sure the pool's devices"
            " are connected, then reboot your system and\n\timport the "
            "pool or run 'zpool clear' to resume the pool.\n"));
        break;

    case ZPOOL_STATUS_IO_FAILURE_WAIT:
    case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
        snprintf(status, ST_SIZE, gettext("One or more devices are "
            "faulted in response to IO failures.\n"));
        snprintf(action, AC_SIZE, gettext("Make sure the affected "
            "devices are connected, then run 'zpool clear'.\n"));
        break;

    case ZPOOL_STATUS_BAD_LOG:
        snprintf(status, ST_SIZE, gettext("An intent log record "
            "could not be read.\n"
            "\tWaiting for administrator intervention to fix the "
            "faulted pool.\n"));
        snprintf(action, AC_SIZE, gettext("Either restore the affected "
            "device(s) and run 'zpool online',\n"
            "\tor ignore the intent log records by running "
            "'zpool clear'.\n"));
        break;

    case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
        snprintf(status, ST_SIZE, gettext("One or more devices are "
            "configured to use a non-native block size.\n"
            "\tExpect reduced performance.\n"));
        snprintf(action, AC_SIZE, gettext("Replace affected devices "
            "with devices that support the\n\tconfigured block size, "
            "or migrate data to a properly configured\n\tpool.\n"));
        break;

    case ZPOOL_STATUS_HOSTID_MISMATCH:
        snprintf(status, ST_SIZE, gettext("Mismatch between pool hostid"
            " and system hostid on imported pool.\n\tThis pool was "
            "previously imported into a system with a different "
            "hostid,\n\tand then was verbatim imported into this "
            "system.\n"));
        snprintf(action, AC_SIZE, gettext("Export this pool on all "
            "systems on which it is imported.\n"
            "\tThen import it to correct the mismatch.\n"));
        break;

    case ZPOOL_STATUS_ERRATA:
        snprintf(status, ST_SIZE, gettext("Errata #%d detected.\n"),
            errata);
        switch (errata) {
        case ZPOOL_ERRATA_NONE:
            break;

        case ZPOOL_ERRATA_ZOL_2094_SCRUB:
            snprintf(action, AC_SIZE, gettext("To correct the issue"
                " run 'zpool scrub'.\n"));
            break;

        case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
            (void) strlcat(status, gettext("\tExisting encrypted "
                "datasets contain an on-disk incompatibility\n\t "
                "which needs to be corrected.\n"), ST_SIZE);
            snprintf(action, AC_SIZE, gettext("To correct the issue,"
                " back up existing encrypted datasets to new\n\t"
                "encrypted datasets and destroy the old ones. "
                "'zfs mount -o ro' can\n\tbe used to temporarily "
                "mount existing encrypted datasets readonly.\n"));
            break;

        case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
            (void) strlcat(status, gettext("\tExisting encrypted "
                "snapshots and bookmarks contain an on-disk\n\t"
                "incompatibility. This may cause on-disk "
                "corruption if they are used\n\twith "
                "'zfs recv'.\n"), ST_SIZE);
            snprintf(action, AC_SIZE, gettext("To correct the "
                "issue, enable the bookmark_v2 feature. No "
                "additional\n\taction is needed if there are no "
                "encrypted snapshots or bookmarks.\n\tIf preserving "
                "the encrypted snapshots and bookmarks is required,"
                " use\n\ta non-raw send to back up and restore them."
                " Alternately, they may be\n\tremoved to resolve "
                "the incompatibility.\n"));
            break;

        default:
            /*
             * All errata which allow the pool to be imported
             * must contain an action message.
             */
            assert(0);
        }
        break;

    default:
        /*
         * The remaining errors can't actually be generated, yet.
         */
        assert(reason == ZPOOL_STATUS_OK);
    }

    if (status[0] != 0) {
        if (cbp->cb_json)
            fnvlist_add_string(item, "status", status);
        else {
            printf_color(ANSI_BOLD, gettext("status: "));
            printf_color(ANSI_YELLOW, status);
        }
    }

    if (action[0] != 0) {
        if (cbp->cb_json)
            fnvlist_add_string(item, "action", action);
        else {
            printf_color(ANSI_BOLD, gettext("action: "));
            printf_color(ANSI_YELLOW, action);
        }
    }
}
static int
status_callback_json(zpool_handle_t *zhp, void *data)
{
    status_cbdata_t *cbp = data;
    nvlist_t *config, *nvroot;
    const char *msgid;
    char pool_guid[256];
    char msgbuf[256];
    uint64_t guid;
    zpool_status_t reason;
    zpool_errata_t errata;
    uint_t c;
    vdev_stat_t *vs;
    nvlist_t *item, *d, *load_info, *vds;

    /* If dedup stats were requested, also fetch dedupcached. */
    if (cbp->cb_dedup_stats > 1)
        zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
    reason = zpool_get_status(zhp, &msgid, &errata);
    /*
     * If we were given 'zpool status -x', only report those pools with
     * problems.
     */
    if (cbp->cb_explain &&
        (reason == ZPOOL_STATUS_OK ||
        reason == ZPOOL_STATUS_VERSION_OLDER ||
        reason == ZPOOL_STATUS_FEAT_DISABLED ||
        reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
        reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
        return (0);
    }

    d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
    item = fnvlist_alloc();
    vds = fnvlist_alloc();
    fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int);
    config = zpool_get_config(zhp, NULL);

    if (config != NULL) {
        nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
        verify(nvlist_lookup_uint64_array(nvroot,
            ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0);
        if (cbp->cb_json_pool_key_guid) {
            guid = fnvlist_lookup_uint64(config,
                ZPOOL_CONFIG_POOL_GUID);
            snprintf(pool_guid, 256, "%llu", (u_longlong_t)guid);
        }
        cbp->cb_count++;

        print_status_reason(zhp, cbp, reason, errata, item);
        if (msgid != NULL) {
            snprintf(msgbuf, 256,
                "https://openzfs.github.io/openzfs-docs/msg/%s",
                msgid);
            fnvlist_add_string(item, "msgid", msgid);
            fnvlist_add_string(item, "moreinfo", msgbuf);
        }

        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
            &load_info) == 0) {
            fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO,
                load_info);
        }

        scan_status_nvlist(zhp, cbp, nvroot, item);
        removal_status_nvlist(zhp, cbp, nvroot, item);
        checkpoint_status_nvlist(nvroot, cbp, item);
        raidz_expand_status_nvlist(zhp, cbp, nvroot, item);
        vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds);
        if (cbp->cb_flat_vdevs) {
            class_vdevs_nvlist(zhp, cbp, nvroot,
                VDEV_ALLOC_BIAS_DEDUP, vds);
            class_vdevs_nvlist(zhp, cbp, nvroot,
                VDEV_ALLOC_BIAS_SPECIAL, vds);
            class_vdevs_nvlist(zhp, cbp, nvroot,
                VDEV_ALLOC_CLASS_LOGS, vds);
            l2cache_nvlist(zhp, cbp, nvroot, vds);
            spares_nvlist(zhp, cbp, nvroot, vds);

            fnvlist_add_nvlist(item, "vdevs", vds);
            fnvlist_free(vds);
        } else {
            fnvlist_add_nvlist(item, "vdevs", vds);
            fnvlist_free(vds);

            class_vdevs_nvlist(zhp, cbp, nvroot,
                VDEV_ALLOC_BIAS_DEDUP, item);
            class_vdevs_nvlist(zhp, cbp, nvroot,
                VDEV_ALLOC_BIAS_SPECIAL, item);
            class_vdevs_nvlist(zhp, cbp, nvroot,
                VDEV_ALLOC_CLASS_LOGS, item);
            l2cache_nvlist(zhp, cbp, nvroot, item);
            spares_nvlist(zhp, cbp, nvroot, item);
        }
        dedup_stats_nvlist(zhp, cbp, item);
        errors_nvlist(zhp, cbp, item);
    }
    if (cbp->cb_json_pool_key_guid) {
        fnvlist_add_nvlist(d, pool_guid, item);
    } else {
        fnvlist_add_nvlist(d, zpool_get_name(zhp),
            item);
    }
    fnvlist_free(item);
    return (0);
}
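/*
 * Example (illustrative, abbreviated): a degraded pool reported through
 * the JSON path above might serialize roughly as:
 *
 * {"pools": {"tank": {
 *     "status": "One or more devices could not be opened. ...",
 *     "action": "Attach the missing device and online it using 'zpool online'.",
 *     "msgid": "ZFS-8000-2Q",
 *     "moreinfo": "https://openzfs.github.io/openzfs-docs/msg/ZFS-8000-2Q",
 *     "vdevs": { ... per-vdev stats ... }
 * }}}
 *
 * The same "status" and "action" strings are printed by the text path in
 * status_callback() below; with --json-pool-key-guid the pool object is
 * keyed by its GUID string instead of its name.
 */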
/*
 * Display a summary of pool status. Displays a summary such as:
 *
 *        pool: tank
 *      status: DEGRADED
 *      reason: One or more devices ...
 *         see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
 *      config:
 *              mirror          DEGRADED
 *                c1t0d0        OK
 *                c2t0d0        UNAVAIL
 *
 * When given the '-v' option, we print out the complete config. If the '-e'
 * option is specified, then we print out error rate information as well.
 */
static int
status_callback(zpool_handle_t *zhp, void *data)
{
    status_cbdata_t *cbp = data;
    nvlist_t *config, *nvroot;
    const char *msgid;
    zpool_status_t reason;
    zpool_errata_t errata;
    const char *health;
    uint_t c;
    vdev_stat_t *vs;

    /* If dedup stats were requested, also fetch dedupcached. */
    if (cbp->cb_dedup_stats > 1)
        zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);

    config = zpool_get_config(zhp, NULL);
    reason = zpool_get_status(zhp, &msgid, &errata);

    cbp->cb_count++;

    /*
     * If we were given 'zpool status -x', only report those pools with
     * problems.
     */
    if (cbp->cb_explain &&
        (reason == ZPOOL_STATUS_OK ||
        reason == ZPOOL_STATUS_VERSION_OLDER ||
        reason == ZPOOL_STATUS_FEAT_DISABLED ||
        reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
        reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
        if (!cbp->cb_allpools) {
            (void) printf(gettext("pool '%s' is healthy\n"),
                zpool_get_name(zhp));
            if (cbp->cb_first)
                cbp->cb_first = B_FALSE;
        }
        return (0);
    }

    if (cbp->cb_first)
        cbp->cb_first = B_FALSE;
    else
        (void) printf("\n");

    nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
    verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
        (uint64_t **)&vs, &c) == 0);

    health = zpool_get_state_str(zhp);

    printf("  ");
    printf_color(ANSI_BOLD, gettext("pool:"));
    printf(" %s\n", zpool_get_name(zhp));
    fputc(' ', stdout);
    printf_color(ANSI_BOLD, gettext("state: "));

    printf_color(health_str_to_color(health), "%s", health);

    fputc('\n', stdout);
    print_status_reason(zhp, cbp, reason, errata, NULL);

    if (msgid != NULL) {
        printf("   ");
        printf_color(ANSI_BOLD, gettext("see:"));
        printf(gettext(
            " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
            msgid);
    }

    if (config != NULL) {
        uint64_t nerr;
        nvlist_t **spares, **l2cache;
        uint_t nspares, nl2cache;

        print_scan_status(zhp, nvroot);

        pool_removal_stat_t *prs = NULL;
        (void) nvlist_lookup_uint64_array(nvroot,
            ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
        print_removal_status(zhp, prs);

        pool_checkpoint_stat_t *pcs = NULL;
        (void) nvlist_lookup_uint64_array(nvroot,
            ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
        print_checkpoint_status(pcs);

        pool_raidz_expand_stat_t *pres = NULL;
        (void) nvlist_lookup_uint64_array(nvroot,
            ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
        print_raidz_expand_status(zhp, pres);

        cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
            cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
        if (cbp->cb_namewidth < 10)
            cbp->cb_namewidth = 10;

        color_start(ANSI_BOLD);
        (void) printf(gettext("config:\n\n"));
        (void) printf(gettext("\t%-*s  %-8s %5s %5s %5s"),
            cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
            "CKSUM");
        color_end();

        if (cbp->cb_print_slow_ios) {
            printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
        }

        if (cbp->cb_print_power) {
            printf_color(ANSI_BOLD, " %5s", gettext("POWER"));
        }

        if (cbp->cb_print_dio_verify) {
            printf_color(ANSI_BOLD, " %5s", gettext("DIO"));
        }

        if (cbp->vcdl != NULL)
            print_cmd_columns(cbp->vcdl, 0);

        printf("\n");

        print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
            B_FALSE, NULL);

        print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
        print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
        print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);

        if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
            &l2cache, &nl2cache) == 0)
            print_l2cache(zhp, cbp, l2cache, nl2cache);

        if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
            &spares, &nspares) == 0)
            print_spares(zhp, cbp, spares, nspares);

        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
            &nerr) == 0) {
            (void) printf("\n");
            if (nerr == 0) {
                (void) printf(gettext(
                    "errors: No known data errors\n"));
            } else if (!cbp->cb_verbose) {
                color_start(ANSI_RED);
                (void) printf(gettext("errors: %llu data "
                    "errors, use '-v' for a list\n"),
                    (u_longlong_t)nerr);
                color_end();
            } else {
                print_error_log(zhp);
            }
        }

        if (cbp->cb_dedup_stats)
            print_dedup_stats(zhp, config, cbp->cb_literal);
    } else {
        (void) printf(gettext("config: The configuration cannot be "
            "determined.\n"));
    }

    return (0);
}
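/*
 * Example (illustrative): when 'zpool status -D' is given, the callbacks
 * above finish with print_dedup_stats(), which appends a summary such as:
 *
 *  dedup: DDT entries 1052, size 1.21M on disk, 343K in core
 *
 * The ", ... cached (NN.NN%)" suffix is only appended when the
 * dedupcached property was also fetched (i.e. '-DD' was specified).
 */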
/*
 * zpool status [-dDegiLpPstvx] [-c [script1,script2,...]] ...
 *      [-j|--json [--json-flat-vdevs] [--json-int] ...
 *      [--json-pool-key-guid]] [--power] [-T d|u] ...
 *      [pool] [interval [count]]
 *
 *      -c CMD  For each vdev, run command CMD
 *      -D      Display dedup status (undocumented)
 *      -d      Display Direct I/O write verify errors
 *      -e      Display only unhealthy vdevs
 *      -g      Display guid for individual vdev name.
 *      -i      Display vdev initialization status.
 *      -j [...] Display output in JSON format
 *      --json-flat-vdevs Display vdevs in flat hierarchy
 *      --json-int Display numbers in integer format instead of string
 *      --json-pool-key-guid Use pool GUID as key for pool objects
 *      -L      Follow links when resolving vdev path name.
 *      -P      Display full path for vdev name.
 *      -p      Display values in parsable (exact) format.
 *      --power Display vdev enclosure slot power status
 *      -s      Display slow IOs column.
 *      -T      Display a timestamp in date(1) or Unix format
 *      -t      Display vdev TRIM status.
 *      -v      Display complete error logs
 *      -x      Display only pools with potential problems
 *
 * Describes the health status of all pools or some subset.
 */
int
zpool_do_status(int argc, char **argv)
{
    int c;
    int ret;
    float interval = 0;
    unsigned long count = 0;
    status_cbdata_t cb = { 0 };
    nvlist_t *data;
    char *cmd = NULL;

    struct option long_options[] = {
        {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
        {"json", no_argument, NULL, 'j'},
        {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
        {"json-flat-vdevs", no_argument, NULL,
            ZPOOL_OPTION_JSON_FLAT_VDEVS},
        {"json-pool-key-guid", no_argument, NULL,
            ZPOOL_OPTION_POOL_KEY_GUID},
        {0, 0, 0, 0}
    };

    /* check options */
    while ((c = getopt_long(argc, argv, "c:jdDegiLpPstT:vx", long_options,
        NULL)) != -1) {
        switch (c) {
        case 'c':
            if (cmd != NULL) {
                fprintf(stderr,
                    gettext("Can't set -c flag twice\n"));
                exit(1);
            }

            if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
                !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
                fprintf(stderr, gettext(
                    "Can't run -c, disabled by "
                    "ZPOOL_SCRIPTS_ENABLED.\n"));
                exit(1);
            }

            if ((getuid() <= 0 || geteuid() <= 0) &&
                !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
                fprintf(stderr, gettext(
                    "Can't run -c with root privileges "
                    "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
                exit(1);
            }
            cmd = optarg;
            break;
        case 'd':
            cb.cb_print_dio_verify = B_TRUE;
            break;
        case 'D':
            if (++cb.cb_dedup_stats > 2)
                cb.cb_dedup_stats = 2;
            break;
        case 'e':
            cb.cb_print_unhealthy = B_TRUE;
            break;
        case 'g':
            cb.cb_name_flags |= VDEV_NAME_GUID;
            break;
        case 'i':
            cb.cb_print_vdev_init = B_TRUE;
            break;
        case 'L':
            cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
            break;
        case 'p':
            cb.cb_literal = B_TRUE;
            break;
        case 'P':
            cb.cb_name_flags |= VDEV_NAME_PATH;
            break;
        case 's':
            cb.cb_print_slow_ios = B_TRUE;
            break;
        case 't':
            cb.cb_print_vdev_trim = B_TRUE;
            break;
        case 'T':
            get_timestamp_arg(*optarg);
            break;
        case 'v':
            cb.cb_verbose = B_TRUE;
            break;
        case 'j':
            cb.cb_json = B_TRUE;
            break;
        case 'x':
            cb.cb_explain = B_TRUE;
            break;
        case ZPOOL_OPTION_POWER:
            cb.cb_print_power = B_TRUE;
            break;
        case ZPOOL_OPTION_JSON_FLAT_VDEVS:
            cb.cb_flat_vdevs = B_TRUE;
            break;
        case ZPOOL_OPTION_JSON_NUMS_AS_INT:
            cb.cb_json_as_int = B_TRUE;
            cb.cb_literal = B_TRUE;
            break;
        case ZPOOL_OPTION_POOL_KEY_GUID:
            cb.cb_json_pool_key_guid = B_TRUE;
            break;
        case '?':
            if (optopt == 'c') {
                print_zpool_script_list("status");
                exit(0);
            } else {
                fprintf(stderr,
                    gettext("invalid option '%c'\n"), optopt);
            }
            usage(B_FALSE);
        }
    }

    argc -= optind;
    argv += optind;

    get_interval_count(&argc, argv, &interval, &count);

    if (argc == 0)
        cb.cb_allpools = B_TRUE;

    cb.cb_first = B_TRUE;
    cb.cb_print_status = B_TRUE;

    if (cb.cb_flat_vdevs && !cb.cb_json) {
        fprintf(stderr, gettext("'--json-flat-vdevs' only works with"
            " '-j' option\n"));
        usage(B_FALSE);
    }

    if (cb.cb_json_as_int && !cb.cb_json) {
        (void) fprintf(stderr, gettext("'--json-int' only works with"
            " '-j' option\n"));
        usage(B_FALSE);
    }

    if (!cb.cb_json && cb.cb_json_pool_key_guid) {
        (void) fprintf(stderr, gettext("'json-pool-key-guid' only"
            " works with '-j' option\n"));
        usage(B_FALSE);
    }

    for (;;) {
        if (cb.cb_json) {
            cb.cb_jsobj = zpool_json_schema(0, 1);
            data = fnvlist_alloc();
            fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
            fnvlist_free(data);
        }

        if (timestamp_fmt != NODATE) {
            if (cb.cb_json) {
                if (cb.cb_json_as_int) {
                    fnvlist_add_uint64(cb.cb_jsobj, "time",
                        time(NULL));
                } else {
                    char ts[128];
                    get_timestamp(timestamp_fmt, ts, 128);
                    fnvlist_add_string(cb.cb_jsobj, "time",
                        ts);
                }
            } else
                print_timestamp(timestamp_fmt);
        }

        if (cmd != NULL)
            cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
                NULL, NULL, 0, 0);

        if (cb.cb_json) {
            ret = for_each_pool(argc, argv, B_TRUE, NULL,
                ZFS_TYPE_POOL, cb.cb_literal,
                status_callback_json, &cb);
        } else {
            ret = for_each_pool(argc, argv, B_TRUE, NULL,
                ZFS_TYPE_POOL, cb.cb_literal,
                status_callback, &cb);
        }

        if (cb.vcdl != NULL)
            free_vdev_cmd_data_list(cb.vcdl);

        if (cb.cb_json) {
            if (ret == 0)
                zcmd_print_json(cb.cb_jsobj);
            else
                nvlist_free(cb.cb_jsobj);
        } else {
            if (argc == 0 && cb.cb_count == 0) {
                (void) fprintf(stderr, "%s",
                    gettext("no pools available\n"));
            } else if (cb.cb_explain && cb.cb_first &&
                cb.cb_allpools) {
                (void) printf("%s",
                    gettext("all pools are healthy\n"));
            }
        }

        if (ret != 0)
            return (ret);

        if (interval == 0)
            break;

        if (count != 0 && --count == 0)
            break;

        (void) fflush(stdout);
        (void) fsleep(interval);
    }

    return (0);
}
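/*
 * Example invocations (illustrative): 'zpool status -x' reports only
 * pools with problems, 'zpool status -s tank 5' redraws the status of
 * 'tank' every five seconds with a SLOW column, and 'zpool status -j'
 * emits the JSON document assembled by status_callback_json() above.
 */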
typedef struct upgrade_cbdata {
    int cb_first;
    int cb_argc;
    uint64_t cb_version;
    char **cb_argv;
} upgrade_cbdata_t;

static int
check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
{
    int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
    int *count = (int *)unsupp_fs;

    if (zfs_version > ZPL_VERSION) {
        (void) printf(gettext("%s (v%d) is not supported by this "
            "implementation of ZFS.\n"),
            zfs_get_name(zhp), zfs_version);
        (*count)++;
    }

    zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);

    zfs_close(zhp);

    return (0);
}

static int
upgrade_version(zpool_handle_t *zhp, uint64_t version)
{
    int ret;
    nvlist_t *config;
    uint64_t oldversion;
    int unsupp_fs = 0;

    config = zpool_get_config(zhp, NULL);
    verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
        &oldversion) == 0);

    char compat[ZFS_MAXPROPLEN];
    if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
        ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
        compat[0] = '\0';

    assert(SPA_VERSION_IS_SUPPORTED(oldversion));
    assert(oldversion < version);

    ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
    if (ret != 0)
        return (ret);

    if (unsupp_fs) {
        (void) fprintf(stderr, gettext("Upgrade not performed due "
            "to %d unsupported filesystems (max v%d).\n"),
            unsupp_fs, (int)ZPL_VERSION);
        return (1);
    }

    if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
        (void) fprintf(stderr, gettext("Upgrade not performed because "
            "'compatibility' property set to '"
            ZPOOL_COMPAT_LEGACY "'.\n"));
        return (1);
    }

    ret = zpool_upgrade(zhp, version);
    if (ret != 0)
        return (ret);

    if (version >= SPA_VERSION_FEATURES) {
        (void) printf(gettext("Successfully upgraded "
            "'%s' from version %llu to feature flags.\n"),
            zpool_get_name(zhp), (u_longlong_t)oldversion);
    } else {
        (void) printf(gettext("Successfully upgraded "
            "'%s' from version %llu to version %llu.\n"),
            zpool_get_name(zhp), (u_longlong_t)oldversion,
            (u_longlong_t)version);
    }

    return (0);
}

static int
upgrade_enable_all(zpool_handle_t *zhp, int *countp)
{
    int i, ret, count;
    boolean_t firstff = B_TRUE;
    nvlist_t *enabled = zpool_get_features(zhp);

    char compat[ZFS_MAXPROPLEN];
    if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
        ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
        compat[0] = '\0';

    boolean_t requested_features[SPA_FEATURES];
    if (zpool_do_load_compat(compat, requested_features) !=
        ZPOOL_COMPATIBILITY_OK)
        return (-1);

    count = 0;
    for (i = 0; i < SPA_FEATURES; i++) {
        const char *fname = spa_feature_table[i].fi_uname;
        const char *fguid = spa_feature_table[i].fi_guid;

        if (!spa_feature_table[i].fi_zfs_mod_supported ||
            (spa_feature_table[i].fi_flags & ZFEATURE_FLAG_NO_UPGRADE))
            continue;

        if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
            char *propname;
            verify(-1 != asprintf(&propname, "feature@%s", fname));
            ret = zpool_set_prop(zhp, propname,
                ZFS_FEATURE_ENABLED);
            if (ret != 0) {
                free(propname);
                return (ret);
            }
            count++;

            if (firstff) {
                (void) printf(gettext("Enabled the "
                    "following features on '%s':\n"),
                    zpool_get_name(zhp));
                firstff = B_FALSE;
            }
            (void) printf(gettext("  %s\n"), fname);
            free(propname);
        }
    }

    if (countp != NULL)
        *countp = count;
    return (0);
}

static int
upgrade_cb(zpool_handle_t *zhp, void *arg)
{
    upgrade_cbdata_t *cbp = arg;
    nvlist_t *config;
    uint64_t version;
    boolean_t modified_pool = B_FALSE;
    int ret;

    config = zpool_get_config(zhp, NULL);
    verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
        &version) == 0);

    assert(SPA_VERSION_IS_SUPPORTED(version));

    if (version < cbp->cb_version) {
        cbp->cb_first = B_FALSE;
        ret = upgrade_version(zhp, cbp->cb_version);
        if (ret != 0)
            return (ret);
        modified_pool = B_TRUE;

        /*
         * If they did "zpool upgrade -a", then we could
         * be doing ioctls to different pools. We need
         * to log this history once to each pool, and bypass
         * the normal history logging that happens in main().
         */
        (void) zpool_log_history(g_zfs, history_str);
        log_history = B_FALSE;
    }

    if (cbp->cb_version >= SPA_VERSION_FEATURES) {
        int count;
        ret = upgrade_enable_all(zhp, &count);
        if (ret != 0)
            return (ret);

        if (count > 0) {
            cbp->cb_first = B_FALSE;
            modified_pool = B_TRUE;
        }
    }

    if (modified_pool) {
        (void) printf("\n");
        (void) after_zpool_upgrade(zhp);
    }

    return (0);
}

static int
upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
{
    upgrade_cbdata_t *cbp = arg;
    nvlist_t *config;
    uint64_t version;

    config = zpool_get_config(zhp, NULL);
    verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
        &version) == 0);

    assert(SPA_VERSION_IS_SUPPORTED(version));

    if (version < SPA_VERSION_FEATURES) {
        if (cbp->cb_first) {
            (void) printf(gettext("The following pools are "
                "formatted with legacy version numbers and can\n"
                "be upgraded to use feature flags. After "
                "being upgraded, these pools\nwill no "
                "longer be accessible by software that does not "
                "support feature\nflags.\n\n"
                "Note that setting a pool's 'compatibility' "
                "property to '" ZPOOL_COMPAT_LEGACY "' will\n"
                "inhibit upgrades.\n\n"));
            (void) printf(gettext("VER  POOL\n"));
            (void) printf(gettext("---  ------------\n"));
            cbp->cb_first = B_FALSE;
        }

        (void) printf("%2llu   %s\n", (u_longlong_t)version,
            zpool_get_name(zhp));
    }

    return (0);
}

static int
upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
{
    upgrade_cbdata_t *cbp = arg;
    nvlist_t *config;
    uint64_t version;

    config = zpool_get_config(zhp, NULL);
    verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
        &version) == 0);

    if (version >= SPA_VERSION_FEATURES) {
        int i;
        boolean_t poolfirst = B_TRUE;
        nvlist_t *enabled = zpool_get_features(zhp);

        for (i = 0; i < SPA_FEATURES; i++) {
            const char *fguid = spa_feature_table[i].fi_guid;
            const char *fname = spa_feature_table[i].fi_uname;

            if (!spa_feature_table[i].fi_zfs_mod_supported)
                continue;

            if (!nvlist_exists(enabled, fguid)) {
                if (cbp->cb_first) {
                    (void) printf(gettext("\nSome "
                        "supported features are not "
                        "enabled on the following pools. "
                        "Once a\nfeature is enabled the "
                        "pool may become incompatible with "
                        "software\nthat does not support "
                        "the feature. See "
                        "zpool-features(7) for "
                        "details.\n\n"
                        "Note that the pool "
                        "'compatibility' property can be "
                        "used to inhibit\nfeature "
                        "upgrades.\n\n"
                        "Features marked with (*) are not "
                        "applied automatically on upgrade, "
                        "and\nmust be applied explicitly "
                        "with zpool-set(7).\n\n"));
                    (void) printf(gettext("POOL  "
                        "FEATURE\n"));
                    (void) printf(gettext("------"
                        "---------\n"));
                    cbp->cb_first = B_FALSE;
                }

                if (poolfirst) {
                    (void) printf(gettext("%s\n"),
                        zpool_get_name(zhp));
                    poolfirst = B_FALSE;
                }

                (void) printf(gettext("  %s%s\n"), fname,
                    spa_feature_table[i].fi_flags &
                    ZFEATURE_FLAG_NO_UPGRADE ? "(*)" : "");
            }
            /*
             * If they did "zpool upgrade -a", then we could
             * be doing ioctls to different pools. We need
             * to log this history once to each pool, and bypass
             * the normal history logging that happens in main().
             */
            (void) zpool_log_history(g_zfs, history_str);
            log_history = B_FALSE;
        }
    }

    return (0);
}

static int
upgrade_one(zpool_handle_t *zhp, void *data)
{
    boolean_t modified_pool = B_FALSE;
    upgrade_cbdata_t *cbp = data;
    uint64_t cur_version;
    int ret;

    if (strcmp("log", zpool_get_name(zhp)) == 0) {
        (void) fprintf(stderr, gettext("'log' is now a reserved word\n"
            "Pool 'log' must be renamed using export and import"
            " to upgrade.\n"));
        return (1);
    }

    cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
    if (cur_version > cbp->cb_version) {
        (void) printf(gettext("Pool '%s' is already formatted "
            "using more current version '%llu'.\n\n"),
            zpool_get_name(zhp), (u_longlong_t)cur_version);
        return (0);
    }

    if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
        (void) printf(gettext("Pool '%s' is already formatted "
            "using version %llu.\n\n"), zpool_get_name(zhp),
            (u_longlong_t)cbp->cb_version);
        return (0);
    }

    if (cur_version != cbp->cb_version) {
        modified_pool = B_TRUE;
        ret = upgrade_version(zhp, cbp->cb_version);
        if (ret != 0)
            return (ret);
    }

    if (cbp->cb_version >= SPA_VERSION_FEATURES) {
        int count = 0;
        ret = upgrade_enable_all(zhp, &count);
        if (ret != 0)
            return (ret);

        if (count != 0) {
            modified_pool = B_TRUE;
        } else if (cur_version == SPA_VERSION) {
            (void) printf(gettext("Pool '%s' already has all "
                "supported and requested features enabled.\n"),
                zpool_get_name(zhp));
        }
    }

    if (modified_pool) {
        (void) printf("\n");
        (void) after_zpool_upgrade(zhp);
    }

    return (0);
}

/*
 * zpool upgrade
 * zpool upgrade -v
 * zpool upgrade [-V version] <-a | pool ...>
 *
 * With no arguments, display downrev'd ZFS pools available for upgrade.
 * Individual pools can be upgraded by specifying the pool, and '-a' will
 * upgrade all pools.
 */
int
zpool_do_upgrade(int argc, char **argv)
{
    int c;
    upgrade_cbdata_t cb = { 0 };
    int ret = 0;
    boolean_t showversions = B_FALSE;
    boolean_t upgradeall = B_FALSE;
    char *end;

    /* check options */
    while ((c = getopt(argc, argv, ":avV:")) != -1) {
        switch (c) {
        case 'a':
            upgradeall = B_TRUE;
            break;
        case 'v':
            showversions = B_TRUE;
            break;
        case 'V':
            cb.cb_version = strtoll(optarg, &end, 10);
            if (*end != '\0' ||
                !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
                (void) fprintf(stderr,
                    gettext("invalid version '%s'\n"), optarg);
                usage(B_FALSE);
            }
            break;
        case ':':
            (void) fprintf(stderr, gettext("missing argument for "
                "'%c' option\n"), optopt);
            usage(B_FALSE);
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    cb.cb_argc = argc;
    cb.cb_argv = argv;
    argc -= optind;
    argv += optind;

    if (cb.cb_version == 0) {
        cb.cb_version = SPA_VERSION;
    } else if (!upgradeall && argc == 0) {
        (void) fprintf(stderr, gettext("-V option is "
            "incompatible with other arguments\n"));
        usage(B_FALSE);
    }

    if (showversions) {
        if (upgradeall || argc != 0) {
            (void) fprintf(stderr, gettext("-v option is "
                "incompatible with other arguments\n"));
            usage(B_FALSE);
        }
    } else if (upgradeall) {
        if (argc != 0) {
            (void) fprintf(stderr, gettext("-a option should not "
                "be used along with a pool name\n"));
            usage(B_FALSE);
        }
    }

    (void) printf("%s", gettext("This system supports ZFS pool feature "
        "flags.\n\n"));
    if (showversions) {
        int i;

        (void) printf(gettext("The following features are "
            "supported:\n\n"));
        (void) printf(gettext("FEAT DESCRIPTION\n"));
        (void) printf("----------------------------------------------"
            "---------------\n");
        for (i = 0; i < SPA_FEATURES; i++) {
            zfeature_info_t *fi = &spa_feature_table[i];
            if (!fi->fi_zfs_mod_supported)
                continue;
            const char *ro =
                (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
                " (read-only compatible)" : "";

            (void) printf("%-37s%s\n", fi->fi_uname, ro);
            (void) printf("     %s\n", fi->fi_desc);
        }
        (void) printf("\n");

        (void) printf(gettext("The following legacy versions are also "
            "supported:\n\n"));
        (void) printf(gettext("VER  DESCRIPTION\n"));
        (void) printf("---  -----------------------------------------"
            "---------------\n");
        (void) printf(gettext(" 1   Initial ZFS version\n"));
        (void) printf(gettext(" 2   Ditto blocks "
            "(replicated metadata)\n"));
        (void) printf(gettext(" 3   Hot spares and double parity "
            "RAID-Z\n"));
        (void) printf(gettext(" 4   zpool history\n"));
        (void) printf(gettext(" 5   Compression using the gzip "
            "algorithm\n"));
        (void) printf(gettext(" 6   bootfs pool property\n"));
        (void) printf(gettext(" 7   Separate intent log devices\n"));
        (void) printf(gettext(" 8   Delegated administration\n"));
        (void) printf(gettext(" 9   refquota and refreservation "
            "properties\n"));
        (void) printf(gettext(" 10  Cache devices\n"));
        (void) printf(gettext(" 11  Improved scrub performance\n"));
        (void) printf(gettext(" 12  Snapshot properties\n"));
        (void) printf(gettext(" 13  snapused property\n"));
        (void) printf(gettext(" 14  passthrough-x aclinherit\n"));
        (void) printf(gettext(" 15  user/group space accounting\n"));
        (void) printf(gettext(" 16  stmf property support\n"));
        (void) printf(gettext(" 17  Triple-parity RAID-Z\n"));
        (void) printf(gettext(" 18  Snapshot user holds\n"));
        (void) printf(gettext(" 19  Log device removal\n"));
        (void) printf(gettext(" 20  Compression using zle "
            "(zero-length encoding)\n"));
        (void) printf(gettext(" 21  Deduplication\n"));
        (void) printf(gettext(" 22  Received properties\n"));
        (void) printf(gettext(" 23  Slim ZIL\n"));
        (void) printf(gettext(" 24  System attributes\n"));
        (void) printf(gettext(" 25  Improved scrub stats\n"));
        (void) printf(gettext(" 26  Improved snapshot deletion "
            "performance\n"));
        (void) printf(gettext(" 27  Improved snapshot creation "
            "performance\n"));
        (void) printf(gettext(" 28  Multiple vdev replacements\n"));
        (void) printf(gettext("\nFor more information on a particular "
            "version, including supported releases,\n"));
        (void) printf(gettext("see the ZFS Administration Guide.\n\n"));
    } else if (argc == 0 && upgradeall) {
        cb.cb_first = B_TRUE;
        ret = zpool_iter(g_zfs, upgrade_cb, &cb);
        if (ret == 0 && cb.cb_first) {
            if (cb.cb_version == SPA_VERSION) {
                (void) printf(gettext("All pools are already "
                    "formatted using feature flags.\n\n"));
                (void) printf(gettext("Every feature flags "
                    "pool already has all supported and "
                    "requested features enabled.\n"));
            } else {
                (void) printf(gettext("All pools are already "
                    "formatted with version %llu or higher.\n"),
                    (u_longlong_t)cb.cb_version);
            }
        }
    } else if (argc == 0) {
        cb.cb_first = B_TRUE;
        ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
        assert(ret == 0);

        if (cb.cb_first) {
            (void) printf(gettext("All pools are formatted "
                "using feature flags.\n\n"));
        } else {
            (void) printf(gettext("\nUse 'zpool upgrade -v' "
                "for a list of available legacy versions.\n"));
        }

        cb.cb_first = B_TRUE;
        ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
        assert(ret == 0);

        if (cb.cb_first) {
            (void) printf(gettext("Every feature flags pool has "
                "all supported and requested features enabled.\n"));
        } else {
            (void) printf(gettext("\n"));
        }
    } else {
        ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
            B_FALSE, upgrade_one, &cb);
    }

    return (ret);
}
typedef struct hist_cbdata {
    boolean_t first;
    boolean_t longfmt;
    boolean_t internal;
} hist_cbdata_t;

static void
print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
{
    nvlist_t **records;
    uint_t numrecords;
    int i;

    verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
        &records, &numrecords) == 0);
    for (i = 0; i < numrecords; i++) {
        nvlist_t *rec = records[i];
        char tbuf[64] = "";

        if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
            time_t tsec;
            struct tm t;

            tsec = fnvlist_lookup_uint64(records[i],
                ZPOOL_HIST_TIME);
            (void) localtime_r(&tsec, &t);
            (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
        }

        if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
            uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
                ZPOOL_HIST_ELAPSED_NS);
            (void) snprintf(tbuf + strlen(tbuf),
                sizeof (tbuf) - strlen(tbuf),
                " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
        }

        if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
            (void) printf("%s %s", tbuf,
                fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
        } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
            int ievent =
                fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
            if (!cb->internal)
                continue;
            if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
                (void) printf("%s unrecognized record:\n",
                    tbuf);
                dump_nvlist(rec, 4);
                continue;
            }
            (void) printf("%s [internal %s txg:%lld] %s", tbuf,
                zfs_history_event_names[ievent],
                (longlong_t)fnvlist_lookup_uint64(
                rec, ZPOOL_HIST_TXG),
                fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
        } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
            if (!cb->internal)
                continue;
            (void) printf("%s [txg:%lld] %s", tbuf,
                (longlong_t)fnvlist_lookup_uint64(
                rec, ZPOOL_HIST_TXG),
                fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
            if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
                (void) printf(" %s (%llu)",
                    fnvlist_lookup_string(rec,
                    ZPOOL_HIST_DSNAME),
                    (u_longlong_t)fnvlist_lookup_uint64(rec,
                    ZPOOL_HIST_DSID));
            }
            (void) printf(" %s", fnvlist_lookup_string(rec,
                ZPOOL_HIST_INT_STR));
        } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
            if (!cb->internal)
                continue;
            (void) printf("%s ioctl %s\n", tbuf,
                fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
            if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
                (void) printf("    input:\n");
                dump_nvlist(fnvlist_lookup_nvlist(rec,
                    ZPOOL_HIST_INPUT_NVL), 8);
            }
            if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
                (void) printf("    output:\n");
                dump_nvlist(fnvlist_lookup_nvlist(rec,
                    ZPOOL_HIST_OUTPUT_NVL), 8);
            }
            if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
                (void) printf("    output nvlist omitted; "
                    "original size: %lldKB\n",
                    (longlong_t)fnvlist_lookup_int64(rec,
                    ZPOOL_HIST_OUTPUT_SIZE) / 1024);
            }
            if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
                (void) printf("    errno: %lld\n",
                    (longlong_t)fnvlist_lookup_int64(rec,
                    ZPOOL_HIST_ERRNO));
            }
        } else {
            if (!cb->internal)
                continue;
            (void) printf("%s unrecognized record:\n", tbuf);
            dump_nvlist(rec, 4);
        }

        if (!cb->longfmt) {
            (void) printf("\n");
            continue;
        }
        (void) printf(" [");
        if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
            uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
            struct passwd *pwd = getpwuid(who);
            (void) printf("user %d ", (int)who);
            if (pwd != NULL)
                (void) printf("(%s) ", pwd->pw_name);
        }
        if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
            (void) printf("on %s",
                fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
        }
        if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
            (void) printf(":%s",
                fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
        }

        (void) printf("]");
        (void) printf("\n");
    }
}

/*
 * Print out the command history for a specific pool.
 */
static int
get_history_one(zpool_handle_t *zhp, void *data)
{
    nvlist_t *nvhis;
    int ret;
    hist_cbdata_t *cb = (hist_cbdata_t *)data;
    uint64_t off = 0;
    boolean_t eof = B_FALSE;

    cb->first = B_FALSE;

    (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));

    while (!eof) {
        if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
            return (ret);

        print_history_records(nvhis, cb);
        nvlist_free(nvhis);
    }
    (void) printf("\n");

    return (ret);
}

/*
 * zpool history <pool>
 *
 * Displays the history of commands that modified pools.
 */
int
zpool_do_history(int argc, char **argv)
{
    hist_cbdata_t cbdata = { 0 };
    int ret;
    int c;

    cbdata.first = B_TRUE;
    /* check options */
    while ((c = getopt(argc, argv, "li")) != -1) {
        switch (c) {
        case 'l':
            cbdata.longfmt = B_TRUE;
            break;
        case 'i':
            cbdata.internal = B_TRUE;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }
    argc -= optind;
    argv += optind;

    ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
        B_FALSE, get_history_one, &cbdata);

    if (argc == 0 && cbdata.first == B_TRUE) {
        (void) fprintf(stderr, gettext("no pools available\n"));
        return (0);
    }

    return (ret);
}
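/*
 * Example (illustrative): 'zpool history -l tank' prints records such as:
 *
 * History for 'tank':
 * 2024-01-15.10:32:01 zpool create tank mirror sda sdb [user 0 (root) on host]
 * 2024-01-15.10:33:12 zfs create tank/home [user 0 (root) on host]
 *
 * Adding -i also shows internal events in the '[internal <event> txg:<N>]'
 * form handled above.
 */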
typedef struct ev_opts {
    int verbose;
    int scripted;
    int follow;
    int clear;
    char poolname[ZFS_MAX_DATASET_NAME_LEN];
} ev_opts_t;

static void
zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
{
    char ctime_str[26], str[32];
    const char *ptr;
    int64_t *tv;
    uint_t n;

    verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
    memset(str, ' ', 32);
    (void) ctime_r((const time_t *)&tv[0], ctime_str);
    (void) memcpy(str, ctime_str+4, 6);             /* 'Jun 30' */
    (void) memcpy(str+7, ctime_str+20, 4);          /* '1993' */
    (void) memcpy(str+12, ctime_str+11, 8);         /* '21:49:08' */
    (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
    if (opts->scripted)
        (void) printf(gettext("%s\t"), str);
    else
        (void) printf(gettext("%s "), str);

    verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
    (void) printf(gettext("%s\n"), ptr);
}

static void
zpool_do_events_nvprint(nvlist_t *nvl, int depth)
{
    nvpair_t *nvp;
    static char flagstr[256];

    for (nvp = nvlist_next_nvpair(nvl, NULL);
        nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {

        data_type_t type = nvpair_type(nvp);
        const char *name = nvpair_name(nvp);

        boolean_t b;
        uint8_t i8;
        uint16_t i16;
        uint32_t i32;
        uint64_t i64;
        const char *str;
        nvlist_t *cnv;

        printf(gettext("%*s%s = "), depth, "", name);

        switch (type) {
        case DATA_TYPE_BOOLEAN:
            printf(gettext("%s"), "1");
            break;

        case DATA_TYPE_BOOLEAN_VALUE:
            (void) nvpair_value_boolean_value(nvp, &b);
            printf(gettext("%s"), b ? "1" : "0");
            break;

        case DATA_TYPE_BYTE:
            (void) nvpair_value_byte(nvp, &i8);
            printf(gettext("0x%x"), i8);
            break;

        case DATA_TYPE_INT8:
            (void) nvpair_value_int8(nvp, (void *)&i8);
            printf(gettext("0x%x"), i8);
            break;

        case DATA_TYPE_UINT8:
            (void) nvpair_value_uint8(nvp, &i8);
            printf(gettext("0x%x"), i8);
            break;

        case DATA_TYPE_INT16:
            (void) nvpair_value_int16(nvp, (void *)&i16);
            printf(gettext("0x%x"), i16);
            break;

        case DATA_TYPE_UINT16:
            (void) nvpair_value_uint16(nvp, &i16);
            printf(gettext("0x%x"), i16);
            break;

        case DATA_TYPE_INT32:
            (void) nvpair_value_int32(nvp, (void *)&i32);
            printf(gettext("0x%x"), i32);
            break;

        case DATA_TYPE_UINT32:
            (void) nvpair_value_uint32(nvp, &i32);
            if (strcmp(name,
                FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE) == 0 ||
                strcmp(name,
                FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE) == 0) {
                zfs_valstr_zio_stage(i32, flagstr,
                    sizeof (flagstr));
                printf(gettext("0x%x [%s]"), i32, flagstr);
            } else if (strcmp(name,
                FM_EREPORT_PAYLOAD_ZFS_ZIO_TYPE) == 0) {
                zfs_valstr_zio_type(i32, flagstr,
                    sizeof (flagstr));
                printf(gettext("0x%x [%s]"), i32, flagstr);
            } else if (strcmp(name,
                FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) {
                zfs_valstr_zio_priority(i32, flagstr,
                    sizeof (flagstr));
                printf(gettext("0x%x [%s]"), i32, flagstr);
            } else {
                printf(gettext("0x%x"), i32);
            }
            break;

        case DATA_TYPE_INT64:
            (void) nvpair_value_int64(nvp, (void *)&i64);
            printf(gettext("0x%llx"), (u_longlong_t)i64);
            break;

        case DATA_TYPE_UINT64:
            (void) nvpair_value_uint64(nvp, &i64);
            /*
             * translate vdev state values to readable
             * strings to aid zpool events consumers
             */
            if (strcmp(name,
                FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
                strcmp(name,
                FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
                printf(gettext("\"%s\" (0x%llx)"),
                    zpool_state_to_name(i64, VDEV_AUX_NONE),
                    (u_longlong_t)i64);
            } else if (strcmp(name,
                FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS) == 0) {
                zfs_valstr_zio_flag(i64, flagstr,
                    sizeof (flagstr));
                printf(gettext("0x%llx [%s]"),
                    (u_longlong_t)i64, flagstr);
            } else {
                printf(gettext("0x%llx"), (u_longlong_t)i64);
            }
            break;

        case DATA_TYPE_HRTIME:
            (void) nvpair_value_hrtime(nvp, (void *)&i64);
            printf(gettext("0x%llx"), (u_longlong_t)i64);
            break;

        case DATA_TYPE_STRING:
            (void) nvpair_value_string(nvp, &str);
            printf(gettext("\"%s\""), str ? str : "<NULL>");
            break;

        case DATA_TYPE_NVLIST:
            printf(gettext("(embedded nvlist)\n"));
            (void) nvpair_value_nvlist(nvp, &cnv);
            zpool_do_events_nvprint(cnv, depth + 8);
            printf(gettext("%*s(end %s)"), depth, "", name);
            break;

        case DATA_TYPE_NVLIST_ARRAY: {
            nvlist_t **val;
            uint_t i, nelem;

            (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
            printf(gettext("(%d embedded nvlists)\n"), nelem);
            for (i = 0; i < nelem; i++) {
                printf(gettext("%*s%s[%d] = %s\n"),
                    depth, "", name, i, "(embedded nvlist)");
                zpool_do_events_nvprint(val[i], depth + 8);
                printf(gettext("%*s(end %s[%i])\n"),
                    depth, "", name, i);
            }
            printf(gettext("%*s(end %s)\n"), depth, "", name);
            }
            break;

        case DATA_TYPE_INT8_ARRAY: {
            int8_t *val;
            uint_t i, nelem;

            (void) nvpair_value_int8_array(nvp, &val, &nelem);
            for (i = 0; i < nelem; i++)
                printf(gettext("0x%x "), val[i]);

            break;
            }

        case DATA_TYPE_UINT8_ARRAY: {
            uint8_t *val;
            uint_t i, nelem;

            (void) nvpair_value_uint8_array(nvp, &val, &nelem);
            for (i = 0; i < nelem; i++)
                printf(gettext("0x%x "), val[i]);

            break;
            }

        case DATA_TYPE_INT16_ARRAY: {
            int16_t *val;
            uint_t i, nelem;

            (void) nvpair_value_int16_array(nvp, &val, &nelem);
            for (i = 0; i < nelem; i++)
                printf(gettext("0x%x "), val[i]);

            break;
            }

        case DATA_TYPE_UINT16_ARRAY: {
            uint16_t *val;
            uint_t i, nelem;

            (void) nvpair_value_uint16_array(nvp, &val, &nelem);
            for (i = 0; i < nelem; i++)
                printf(gettext("0x%x "), val[i]);

            break;
            }

        case DATA_TYPE_INT32_ARRAY: {
            int32_t *val;
            uint_t i, nelem;

            (void) nvpair_value_int32_array(nvp, &val, &nelem);
            for (i = 0; i < nelem; i++)
                printf(gettext("0x%x "), val[i]);

            break;
            }

        case DATA_TYPE_UINT32_ARRAY: {
            uint32_t *val;
            uint_t i, nelem;

            (void) nvpair_value_uint32_array(nvp, &val, &nelem);
            for (i = 0; i < nelem; i++)
                printf(gettext("0x%x "), val[i]);

            break;
            }

        case DATA_TYPE_INT64_ARRAY: {
            int64_t *val;
            uint_t i, nelem;

            (void) nvpair_value_int64_array(nvp, &val, &nelem);
            for (i = 0; i < nelem; i++)
                printf(gettext("0x%llx "),
                    (u_longlong_t)val[i]);

            break;
            }

        case DATA_TYPE_UINT64_ARRAY: {
            uint64_t *val;
            uint_t i, nelem;

            (void) nvpair_value_uint64_array(nvp, &val, &nelem);
            for (i = 0; i < nelem; i++)
                printf(gettext("0x%llx "),
                    (u_longlong_t)val[i]);

            break;
            }

        case DATA_TYPE_STRING_ARRAY: {
            const char **str;
            uint_t i, nelem;

            (void) nvpair_value_string_array(nvp, &str, &nelem);
            for (i = 0; i < nelem; i++)
                printf(gettext("\"%s\" "),
                    str[i] ? str[i] : "<NULL>");

            break;
            }

        case DATA_TYPE_BOOLEAN_ARRAY:
        case DATA_TYPE_BYTE_ARRAY:
        case DATA_TYPE_DOUBLE:
        case DATA_TYPE_DONTCARE:
        case DATA_TYPE_UNKNOWN:
            printf(gettext("<unknown>"));
            break;
        }

        printf(gettext("\n"));
    }
}

static int
zpool_do_events_next(ev_opts_t *opts)
{
    nvlist_t *nvl;
    int zevent_fd, ret, dropped;
    const char *pool;

    zevent_fd = open(ZFS_DEV, O_RDWR);
    VERIFY(zevent_fd >= 0);

    if (!opts->scripted)
        (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");

    while (1) {
        ret = zpool_events_next(g_zfs, &nvl, &dropped,
            (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
        if (ret || nvl == NULL)
            break;

        if (dropped > 0)
            (void) printf(gettext("dropped %d events\n"), dropped);

        if (strlen(opts->poolname) > 0 &&
            nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
            strcmp(opts->poolname, pool) != 0)
            continue;

        zpool_do_events_short(nvl, opts);

        if (opts->verbose) {
            zpool_do_events_nvprint(nvl, 8);
            printf(gettext("\n"));
        }
        (void) fflush(stdout);

        nvlist_free(nvl);
    }

    VERIFY0(close(zevent_fd));

    return (ret);
}

static int
zpool_do_events_clear(void)
{
    int count, ret;

    ret = zpool_events_clear(g_zfs, &count);
    if (!ret)
        (void) printf(gettext("cleared %d events\n"), count);

    return (ret);
}

/*
 * zpool events [-vHf [pool] | -c]
 *
 * Displays event logs generated by ZFS.
 */
int
zpool_do_events(int argc, char **argv)
{
    ev_opts_t opts = { 0 };
    int ret;
    int c;

    /* check options */
    while ((c = getopt(argc, argv, "vHfc")) != -1) {
        switch (c) {
        case 'v':
            opts.verbose = 1;
            break;
        case 'H':
            opts.scripted = 1;
            break;
        case 'f':
            opts.follow = 1;
            break;
        case 'c':
            opts.clear = 1;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }
    argc -= optind;
    argv += optind;

    if (argc > 1) {
        (void) fprintf(stderr, gettext("too many arguments\n"));
        usage(B_FALSE);
    } else if (argc == 1) {
        (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
        if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
            (void) fprintf(stderr,
                gettext("invalid pool name '%s'\n"), opts.poolname);
            usage(B_FALSE);
        }
    }

    if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
        opts.clear) {
        (void) fprintf(stderr,
            gettext("invalid options combined with -c\n"));
        usage(B_FALSE);
    }

    if (opts.clear)
        ret = zpool_do_events_clear();
    else
        ret = zpool_do_events_next(&opts);

    return (ret);
}
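/*
 * Example (illustrative): 'zpool events -v' prints a timestamped class
 * line for each event followed by its nvlist payload, e.g.:
 *
 * TIME                           CLASS
 * Jun 30 1993 21:49:08.123456789 ereport.fs.zfs.checksum
 *         class = "ereport.fs.zfs.checksum"
 *         pool = "tank"
 *         ...
 */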
static int
get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
{
    zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
    char value[ZFS_MAXPROPLEN];
    zprop_source_t srctype;
    nvlist_t *props, *item, *d;
    props = item = d = NULL;

    if (cbp->cb_json) {
        d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "vdevs");
        if (d == NULL) {
            fprintf(stderr, "vdevs obj not found.\n");
            exit(1);
        }
        props = fnvlist_alloc();
    }

    for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
        pl = pl->pl_next) {
        char *prop_name;
        /*
         * If the first property is pool name, it is a special
         * placeholder that we can skip. This will also skip
         * over the name property when 'all' is specified.
         */
        if (pl->pl_prop == ZPOOL_PROP_NAME &&
            pl == cbp->cb_proplist)
            continue;

        if (pl->pl_prop == ZPROP_INVAL) {
            prop_name = pl->pl_user_prop;
        } else {
            prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
        }
        if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
            prop_name, value, sizeof (value), &srctype,
            cbp->cb_literal) == 0) {
            zprop_collect_property(vdevname, cbp, prop_name,
                value, srctype, NULL, NULL, props);
        }
    }

    if (cbp->cb_json) {
        if (!nvlist_empty(props)) {
            item = fnvlist_alloc();
            fill_vdev_info(item, zhp, vdevname, B_TRUE,
                cbp->cb_json_as_int);
            fnvlist_add_nvlist(item, "properties", props);
            fnvlist_add_nvlist(d, vdevname, item);
            fnvlist_add_nvlist(cbp->cb_jsobj, "vdevs", d);
            fnvlist_free(item);
        }
        fnvlist_free(props);
    }

    return (0);
}

static int
get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
{
    zpool_handle_t *zhp = zhp_data;
    zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
    char *vdevname;
    const char *type;
    int ret;

    /*
     * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to
     * the pool name for display purposes, which is not desired. Fall
     * back to zpool_vdev_name() when not dealing with the root vdev.
     */
    type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
    if (zhp != NULL && strcmp(type, "root") == 0)
        vdevname = strdup("root-0");
    else
        vdevname = zpool_vdev_name(g_zfs, zhp, nv,
            cbp->cb_vdevs.cb_name_flags);

    (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);

    ret = get_callback_vdev(zhp, vdevname, data);

    free(vdevname);

    return (ret);
}
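/*
 * Examples (illustrative): vdev properties may be requested for specific
 * vdevs or for every vdev at once ("all-vdevs" is recognized by
 * get_callback() below):
 *
 *      # zpool get size tank mirror-0
 *      # zpool get all tank all-vdevs
 */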
static int
get_callback(zpool_handle_t *zhp, void *data)
{
	zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
	char value[ZFS_MAXPROPLEN];
	zprop_source_t srctype;
	zprop_list_t *pl;
	int vid;
	int err = 0;
	nvlist_t *props, *item, *d;
	props = item = d = NULL;

	if (cbp->cb_type == ZFS_TYPE_VDEV) {
		if (cbp->cb_json) {
			nvlist_t *pool = fnvlist_alloc();
			fill_pool_info(pool, zhp, B_FALSE, cbp->cb_json_as_int);
			fnvlist_add_nvlist(cbp->cb_jsobj, "pool", pool);
			fnvlist_free(pool);
		}

		if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
			for_each_vdev(zhp, get_callback_vdev_cb, data);
		} else {
			/* Adjust column widths for vdev properties */
			for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
			    vid++) {
				vdev_expand_proplist(zhp,
				    cbp->cb_vdevs.cb_names[vid],
				    &cbp->cb_proplist);
			}
			/* Display the properties */
			for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
			    vid++) {
				get_callback_vdev(zhp,
				    cbp->cb_vdevs.cb_names[vid], data);
			}
		}
	} else {
		assert(cbp->cb_type == ZFS_TYPE_POOL);
		if (cbp->cb_json) {
			d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
			if (d == NULL) {
				fprintf(stderr, "pools obj not found.\n");
				exit(1);
			}
			props = fnvlist_alloc();
		}
		for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
			/*
			 * Skip the special fake placeholder. This will also
			 * skip over the name property when 'all' is specified.
			 */
			if (pl->pl_prop == ZPOOL_PROP_NAME &&
			    pl == cbp->cb_proplist)
				continue;

			if (pl->pl_prop == ZPROP_INVAL &&
			    zfs_prop_user(pl->pl_user_prop)) {
				srctype = ZPROP_SRC_LOCAL;

				if (zpool_get_userprop(zhp, pl->pl_user_prop,
				    value, sizeof (value), &srctype) != 0)
					continue;

				err = zprop_collect_property(
				    zpool_get_name(zhp), cbp, pl->pl_user_prop,
				    value, srctype, NULL, NULL, props);
			} else if (pl->pl_prop == ZPROP_INVAL &&
			    (zpool_prop_feature(pl->pl_user_prop) ||
			    zpool_prop_unsupported(pl->pl_user_prop))) {
				srctype = ZPROP_SRC_LOCAL;

				if (zpool_prop_get_feature(zhp,
				    pl->pl_user_prop, value,
				    sizeof (value)) == 0) {
					err = zprop_collect_property(
					    zpool_get_name(zhp), cbp,
					    pl->pl_user_prop, value, srctype,
					    NULL, NULL, props);
				}
			} else {
				if (zpool_get_prop(zhp, pl->pl_prop, value,
				    sizeof (value), &srctype,
				    cbp->cb_literal) != 0)
					continue;

				err = zprop_collect_property(
				    zpool_get_name(zhp), cbp,
				    zpool_prop_to_name(pl->pl_prop),
				    value, srctype, NULL, NULL, props);
			}
			if (err != 0)
				return (err);
		}

		if (cbp->cb_json) {
			if (!nvlist_empty(props)) {
				item = fnvlist_alloc();
				fill_pool_info(item, zhp, B_TRUE,
				    cbp->cb_json_as_int);
				fnvlist_add_nvlist(item, "properties", props);
				if (cbp->cb_json_pool_key_guid) {
					char buf[256];
					uint64_t guid = fnvlist_lookup_uint64(
					    zpool_get_config(zhp, NULL),
					    ZPOOL_CONFIG_POOL_GUID);
					snprintf(buf, 256, "%llu",
					    (u_longlong_t)guid);
					fnvlist_add_nvlist(d, buf, item);
				} else {
					const char *name = zpool_get_name(zhp);
					fnvlist_add_nvlist(d, name, item);
				}
				fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
				fnvlist_free(item);
			}
			fnvlist_free(props);
		}
	}

	return (0);
}

/*
 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
 *
 *	-H	Scripted mode.  Don't display headers, and separate properties
 *		by a single tab.
 *	-o	List of columns to display.  Defaults to
 *		"name,property,value,source".
 *	-p	Display values in parsable (exact) format.
 *	-j	Display output in JSON format.
 *	--json-int	Display numbers as integers instead of strings.
 *	--json-pool-key-guid	Set pool GUID as key for pool objects.
 *
 * Get properties of pools in the system.  Output space statistics
 * for each one as well as other attributes.
 */
int
zpool_do_get(int argc, char **argv)
{
	zprop_get_cbdata_t cb = { 0 };
	zprop_list_t fake_name = { 0 };
	int ret;
	int c, i;
	char *propstr = NULL;
	char *vdev = NULL;
	nvlist_t *data = NULL;

	cb.cb_first = B_TRUE;

	/*
	 * Set up default columns and sources.
	 */
	cb.cb_sources = ZPROP_SRC_ALL;
	cb.cb_columns[0] = GET_COL_NAME;
	cb.cb_columns[1] = GET_COL_PROPERTY;
	cb.cb_columns[2] = GET_COL_VALUE;
	cb.cb_columns[3] = GET_COL_SOURCE;
	cb.cb_type = ZFS_TYPE_POOL;
	cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
	current_prop_type = cb.cb_type;

	struct option long_options[] = {
		{"json", no_argument, NULL, 'j'},
		{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
		{"json-pool-key-guid", no_argument, NULL,
		    ZPOOL_OPTION_POOL_KEY_GUID},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, ":jHpo:", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'p':
			cb.cb_literal = B_TRUE;
			break;
		case 'H':
			cb.cb_scripted = B_TRUE;
			break;
		case 'j':
			cb.cb_json = B_TRUE;
			cb.cb_jsobj = zpool_json_schema(0, 1);
			data = fnvlist_alloc();
			break;
		case ZPOOL_OPTION_POOL_KEY_GUID:
			cb.cb_json_pool_key_guid = B_TRUE;
			break;
		case ZPOOL_OPTION_JSON_NUMS_AS_INT:
			cb.cb_json_as_int = B_TRUE;
			cb.cb_literal = B_TRUE;
			break;
		case 'o':
			memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
			i = 0;

			for (char *tok; (tok = strsep(&optarg, ",")); ) {
				static const char *const col_opts[] =
				    { "name", "property", "value", "source",
				    "all" };
				static const zfs_get_column_t col_cols[] =
				    { GET_COL_NAME, GET_COL_PROPERTY,
				    GET_COL_VALUE, GET_COL_SOURCE };

				if (i == ZFS_GET_NCOLS - 1) {
					(void) fprintf(stderr, gettext("too "
					    "many fields given to -o "
					    "option\n"));
					usage(B_FALSE);
				}

				for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
					if (strcmp(tok, col_opts[c]) == 0)
						goto found;

				(void) fprintf(stderr,
				    gettext("invalid column name '%s'\n"), tok);
				usage(B_FALSE);

found:
				if (c >= 4) {
					if (i > 0) {
						(void) fprintf(stderr,
						    gettext("\"all\" conflicts "
						    "with specific fields "
						    "given to -o option\n"));
						usage(B_FALSE);
					}

					memcpy(cb.cb_columns, col_cols,
					    sizeof (col_cols));
					i = ZFS_GET_NCOLS - 1;
				} else
					cb.cb_columns[i++] = col_cols[c];
			}
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	if (!cb.cb_json && cb.cb_json_as_int) {
		(void) fprintf(stderr, gettext("'--json-int' only works with"
		    " '-j' option\n"));
		usage(B_FALSE);
	}

	if (!cb.cb_json && cb.cb_json_pool_key_guid) {
		(void) fprintf(stderr, gettext("'json-pool-key-guid' only"
		    " works with '-j' option\n"));
		usage(B_FALSE);
	}

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing property "
		    "argument\n"));
		usage(B_FALSE);
	}

	/* Properties list is needed later by zprop_get_list() */
	propstr = argv[0];

	argc--;
	argv++;

	if (argc == 0) {
		/* No args, so just print the defaults. */
	} else if (are_all_pools(argc, argv)) {
		/* All the args are pool names */
	} else if (are_all_pools(1, argv)) {
		/* The first arg is a pool name */
		if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
		    (argc == 2 && strcmp(argv[1], "root") == 0) ||
		    are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
		    &cb.cb_vdevs)) {

			if (strcmp(argv[1], "root") == 0)
				vdev = strdup("root-0");

			/* ... and the rest are vdev names */
			if (vdev == NULL)
				cb.cb_vdevs.cb_names = argv + 1;
			else
				cb.cb_vdevs.cb_names = &vdev;

			cb.cb_vdevs.cb_names_count = argc - 1;
			cb.cb_type = ZFS_TYPE_VDEV;
			argc = 1; /* One pool to process */
		} else {
			if (cb.cb_json) {
				nvlist_free(cb.cb_jsobj);
				nvlist_free(data);
			}
			fprintf(stderr, gettext("Expected a list of vdevs in"
			    " \"%s\", but got:\n"), argv[0]);
			error_list_unresolved_vdevs(argc - 1, argv + 1,
			    argv[0], &cb.cb_vdevs);
			fprintf(stderr, "\n");
			usage(B_FALSE);
			return (1);
		}
	} else {
		if (cb.cb_json) {
			nvlist_free(cb.cb_jsobj);
			nvlist_free(data);
		}
		/*
		 * The first arg isn't the name of a valid pool.
		 */
		fprintf(stderr, gettext("Cannot get properties of %s: "
		    "no such pool available.\n"), argv[0]);
		return (1);
	}

	if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
	    cb.cb_type) != 0) {
		/* Use correct list of valid properties (pool or vdev) */
		current_prop_type = cb.cb_type;
		usage(B_FALSE);
	}

	if (cb.cb_proplist != NULL) {
		fake_name.pl_prop = ZPOOL_PROP_NAME;
		fake_name.pl_width = strlen(gettext("NAME"));
		fake_name.pl_next = cb.cb_proplist;
		cb.cb_proplist = &fake_name;
	}

	if (cb.cb_json) {
		if (cb.cb_type == ZFS_TYPE_VDEV)
			fnvlist_add_nvlist(cb.cb_jsobj, "vdevs", data);
		else
			fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
		fnvlist_free(data);
	}

	ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
	    cb.cb_literal, get_callback, &cb);

	if (ret == 0 && cb.cb_json)
		zcmd_print_json(cb.cb_jsobj);
	else if (ret != 0 && cb.cb_json)
		nvlist_free(cb.cb_jsobj);

	if (cb.cb_proplist == &fake_name)
		zprop_free_list(fake_name.pl_next);
	else
		zprop_free_list(cb.cb_proplist);

	if (vdev != NULL)
		free(vdev);

	return (ret);
}
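/*
 * Illustrative invocations of 'zpool get' as implemented above (a sketch,
 * not an exhaustive list; see zpool-get(8)):
 *
 *	zpool get all tank		# every pool property of 'tank'
 *	zpool get size,capacity tank	# a comma-separated subset
 *	zpool get -Hp free tank		# exact value, script-friendly output
 *	zpool get -j all tank		# JSON output
 *	zpool get all tank all-vdevs	# vdev properties for every vdev
 */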
Output space statistics12722* for each one as well as other attributes.12723*/12724int12725zpool_do_get(int argc, char **argv)12726{12727zprop_get_cbdata_t cb = { 0 };12728zprop_list_t fake_name = { 0 };12729int ret;12730int c, i;12731char *propstr = NULL;12732char *vdev = NULL;12733nvlist_t *data = NULL;1273412735cb.cb_first = B_TRUE;1273612737/*12738* Set up default columns and sources.12739*/12740cb.cb_sources = ZPROP_SRC_ALL;12741cb.cb_columns[0] = GET_COL_NAME;12742cb.cb_columns[1] = GET_COL_PROPERTY;12743cb.cb_columns[2] = GET_COL_VALUE;12744cb.cb_columns[3] = GET_COL_SOURCE;12745cb.cb_type = ZFS_TYPE_POOL;12746cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;12747current_prop_type = cb.cb_type;1274812749struct option long_options[] = {12750{"json", no_argument, NULL, 'j'},12751{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},12752{"json-pool-key-guid", no_argument, NULL,12753ZPOOL_OPTION_POOL_KEY_GUID},12754{0, 0, 0, 0}12755};1275612757/* check options */12758while ((c = getopt_long(argc, argv, ":jHpo:", long_options,12759NULL)) != -1) {12760switch (c) {12761case 'p':12762cb.cb_literal = B_TRUE;12763break;12764case 'H':12765cb.cb_scripted = B_TRUE;12766break;12767case 'j':12768cb.cb_json = B_TRUE;12769cb.cb_jsobj = zpool_json_schema(0, 1);12770data = fnvlist_alloc();12771break;12772case ZPOOL_OPTION_POOL_KEY_GUID:12773cb.cb_json_pool_key_guid = B_TRUE;12774break;12775case ZPOOL_OPTION_JSON_NUMS_AS_INT:12776cb.cb_json_as_int = B_TRUE;12777cb.cb_literal = B_TRUE;12778break;12779case 'o':12780memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));12781i = 0;1278212783for (char *tok; (tok = strsep(&optarg, ",")); ) {12784static const char *const col_opts[] =12785{ "name", "property", "value", "source",12786"all" };12787static const zfs_get_column_t col_cols[] =12788{ GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,12789GET_COL_SOURCE };1279012791if (i == ZFS_GET_NCOLS - 1) {12792(void) fprintf(stderr, gettext("too "12793"many fields given to -o "12794"option\n"));12795usage(B_FALSE);12796}1279712798for (c = 0; c < ARRAY_SIZE(col_opts); ++c)12799if (strcmp(tok, col_opts[c]) == 0)12800goto found;1280112802(void) fprintf(stderr,12803gettext("invalid column name '%s'\n"), tok);12804usage(B_FALSE);1280512806found:12807if (c >= 4) {12808if (i > 0) {12809(void) fprintf(stderr,12810gettext("\"all\" conflicts "12811"with specific fields "12812"given to -o option\n"));12813usage(B_FALSE);12814}1281512816memcpy(cb.cb_columns, col_cols,12817sizeof (col_cols));12818i = ZFS_GET_NCOLS - 1;12819} else12820cb.cb_columns[i++] = col_cols[c];12821}12822break;12823case '?':12824(void) fprintf(stderr, gettext("invalid option '%c'\n"),12825optopt);12826usage(B_FALSE);12827}12828}1282912830argc -= optind;12831argv += optind;1283212833if (!cb.cb_json && cb.cb_json_as_int) {12834(void) fprintf(stderr, gettext("'--json-int' only works with"12835" '-j' option\n"));12836usage(B_FALSE);12837}1283812839if (!cb.cb_json && cb.cb_json_pool_key_guid) {12840(void) fprintf(stderr, gettext("'json-pool-key-guid' only"12841" works with '-j' option\n"));12842usage(B_FALSE);12843}1284412845if (argc < 1) {12846(void) fprintf(stderr, gettext("missing property "12847"argument\n"));12848usage(B_FALSE);12849}1285012851/* Properties list is needed later by zprop_get_list() */12852propstr = argv[0];1285312854argc--;12855argv++;1285612857if (argc == 0) {12858/* No args, so just print the defaults. 
int
zpool_do_set(int argc, char **argv)
{
	set_cbdata_t cb = { 0 };
	int error;
	char *vdev = NULL;

	current_prop_type = ZFS_TYPE_POOL;
	if (argc > 1 && argv[1][0] == '-') {
		(void) fprintf(stderr, gettext("invalid option '%c'\n"),
		    argv[1][1]);
		usage(B_FALSE);
	}

	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing property=value "
		    "argument\n"));
		usage(B_FALSE);
	}

	if (argc < 3) {
		(void) fprintf(stderr, gettext("missing pool name\n"));
		usage(B_FALSE);
	}

	if (argc > 4) {
		(void) fprintf(stderr, gettext("too many pool names\n"));
		usage(B_FALSE);
	}

	cb.cb_propname = argv[1];
	cb.cb_type = ZFS_TYPE_POOL;
	cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
	cb.cb_value = strchr(cb.cb_propname, '=');
	if (cb.cb_value == NULL) {
		(void) fprintf(stderr, gettext("missing value in "
		    "property=value argument\n"));
		usage(B_FALSE);
	}

	*(cb.cb_value) = '\0';
	cb.cb_value++;
	argc -= 2;
	argv += 2;

	/* argv[0] is pool name */
	if (!is_pool(argv[0])) {
		(void) fprintf(stderr,
		    gettext("cannot open '%s': is not a pool\n"), argv[0]);
		return (EINVAL);
	}

	/* argv[1], when supplied, is vdev name */
	if (argc == 2) {

		if (strcmp(argv[1], "root") == 0)
			vdev = strdup("root-0");
		else
			vdev = strdup(argv[1]);

		if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
			(void) fprintf(stderr, gettext(
			    "cannot find '%s' in '%s': device not in pool\n"),
			    vdev, argv[0]);
			free(vdev);
			return (EINVAL);
		}
		cb.cb_vdevs.cb_names = &vdev;
		cb.cb_vdevs.cb_names_count = 1;
		cb.cb_type = ZFS_TYPE_VDEV;
	}

	error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, set_callback, &cb);

	if (vdev != NULL)
		free(vdev);

	return (error);
}
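/*
 * Illustrative invocations of 'zpool set' as implemented above (a sketch,
 * not an exhaustive list; the device name below is an example only; see
 * zpool-set(8)):
 *
 *	zpool set comment="backup pool" tank		# pool property
 *	zpool set feature@head_errlog=enabled tank	# enable a feature
 *	zpool set checksum_n=10 tank sda		# vdev property
 */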
/* Add up the total number of bytes left to initialize/trim across all vdevs */
static uint64_t
vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
{
	uint64_t bytes_remaining;
	nvlist_t **child;
	uint_t c, children;
	vdev_stat_t *vs;

	assert(activity == ZPOOL_WAIT_INITIALIZE ||
	    activity == ZPOOL_WAIT_TRIM);

	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);

	if (activity == ZPOOL_WAIT_INITIALIZE &&
	    vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
		bytes_remaining = vs->vs_initialize_bytes_est -
		    vs->vs_initialize_bytes_done;
	else if (activity == ZPOOL_WAIT_TRIM &&
	    vs->vs_trim_state == VDEV_TRIM_ACTIVE)
		bytes_remaining = vs->vs_trim_bytes_est -
		    vs->vs_trim_bytes_done;
	else
		bytes_remaining = 0;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	for (c = 0; c < children; c++)
		bytes_remaining += vdev_activity_remaining(child[c], activity);

	return (bytes_remaining);
}

/* Add up the total number of bytes left to rebuild across top-level vdevs */
static uint64_t
vdev_activity_top_remaining(nvlist_t *nv)
{
	uint64_t bytes_remaining = 0;
	nvlist_t **child;
	uint_t children;
	int error;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	for (uint_t c = 0; c < children; c++) {
		vdev_rebuild_stat_t *vrs;
		uint_t i;

		error = nvlist_lookup_uint64_array(child[c],
		    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
		if (error == 0) {
			if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
				bytes_remaining += (vrs->vrs_bytes_est -
				    vrs->vrs_bytes_rebuilt);
			}
		}
	}

	return (bytes_remaining);
}
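/*
 * Worked example for the helpers above (hypothetical numbers): with two
 * leaf vdevs mid-initialize, one reporting vs_initialize_bytes_est = 10G
 * and vs_initialize_bytes_done = 6G, the other 4G estimated and 1G done,
 * vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE) walks the vdev
 * tree and returns (10G - 6G) + (4G - 1G) = 7G still to do.
 */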
/* Whether any vdevs are 'spare' or 'replacing' vdevs */
static boolean_t
vdev_any_spare_replacing(nvlist_t *nv)
{
	nvlist_t **child;
	uint_t c, children;
	const char *vdev_type;

	(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);

	if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
	    strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
	    strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
		return (B_TRUE);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	for (c = 0; c < children; c++) {
		if (vdev_any_spare_replacing(child[c]))
			return (B_TRUE);
	}

	return (B_FALSE);
}

typedef struct wait_data {
	char *wd_poolname;
	boolean_t wd_scripted;
	boolean_t wd_exact;
	boolean_t wd_headers_once;
	boolean_t wd_should_exit;
	/* Which activities to wait for */
	boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
	float wd_interval;
	pthread_cond_t wd_cv;
	pthread_mutex_t wd_mutex;
} wait_data_t;

/*
 * Print to stdout a single line, containing one column for each activity that
 * we are waiting for, specifying how many bytes of work are left for that
 * activity.
 */
static void
print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
{
	nvlist_t *config, *nvroot;
	uint_t c;
	int i;
	pool_checkpoint_stat_t *pcs = NULL;
	pool_scan_stat_t *pss = NULL;
	pool_removal_stat_t *prs = NULL;
	pool_raidz_expand_stat_t *pres = NULL;
	const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
	    "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};
	int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];

	/* Calculate the width of each column */
	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
		/*
		 * Make sure we have enough space in the col for pretty-printed
		 * numbers and for the column header, and then leave a couple
		 * spaces between cols for readability.
		 */
		col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
	}

	if (timestamp_fmt != NODATE)
		print_timestamp(timestamp_fmt);

	/* Print header if appropriate */
	int term_height = terminal_height();
	boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
	    row % (term_height - 1) == 0);
	if (!wd->wd_scripted && (row == 0 || reprint_header)) {
		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
			if (wd->wd_enabled[i])
				(void) printf("%*s", col_widths[i], headers[i]);
		}
		(void) fputc('\n', stdout);
	}

	/* Bytes of work remaining in each activity */
	int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};

	bytes_rem[ZPOOL_WAIT_FREE] =
	    zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);

	config = zpool_get_config(zhp, NULL);
	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
	if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
		bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
	if (prs != NULL && prs->prs_state == DSS_SCANNING)
		bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
		    prs->prs_copied;

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
	if (pss != NULL && pss->pss_state == DSS_SCANNING &&
	    pss->pss_pass_scrub_pause == 0) {
		int64_t rem = pss->pss_to_examine - pss->pss_issued;
		if (pss->pss_func == POOL_SCAN_SCRUB)
			bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
		else
			bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
	} else if (check_rebuilding(nvroot, NULL)) {
		bytes_rem[ZPOOL_WAIT_RESILVER] =
		    vdev_activity_top_remaining(nvroot);
	}

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
	if (pres != NULL && pres->pres_state == DSS_SCANNING) {
		int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;
		bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;
	}

	bytes_rem[ZPOOL_WAIT_INITIALIZE] =
	    vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
	bytes_rem[ZPOOL_WAIT_TRIM] =
	    vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);

	/*
	 * A replace finishes after resilvering finishes, so the amount of work
	 * left for a replace is the same as for resilvering.
	 *
	 * It isn't quite correct to say that if we have any 'spare' or
	 * 'replacing' vdevs and a resilver is happening, then a replace is in
	 * progress, like we do here. When a hot spare is used, the faulted
	 * vdev is not removed after the hot spare is resilvered, so the parent
	 * 'spare' vdev is not removed either. So we could have a 'spare' vdev,
	 * but be resilvering for a different reason. However, we use it as a
	 * heuristic because we don't have access to the DTLs, which could tell
	 * us whether or not we have really finished resilvering a hot spare.
	 */
	if (vdev_any_spare_replacing(nvroot))
		bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];

	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
		char buf[64];
		if (!wd->wd_enabled[i])
			continue;

		if (wd->wd_exact) {
			(void) snprintf(buf, sizeof (buf), "%" PRIi64,
			    bytes_rem[i]);
		} else {
			zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
		}

		if (wd->wd_scripted)
			(void) printf(i == 0 ? "%s" : "\t%s", buf);
		else
			(void) printf(" %*s", col_widths[i] - 1, buf);
	}
	(void) printf("\n");
	(void) fflush(stdout);
}
static void *
wait_status_thread(void *arg)
{
	wait_data_t *wd = (wait_data_t *)arg;
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
		return (void *)(1);

	for (int row = 0; ; row++) {
		boolean_t missing;
		struct timespec timeout;
		int ret = 0;
		(void) clock_gettime(CLOCK_REALTIME, &timeout);

		if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
		    zpool_props_refresh(zhp) != 0) {
			zpool_close(zhp);
			return (void *)(uintptr_t)(missing ? 0 : 1);
		}

		print_wait_status_row(wd, zhp, row);

		timeout.tv_sec += floor(wd->wd_interval);
		long nanos = timeout.tv_nsec +
		    (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
		if (nanos >= NANOSEC) {
			timeout.tv_sec++;
			timeout.tv_nsec = nanos - NANOSEC;
		} else {
			timeout.tv_nsec = nanos;
		}
		pthread_mutex_lock(&wd->wd_mutex);
		if (!wd->wd_should_exit)
			ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
			    &timeout);
		pthread_mutex_unlock(&wd->wd_mutex);
		if (ret == 0) {
			break; /* signaled by main thread */
		} else if (ret != ETIMEDOUT) {
			(void) fprintf(stderr, gettext("pthread_cond_timedwait "
			    "failed: %s\n"), strerror(ret));
			zpool_close(zhp);
			return (void *)(uintptr_t)(1);
		}
	}

	zpool_close(zhp);
	return (void *)(0);
}
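/*
 * Worked example for the timeout arithmetic in wait_status_thread()
 * (hypothetical numbers): with wd_interval = 1.5 and tv_nsec = 800000000,
 * the whole second is added to tv_sec and the fractional half second gives
 * nanos = 800000000 + 500000000 = 1300000000.  That exceeds NANOSEC, so the
 * carry bumps tv_sec once more and tv_nsec becomes 300000000, keeping
 * tv_nsec within [0, NANOSEC) as pthread_cond_timedwait() requires.
 */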
int
zpool_do_wait(int argc, char **argv)
{
	boolean_t verbose = B_FALSE;
	int c, i;
	unsigned long count;
	pthread_t status_thr;
	int error = 0;
	zpool_handle_t *zhp;

	wait_data_t wd;
	wd.wd_scripted = B_FALSE;
	wd.wd_exact = B_FALSE;
	wd.wd_headers_once = B_FALSE;
	wd.wd_should_exit = B_FALSE;

	pthread_mutex_init(&wd.wd_mutex, NULL);
	pthread_cond_init(&wd.wd_cv, NULL);

	/* By default, wait for all types of activity. */
	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
		wd.wd_enabled[i] = B_TRUE;

	while ((c = getopt(argc, argv, "HnpT:t:")) != -1) {
		switch (c) {
		case 'H':
			wd.wd_scripted = B_TRUE;
			break;
		case 'n':
			wd.wd_headers_once = B_TRUE;
			break;
		case 'p':
			wd.wd_exact = B_TRUE;
			break;
		case 'T':
			get_timestamp_arg(*optarg);
			break;
		case 't':
			/* Reset activities array */
			memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));

			for (char *tok; (tok = strsep(&optarg, ",")); ) {
				static const char *const col_opts[] = {
				    "discard", "free", "initialize", "replace",
				    "remove", "resilver", "scrub", "trim",
				    "raidz_expand" };

				for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
					if (strcmp(tok, col_opts[i]) == 0) {
						wd.wd_enabled[i] = B_TRUE;
						goto found;
					}

				(void) fprintf(stderr,
				    gettext("invalid activity '%s'\n"), tok);
				usage(B_FALSE);
found:;
			}
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	get_interval_count(&argc, argv, &wd.wd_interval, &count);
	if (count != 0) {
		/* This subcmd only accepts an interval, not a count */
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	if (wd.wd_interval != 0)
		verbose = B_TRUE;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	wd.wd_poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
		return (1);

	if (verbose) {
		/*
		 * We use a separate thread for printing status updates because
		 * the main thread will call lzc_wait(), which blocks as long
		 * as an activity is in progress, which can be a long time.
		 */
		if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
		    != 0) {
			(void) fprintf(stderr, gettext("failed to create "
			    "status thread: %s\n"), strerror(errno));
			zpool_close(zhp);
			return (1);
		}
	}

	/*
	 * Loop over all activities that we are supposed to wait for until none
	 * of them are in progress. Note that this means we can end up waiting
	 * for more activities to complete than just those that were in
	 * progress when we began waiting; if an activity we are interested in
	 * begins while we are waiting for another activity, we will wait for
	 * both to complete before exiting.
	 */
	for (;;) {
		boolean_t missing = B_FALSE;
		boolean_t any_waited = B_FALSE;

		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
			boolean_t waited;

			if (!wd.wd_enabled[i])
				continue;

			error = zpool_wait_status(zhp, i, &missing, &waited);
			if (error != 0 || missing)
				break;

			any_waited = (any_waited || waited);
		}

		if (error != 0 || missing || !any_waited)
			break;
	}

	zpool_close(zhp);

	if (verbose) {
		uintptr_t status;
		pthread_mutex_lock(&wd.wd_mutex);
		wd.wd_should_exit = B_TRUE;
		pthread_cond_signal(&wd.wd_cv);
		pthread_mutex_unlock(&wd.wd_mutex);
		(void) pthread_join(status_thr, (void *)&status);
		if (status != 0)
			error = status;
	}

	pthread_mutex_destroy(&wd.wd_mutex);
	pthread_cond_destroy(&wd.wd_cv);
	return (error);
}
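/*
 * Illustrative invocations of 'zpool wait' as implemented above (a sketch,
 * not an exhaustive list; see zpool-wait(8)):
 *
 *	zpool wait tank			# block until all activity finishes
 *	zpool wait -t scrub tank	# wait only for a scrub
 *	zpool wait -t resilver,replace tank 5
 *					# also print progress every 5 seconds
 */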
/*
 * zpool ddtprune -d|-p <amount> <pool>
 *
 *	-d <days>	Prune entries <days> old and older
 *	-p <percent>	Prune <percent> amount of entries
 *
 * Prune single-reference entries from the DDT to satisfy the amount specified.
 */
int
zpool_do_ddt_prune(int argc, char **argv)
{
	zpool_ddt_prune_unit_t unit = ZPOOL_DDT_PRUNE_NONE;
	uint64_t amount = 0;
	zpool_handle_t *zhp;
	char *endptr;
	int c;

	while ((c = getopt(argc, argv, "d:p:")) != -1) {
		switch (c) {
		case 'd':
			if (unit == ZPOOL_DDT_PRUNE_PERCENTAGE) {
				(void) fprintf(stderr, gettext("-d cannot be "
				    "combined with -p option\n"));
				usage(B_FALSE);
			}
			errno = 0;
			amount = strtoull(optarg, &endptr, 0);
			if (errno != 0 || *endptr != '\0' || amount == 0) {
				(void) fprintf(stderr,
				    gettext("invalid days value\n"));
				usage(B_FALSE);
			}
			amount *= 86400;	/* convert days to seconds */
			unit = ZPOOL_DDT_PRUNE_AGE;
			break;
		case 'p':
			if (unit == ZPOOL_DDT_PRUNE_AGE) {
				(void) fprintf(stderr, gettext("-p cannot be "
				    "combined with -d option\n"));
				usage(B_FALSE);
			}
			errno = 0;
			amount = strtoull(optarg, &endptr, 0);
			if (errno != 0 || *endptr != '\0' ||
			    amount == 0 || amount > 100) {
				(void) fprintf(stderr,
				    gettext("invalid percentage value\n"));
				usage(B_FALSE);
			}
			unit = ZPOOL_DDT_PRUNE_PERCENTAGE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}
	argc -= optind;
	argv += optind;

	if (unit == ZPOOL_DDT_PRUNE_NONE) {
		(void) fprintf(stderr,
		    gettext("missing amount option (-d|-p <value>)\n"));
		usage(B_FALSE);
	} else if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	} else if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}
	zhp = zpool_open(g_zfs, argv[0]);
	if (zhp == NULL)
		return (-1);

	int error = zpool_ddt_prune(zhp, unit, amount);

	zpool_close(zhp);

	return (error);
}
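/*
 * Illustrative invocations of 'zpool ddtprune' as implemented above
 * (a sketch, not an exhaustive list; see zpool-ddtprune(8)):
 *
 *	zpool ddtprune -d 30 tank	# prune entries 30 days old and older
 *	zpool ddtprune -p 10 tank	# prune 10 percent of eligible entries
 */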
static int
find_command_idx(const char *command, int *idx)
{
	for (int i = 0; i < NCOMMAND; ++i) {
		if (command_table[i].name == NULL)
			continue;

		if (strcmp(command, command_table[i].name) == 0) {
			*idx = i;
			return (0);
		}
	}
	return (1);
}

/*
 * Display version message
 */
static int
zpool_do_version(int argc, char **argv)
{
	int c;
	nvlist_t *jsobj = NULL, *zfs_ver = NULL;
	boolean_t json = B_FALSE;

	struct option long_options[] = {
		{"json", no_argument, NULL, 'j'},
		{0, 0, 0, 0}
	};

	while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {
		switch (c) {
		case 'j':
			json = B_TRUE;
			jsobj = zpool_json_schema(0, 1);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	if (argc != 0) {
		(void) fprintf(stderr, "too many arguments\n");
		usage(B_FALSE);
	}

	if (json) {
		zfs_ver = zfs_version_nvlist();
		if (zfs_ver) {
			fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver);
			zcmd_print_json(jsobj);
			fnvlist_free(zfs_ver);
			return (0);
		} else
			return (-1);
	} else
		return (zfs_version_print() != 0);
}

/* Display documentation */
static int
zpool_do_help(int argc, char **argv)
{
	char page[MAXNAMELEN];
	if (argc < 3 || strcmp(argv[2], "zpool") == 0)
		strcpy(page, "zpool");
	else if (strcmp(argv[2], "concepts") == 0 ||
	    strcmp(argv[2], "props") == 0)
		snprintf(page, sizeof (page), "zpool%s", argv[2]);
	else
		snprintf(page, sizeof (page), "zpool-%s", argv[2]);

	execlp("man", "man", page, NULL);

	fprintf(stderr, "couldn't run man program: %s", strerror(errno));
	return (-1);
}

/*
 * Do zpool_load_compat() and print error message on failure
 */
static zpool_compat_status_t
zpool_do_load_compat(const char *compat, boolean_t *list)
{
	char report[1024];

	zpool_compat_status_t ret;

	ret = zpool_load_compat(compat, list, report, 1024);
	switch (ret) {

	case ZPOOL_COMPATIBILITY_OK:
		break;

	case ZPOOL_COMPATIBILITY_NOFILES:
	case ZPOOL_COMPATIBILITY_BADFILE:
	case ZPOOL_COMPATIBILITY_BADTOKEN:
		(void) fprintf(stderr, "Error: %s\n", report);
		break;

	case ZPOOL_COMPATIBILITY_WARNTOKEN:
		(void) fprintf(stderr, "Warning: %s\n", report);
		ret = ZPOOL_COMPATIBILITY_OK;
		break;
	}
	return (ret);
}
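/*
 * Illustrative 'compatibility' values that flow through
 * zpool_do_load_compat() above (the feature-set file name is an example;
 * available sets depend on what is installed in the system's
 * compatibility.d directories; see zpool-features(7)):
 *
 *	zpool set compatibility=off tank		# no restrictions
 *	zpool set compatibility=legacy tank		# no features at all
 *	zpool set compatibility=openzfs-2.1-linux tank	# a shipped feature set
 */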
int
main(int argc, char **argv)
{
	int ret = 0;
	int i = 0;
	char *cmdname;
	char **newargv;

	(void) setlocale(LC_ALL, "");
	(void) setlocale(LC_NUMERIC, "C");
	(void) textdomain(TEXT_DOMAIN);
	srand(time(NULL));

	opterr = 0;

	/*
	 * Make sure the user has specified some command.
	 */
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing command\n"));
		usage(B_FALSE);
	}

	cmdname = argv[1];

	/*
	 * Special case '-?'
	 */
	if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
		usage(B_TRUE);

	/*
	 * Special case '-V|--version'
	 */
	if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
		return (zfs_version_print() != 0);

	/*
	 * Special case 'help'
	 */
	if (strcmp(cmdname, "help") == 0)
		return (zpool_do_help(argc, argv));

	if ((g_zfs = libzfs_init()) == NULL) {
		(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
		return (1);
	}

	libzfs_print_on_error(g_zfs, B_TRUE);

	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));

	/*
	 * Many commands modify input strings for string parsing reasons.
	 * We create a copy to protect the original argv.
	 */
	newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
	for (i = 0; i < argc; i++)
		newargv[i] = strdup(argv[i]);
	newargv[argc] = NULL;

	/*
	 * Run the appropriate command.
	 */
	if (find_command_idx(cmdname, &i) == 0) {
		current_command = &command_table[i];
		ret = command_table[i].func(argc - 1, newargv + 1);
	} else if (strchr(cmdname, '=')) {
		verify(find_command_idx("set", &i) == 0);
		current_command = &command_table[i];
		ret = command_table[i].func(argc, newargv);
	} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
		/*
		 * 'freeze' is a vile debugging abomination, so we treat
		 * it as such.
		 */
		zfs_cmd_t zc = {"\0"};

		(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
		ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
		if (ret != 0) {
			(void) fprintf(stderr,
			    gettext("failed to freeze pool: %d\n"), errno);
			ret = 1;
		}

		log_history = 0;
	} else {
		(void) fprintf(stderr, gettext("unrecognized "
		    "command '%s'\n"), cmdname);
		usage(B_FALSE);
		ret = 1;
	}

	for (i = 0; i < argc; i++)
		free(newargv[i]);
	free(newargv);

	if (ret == 0 && log_history)
		(void) zpool_log_history(g_zfs, history_str);

	libzfs_fini(g_zfs);

	/*
	 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
	 * for the purposes of running ::findleaks.
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	return (ret);
}