GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/openzfs/cmd/zpool/zpool_main.c
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
 * Copyright 2016 Igor Kozhukhov <[email protected]>.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <[email protected]>
 * Copyright (c) 2021, Colm Buckley <[email protected]>
 * Copyright (c) 2021, 2023, 2025, Klara, Inc.
 * Copyright (c) 2021, 2025 Hewlett Packard Enterprise Development LP.
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <inttypes.h>
#include <libgen.h>
#include <libintl.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <math.h>

#include <libzfs.h>
#include <libzutil.h>

#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "zfs_valstr.h"

#include "statcommon.h"

libzfs_handle_t *g_zfs;

static int mount_tp_nthr = 512; /* tpool threads for multi-threaded mounting */

static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);

static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);

static int zpool_do_checkpoint(int, char **);
static int zpool_do_prefetch(int, char **);

static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);

static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);

static int zpool_do_reguid(int, char **);

static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);

static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);

static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);

static int zpool_do_upgrade(int, char **);

static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);

static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);

static int zpool_do_sync(int, char **);

static int zpool_do_version(int, char **);

static int zpool_do_wait(int, char **);

static int zpool_do_ddt_prune(int, char **);

static int zpool_do_help(int argc, char **argv);

static zpool_compat_status_t zpool_do_load_compat(
    const char *, boolean_t *);

enum zpool_options {
	ZPOOL_OPTION_POWER = 1024,
	ZPOOL_OPTION_ALLOW_INUSE,
	ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
	ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH,
	ZPOOL_OPTION_POOL_KEY_GUID,
	ZPOOL_OPTION_JSON_NUMS_AS_INT,
	ZPOOL_OPTION_JSON_FLAT_VDEVS
};

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */

#ifdef DEBUG
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
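
/*
 * In a DEBUG build, the hooks above supply libumem's default option
 * strings, which is roughly equivalent to running the command with
 * UMEM_DEBUG=default,verbose and UMEM_LOGGING=fail,contents set in the
 * environment.
 */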

typedef enum {
	HELP_ADD,
	HELP_ATTACH,
	HELP_CLEAR,
	HELP_CREATE,
	HELP_CHECKPOINT,
	HELP_DDT_PRUNE,
	HELP_DESTROY,
	HELP_DETACH,
	HELP_EXPORT,
	HELP_HISTORY,
	HELP_IMPORT,
	HELP_IOSTAT,
	HELP_LABELCLEAR,
	HELP_LIST,
	HELP_OFFLINE,
	HELP_ONLINE,
	HELP_PREFETCH,
	HELP_REPLACE,
	HELP_REMOVE,
	HELP_INITIALIZE,
	HELP_SCRUB,
	HELP_RESILVER,
	HELP_TRIM,
	HELP_STATUS,
	HELP_UPGRADE,
	HELP_EVENTS,
	HELP_GET,
	HELP_SET,
	HELP_SPLIT,
	HELP_SYNC,
	HELP_REGUID,
	HELP_REOPEN,
	HELP_VERSION,
	HELP_WAIT
} zpool_help_t;


/*
 * Flags for stats to display with "zpool iostat"
 */
enum iostat_type {
	IOS_DEFAULT = 0,
	IOS_LATENCY = 1,
	IOS_QUEUES = 2,
	IOS_L_HISTO = 3,
	IOS_RQ_HISTO = 4,
	IOS_COUNT, /* always last element */
};

/* iostat_type entries as bitmasks */
#define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
#define IOS_LATENCY_M (1ULL << IOS_LATENCY)
#define IOS_QUEUES_M (1ULL << IOS_QUEUES)
#define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
#define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)

/* Mask of all the histo bits */
#define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)

/*
 * Lookup table for iostat flags to nvlist names. Basically a list
 * of all the nvlists a flag requires. Also specifies the order in
 * which data gets printed in zpool iostat.
 */
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
	[IOS_L_HISTO] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_LATENCY] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_QUEUES] = {
	    ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
	    NULL},
	[IOS_RQ_HISTO] = {
	    ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
	    NULL},
};
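
/*
 * Each row of vsx_type_to_nvlist is NULL-terminated, so a row can be
 * walked without knowing its length; for example:
 *
 *	for (int i = 0; vsx_type_to_nvlist[IOS_QUEUES][i] != NULL; i++)
 *		(void) printf("%s\n", vsx_type_to_nvlist[IOS_QUEUES][i]);
 */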

static const char *pool_scan_func_str[] = {
	"NONE",
	"SCRUB",
	"RESILVER",
	"ERRORSCRUB"
};

static const char *pool_scan_state_str[] = {
	"NONE",
	"SCANNING",
	"FINISHED",
	"CANCELED",
	"ERRORSCRUBBING"
};

static const char *vdev_rebuild_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"COMPLETE"
};

static const char *checkpoint_state_str[] = {
	"NONE",
	"EXISTS",
	"DISCARDING"
};

static const char *vdev_state_str[] = {
	"UNKNOWN",
	"CLOSED",
	"OFFLINE",
	"REMOVED",
	"CANT_OPEN",
	"FAULTED",
	"DEGRADED",
	"ONLINE"
};

static const char *vdev_aux_str[] = {
	"NONE",
	"OPEN_FAILED",
	"CORRUPT_DATA",
	"NO_REPLICAS",
	"BAD_GUID_SUM",
	"TOO_SMALL",
	"BAD_LABEL",
	"VERSION_NEWER",
	"VERSION_OLDER",
	"UNSUP_FEAT",
	"SPARED",
	"ERR_EXCEEDED",
	"IO_FAILURE",
	"BAD_LOG",
	"EXTERNAL",
	"SPLIT_POOL",
	"BAD_ASHIFT",
	"EXTERNAL_PERSIST",
	"ACTIVE",
	"CHILDREN_OFFLINE",
	"ASHIFT_TOO_BIG"
};

static const char *vdev_init_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};

static const char *vdev_trim_state_str[] = {
	"NONE",
	"ACTIVE",
	"CANCELED",
	"SUSPENDED",
	"COMPLETE"
};

#define ZFS_NICE_TIMESTAMP 100

/*
 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
 * Right now, only one histo bit is ever set at one time, so we can
 * just do a highbit64(a)
 */
#define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
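
/*
 * For example, if only the latency-histogram bit is set:
 *
 *	IOS_HISTO_IDX(IOS_L_HISTO_M)
 *	    == highbit64(1ULL << IOS_L_HISTO) - 1
 *	    == IOS_L_HISTO
 */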

typedef struct zpool_command {
	const char *name;
	int (*func)(int, char **);
	zpool_help_t usage;
} zpool_command_t;

/*
 * Master command table. Each ZFS command has a name, associated function, and
 * usage message. The usage messages need to be internationalized, so we have
 * to have a function to return the usage message based on a command index.
 *
 * These commands are organized according to how they are displayed in the usage
 * message. An empty command (one with a NULL name) indicates an empty line in
 * the generic usage message.
 */
static zpool_command_t command_table[] = {
	{ "version", zpool_do_version, HELP_VERSION },
	{ NULL },
	{ "create", zpool_do_create, HELP_CREATE },
	{ "destroy", zpool_do_destroy, HELP_DESTROY },
	{ NULL },
	{ "add", zpool_do_add, HELP_ADD },
	{ "remove", zpool_do_remove, HELP_REMOVE },
	{ NULL },
	{ "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
	{ NULL },
	{ "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
	{ "prefetch", zpool_do_prefetch, HELP_PREFETCH },
	{ NULL },
	{ "list", zpool_do_list, HELP_LIST },
	{ "iostat", zpool_do_iostat, HELP_IOSTAT },
	{ "status", zpool_do_status, HELP_STATUS },
	{ NULL },
	{ "online", zpool_do_online, HELP_ONLINE },
	{ "offline", zpool_do_offline, HELP_OFFLINE },
	{ "clear", zpool_do_clear, HELP_CLEAR },
	{ "reopen", zpool_do_reopen, HELP_REOPEN },
	{ NULL },
	{ "attach", zpool_do_attach, HELP_ATTACH },
	{ "detach", zpool_do_detach, HELP_DETACH },
	{ "replace", zpool_do_replace, HELP_REPLACE },
	{ "split", zpool_do_split, HELP_SPLIT },
	{ NULL },
	{ "initialize", zpool_do_initialize, HELP_INITIALIZE },
	{ "resilver", zpool_do_resilver, HELP_RESILVER },
	{ "scrub", zpool_do_scrub, HELP_SCRUB },
	{ "trim", zpool_do_trim, HELP_TRIM },
	{ NULL },
	{ "import", zpool_do_import, HELP_IMPORT },
	{ "export", zpool_do_export, HELP_EXPORT },
	{ "upgrade", zpool_do_upgrade, HELP_UPGRADE },
	{ "reguid", zpool_do_reguid, HELP_REGUID },
	{ NULL },
	{ "history", zpool_do_history, HELP_HISTORY },
	{ "events", zpool_do_events, HELP_EVENTS },
	{ NULL },
	{ "get", zpool_do_get, HELP_GET },
	{ "set", zpool_do_set, HELP_SET },
	{ "sync", zpool_do_sync, HELP_SYNC },
	{ NULL },
	{ "wait", zpool_do_wait, HELP_WAIT },
	{ NULL },
	{ "ddtprune", zpool_do_ddt_prune, HELP_DDT_PRUNE },
};
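
/*
 * A command line such as "zpool scrub tank" is dispatched by matching
 * "scrub" against command_table[].name and invoking the associated
 * zpool_do_scrub() callback with the remaining arguments; the lookup
 * itself lives in main(), below.
 */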

#define NCOMMAND (ARRAY_SIZE(command_table))

#define VDEV_ALLOC_CLASS_LOGS "logs"

#define MAX_CMD_LEN 256

static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;

static const char *
get_usage(zpool_help_t idx)
{
	switch (idx) {
	case HELP_ADD:
		return (gettext("\tadd [-afgLnP] [-o property=value] "
		    "<pool> <vdev> ...\n"));
	case HELP_ATTACH:
		return (gettext("\tattach [-fsw] [-o property=value] "
		    "<pool> <vdev> <new-device>\n"));
	case HELP_CLEAR:
		return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
	case HELP_CREATE:
		return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
		    "\t [-O file-system-property=value] ... \n"
		    "\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
	case HELP_CHECKPOINT:
		return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
	case HELP_DESTROY:
		return (gettext("\tdestroy [-f] <pool>\n"));
	case HELP_DETACH:
		return (gettext("\tdetach <pool> <device>\n"));
	case HELP_EXPORT:
		return (gettext("\texport [-af] <pool> ...\n"));
	case HELP_HISTORY:
		return (gettext("\thistory [-il] [<pool>] ...\n"));
	case HELP_IMPORT:
		return (gettext("\timport [-d dir] [-D]\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]] -a\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]]\n"
		    "\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
	case HELP_IOSTAT:
		return (gettext("\tiostat [[[-c [script1,script2,...]"
		    "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
		    "\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
		    " [[-n] interval [count]]\n"));
	case HELP_LABELCLEAR:
		return (gettext("\tlabelclear [-f] <vdev>\n"));
	case HELP_LIST:
		return (gettext("\tlist [-gHLpPv] [-o property[,...]] [-j "
		    "[--json-int, --json-pool-key-guid]] ...\n"
		    "\t [-T d|u] [pool] [interval [count]]\n"));
	case HELP_PREFETCH:
		return (gettext("\tprefetch [-t <type>] <pool>\n"));
	case HELP_OFFLINE:
		return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
		    "<device> ...\n"));
	case HELP_ONLINE:
		return (gettext("\tonline [--power][-e] <pool> <device> "
		    "...\n"));
	case HELP_REPLACE:
		return (gettext("\treplace [-fsw] [-o property=value] "
		    "<pool> <device> [new-device]\n"));
	case HELP_REMOVE:
		return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
	case HELP_REOPEN:
		return (gettext("\treopen [-n] <pool>\n"));
	case HELP_INITIALIZE:
		return (gettext("\tinitialize [-c | -s | -u] [-w] <-a | <pool> "
		    "[<device> ...]>\n"));
	case HELP_SCRUB:
		return (gettext("\tscrub [-e | -s | -p | -C | -E | -S] [-w] "
		    "<-a | <pool> [<pool> ...]>\n"));
	case HELP_RESILVER:
		return (gettext("\tresilver <pool> ...\n"));
	case HELP_TRIM:
		return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] "
		    "<-a | <pool> [<device> ...]>\n"));
	case HELP_STATUS:
		return (gettext("\tstatus [-DdegiLPpstvx] "
		    "[-c script1[,script2,...]] ...\n"
		    "\t [-j|--json [--json-flat-vdevs] [--json-int] "
		    "[--json-pool-key-guid]] ...\n"
		    "\t [-T d|u] [--power] [pool] [interval [count]]\n"));
	case HELP_UPGRADE:
		return (gettext("\tupgrade\n"
		    "\tupgrade -v\n"
		    "\tupgrade [-V version] <-a | pool ...>\n"));
	case HELP_EVENTS:
		return (gettext("\tevents [-vHf [pool] | -c]\n"));
	case HELP_GET:
		return (gettext("\tget [-Hp] [-j [--json-int, "
		    "--json-pool-key-guid]] ...\n"
		    "\t [-o \"all\" | field[,...]] "
		    "<\"all\" | property[,...]> <pool> ...\n"));
	case HELP_SET:
		return (gettext("\tset <property=value> <pool>\n"
		    "\tset <vdev_property=value> <pool> <vdev>\n"));
	case HELP_SPLIT:
		return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
		    "\t [-o property=value] <pool> <newpool> "
		    "[<device> ...]\n"));
	case HELP_REGUID:
		return (gettext("\treguid [-g guid] <pool>\n"));
	case HELP_SYNC:
		return (gettext("\tsync [pool] ...\n"));
	case HELP_VERSION:
		return (gettext("\tversion [-j]\n"));
	case HELP_WAIT:
		return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
		    "<pool> [interval]\n"));
	case HELP_DDT_PRUNE:
		return (gettext("\tddtprune -d|-p <amount> <pool>\n"));
	default:
		__builtin_unreachable();
	}
}

/*
 * Callback routine that will print out a pool property value.
 */
static int
print_pool_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));

	if (zpool_prop_readonly(prop))
		(void) fprintf(fp, " NO ");
	else
		(void) fprintf(fp, " YES ");

	if (zpool_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", zpool_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Callback routine that will print out a vdev property value.
 */
static int
print_vdev_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));

	if (vdev_prop_readonly(prop))
		(void) fprintf(fp, " NO ");
	else
		(void) fprintf(fp, " YES ");

	if (vdev_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", vdev_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
 * '/dev/disk/by-vdev/L5'.
 */
static const char *
vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
{
	nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
	if (vdev_nv == NULL) {
		return (NULL);
	}
	return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
}

static int
zpool_power_on(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_TRUE));
}

static int
zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
{
	int rc;

	rc = zpool_power_on(zhp, vdev);
	if (rc != 0)
		return (rc);

	(void) zpool_disk_wait(vdev_name_to_path(zhp, vdev));

	return (0);
}

static int
zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
{
	nvlist_t *nv;
	const char *path = NULL;
	int rc;

	/* Power up all the devices first */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		if (path != NULL) {
			rc = zpool_power_on(zhp, (char *)path);
			if (rc != 0) {
				return (rc);
			}
		}
	}

	/*
	 * Wait for their devices to show up. Since we powered them on
	 * at roughly the same time, they should all come online around
	 * the same time.
	 */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		(void) zpool_disk_wait(path);
	}

	return (0);
}

static int
zpool_power_off(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_FALSE));
}

/*
 * Display usage message. If we're inside a command, display only the usage for
 * that command. Otherwise, iterate over the entire command table and display
 * a complete usage message.
 */
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
	FILE *fp = requested ? stdout : stderr;

	if (current_command == NULL) {
		int i;

		(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
		(void) fprintf(fp,
		    gettext("where 'command' is one of the following:\n\n"));

		for (i = 0; i < NCOMMAND; i++) {
			if (command_table[i].name == NULL)
				(void) fprintf(fp, "\n");
			else
				(void) fprintf(fp, "%s",
				    get_usage(command_table[i].usage));
		}

		(void) fprintf(fp,
		    gettext("\nFor further help on a command or topic, "
		    "run: %s\n"), "zpool help [<topic>]");
	} else {
		(void) fprintf(fp, gettext("usage:\n"));
		(void) fprintf(fp, "%s", get_usage(current_command->usage));
	}

	if (current_command != NULL &&
	    current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
	    ((strcmp(current_command->name, "set") == 0) ||
	    (strcmp(current_command->name, "get") == 0) ||
	    (strcmp(current_command->name, "list") == 0))) {

		(void) fprintf(fp, "%s",
		    gettext("\nthe following properties are supported:\n"));

		(void) fprintf(fp, "\n\t%-19s %s %s\n\n",
		    "PROPERTY", "EDIT", "VALUES");

		/* Iterate over all properties */
		if (current_prop_type == ZFS_TYPE_POOL) {
			(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);

			(void) fprintf(fp, "\t%-19s ", "feature@...");
			(void) fprintf(fp, "YES "
			    "disabled | enabled | active\n");

			(void) fprintf(fp, gettext("\nThe feature@ properties "
			    "must be appended with a feature name.\n"
			    "See zpool-features(7).\n"));
		} else if (current_prop_type == ZFS_TYPE_VDEV) {
			(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);
		}
	}

	/*
	 * See comments at end of main().
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	exit(requested ? 0 : 2);
}

/*
 * zpool initialize [-c | -s | -u] [-w] <-a | pool> [<vdev> ...]
 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
 * if none specified.
 *
 * -a	Use all pools.
 * -c	Cancel. Ends active initializing.
 * -s	Suspend. Initializing can then be restarted with no flags.
 * -u	Uninitialize. Clears initialization state.
 * -w	Wait. Blocks until initializing has completed.
 */
int
zpool_do_initialize(int argc, char **argv)
{
	int c;
	char *poolname;
	zpool_handle_t *zhp;
	int err = 0;
	boolean_t wait = B_FALSE;
	boolean_t initialize_all = B_FALSE;

	struct option long_options[] = {
		{"cancel", no_argument, NULL, 'c'},
		{"suspend", no_argument, NULL, 's'},
		{"uninit", no_argument, NULL, 'u'},
		{"wait", no_argument, NULL, 'w'},
		{"all", no_argument, NULL, 'a'},
		{0, 0, 0, 0}
	};

	pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
	while ((c = getopt_long(argc, argv, "acsuw", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'a':
			initialize_all = B_TRUE;
			break;
		case 'c':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_CANCEL) {
				(void) fprintf(stderr, gettext("-c cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_CANCEL;
			break;
		case 's':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_SUSPEND) {
				(void) fprintf(stderr, gettext("-s cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_SUSPEND;
			break;
		case 'u':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_UNINIT) {
				(void) fprintf(stderr, gettext("-u cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_UNINIT;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			if (optopt != 0) {
				(void) fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			} else {
				(void) fprintf(stderr,
				    gettext("invalid option '%s'\n"),
				    argv[optind - 1]);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	initialize_cbdata_t cbdata = {
		.wait = wait,
		.cmd_type = cmd_type
	};

	if (initialize_all && argc > 0) {
		(void) fprintf(stderr, gettext("-a cannot be combined with "
		    "individual pools or vdevs\n"));
		usage(B_FALSE);
	}

	if (argc < 1 && !initialize_all) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	if (wait && (cmd_type != POOL_INITIALIZE_START)) {
		(void) fprintf(stderr, gettext("-w cannot be used with -c, -s, "
		    "or -u\n"));
		usage(B_FALSE);
	}

	if (argc == 0 && initialize_all) {
		/* Initialize each pool in turn */
		err = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
		    B_FALSE, zpool_initialize_one, &cbdata);
		return (err);
	} else if (argc == 1) {
		/* no individual leaf vdevs specified, initialize the pool */
		poolname = argv[0];
		zhp = zpool_open(g_zfs, poolname);
		if (zhp == NULL)
			return (-1);
		err = zpool_initialize_one(zhp, &cbdata);
	} else {
		/* individual leaf vdevs specified, initialize them */
		poolname = argv[0];
		zhp = zpool_open(g_zfs, poolname);
		if (zhp == NULL)
			return (-1);
		nvlist_t *vdevs = fnvlist_alloc();
		for (int i = 1; i < argc; i++) {
			fnvlist_add_boolean(vdevs, argv[i]);
		}
		if (wait)
			err = zpool_initialize_wait(zhp, cmd_type, vdevs);
		else
			err = zpool_initialize(zhp, cmd_type, vdevs);
		fnvlist_free(vdevs);
	}

	zpool_close(zhp);

	return (err);
}

/*
 * print a pool vdev config for dry runs
 */
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
    const char *match, int name_flags)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	boolean_t printed = B_FALSE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		if (name != NULL)
			(void) printf("\t%*s%s\n", indent, "", name);
		return;
	}

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		const char *class = "";

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);

		if (is_hole == B_TRUE) {
			continue;
		}

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (is_log)
			class = VDEV_ALLOC_BIAS_LOG;
		(void) nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
		if (strcmp(match, class) != 0)
			continue;

		if (!printed && name != NULL) {
			(void) printf("\t%*s%s\n", indent, "", name);
			printed = B_TRUE;
		}
		vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
		print_vdev_tree(zhp, vname, child[c], indent + 2, "",
		    name_flags);
		free(vname);
	}
}

/*
 * Print the list of l2cache devices for dry runs.
 */
static void
print_cache_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "cache");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

/*
 * Print the list of spares for dry runs.
 */
static void
print_spare_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "spares");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

typedef struct spare_cbdata {
	uint64_t cb_guid;
	zpool_handle_t *cb_zhp;
} spare_cbdata_t;

static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
	    search == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev(child[c], search))
				return (B_TRUE);
	}

	return (B_FALSE);
}

static int
find_spare(zpool_handle_t *zhp, void *data)
{
	spare_cbdata_t *cbp = data;
	nvlist_t *config, *nvroot;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (find_vdev(nvroot, cbp->cb_guid)) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}

static void
nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,
    boolean_t literal, boolean_t as_int, int format)
{
	char buf[256];

	if (literal) {
		if (!as_int)
			(void) snprintf(buf, 256, "%llu", (u_longlong_t)value);
	} else {
		switch (format) {
		case ZFS_NICENUM_1024:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_1024);
			break;
		case ZFS_NICENUM_BYTES:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_BYTES);
			break;
		case ZFS_NICENUM_TIME:
			zfs_nicenum_format(value, buf, 256, ZFS_NICENUM_TIME);
			break;
		case ZFS_NICE_TIMESTAMP:
			format_timestamp(value, buf, 256);
			break;
		default:
			fprintf(stderr, "Invalid number format");
			exit(1);
		}
	}
	if (as_int)
		fnvlist_add_uint64(item, key, value);
	else
		fnvlist_add_string(item, key, buf);
}
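
/*
 * For example, a value of 1536 with format ZFS_NICENUM_BYTES is added
 * under 'key' as a humanized string (roughly "1.50K"); with literal set
 * it is added as the raw string "1536", and with as_int set the raw
 * uint64 value is added instead.
 */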

/*
 * Generates an nvlist with an output version for every command, based on the
 * given parameters. The purpose is to version the JSON output, since the
 * schema for each command may be updated in the future.
 *
 * Schema:
 *
 * "output_version": {
 *    "command": string,
 *    "vers_major": integer,
 *    "vers_minor": integer,
 * }
 */
static nvlist_t *
zpool_json_schema(int maj_v, int min_v)
{
	char cmd[MAX_CMD_LEN];
	nvlist_t *sch = fnvlist_alloc();
	nvlist_t *ov = fnvlist_alloc();

	(void) snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);
	fnvlist_add_string(ov, "command", cmd);
	fnvlist_add_uint32(ov, "vers_major", maj_v);
	fnvlist_add_uint32(ov, "vers_minor", min_v);
	fnvlist_add_nvlist(sch, "output_version", ov);
	fnvlist_free(ov);
	return (sch);
}
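
/*
 * For example, zpool_json_schema(0, 1) invoked from "zpool status"
 * yields the nvlist equivalent of:
 *
 *	"output_version": {
 *	    "command": "zpool status",
 *	    "vers_major": 0,
 *	    "vers_minor": 1
 *	}
 */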

static void
fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,
    boolean_t as_int)
{
	nvlist_t *config = zpool_get_config(zhp, NULL);
	uint64_t guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
	uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);

	fnvlist_add_string(list, "name", zpool_get_name(zhp));
	if (addtype)
		fnvlist_add_string(list, "type", "POOL");
	fnvlist_add_string(list, "state", zpool_get_state_str(zhp));
	if (as_int) {
		if (guid)
			fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_GUID, guid);
		if (txg)
			fnvlist_add_uint64(list, ZPOOL_CONFIG_POOL_TXG, txg);
		fnvlist_add_uint64(list, "spa_version", SPA_VERSION);
		fnvlist_add_uint64(list, "zpl_version", ZPL_VERSION);
	} else {
		char value[ZFS_MAXPROPLEN];
		if (guid) {
			(void) snprintf(value, ZFS_MAXPROPLEN, "%llu",
			    (u_longlong_t)guid);
			fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value);
		}
		if (txg) {
			(void) snprintf(value, ZFS_MAXPROPLEN, "%llu",
			    (u_longlong_t)txg);
			fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value);
		}
		fnvlist_add_string(list, "spa_version", SPA_VERSION_STRING);
		fnvlist_add_string(list, "zpl_version", ZPL_VERSION_STRING);
	}
}

static void
used_by_other(zpool_handle_t *zhp, nvlist_t *nvdev, nvlist_t *list)
{
	spare_cbdata_t spare_cb;
	verify(nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID,
	    &spare_cb.cb_guid) == 0);
	if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
		if (strcmp(zpool_get_name(spare_cb.cb_zhp),
		    zpool_get_name(zhp)) != 0) {
			fnvlist_add_string(list, "used_by",
			    zpool_get_name(spare_cb.cb_zhp));
		}
		zpool_close(spare_cb.cb_zhp);
	}
}

static void
fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
    boolean_t addtype, boolean_t as_int)
{
	boolean_t l2c = B_FALSE;
	const char *path, *phys, *devid, *bias = NULL;
	uint64_t hole = 0, log = 0, spare = 0;
	vdev_stat_t *vs;
	uint_t c;
	nvlist_t *nvdev;
	nvlist_t *nvdev_parent = NULL;
	char *_name;

	if (strcmp(name, zpool_get_name(zhp)) != 0)
		_name = name;
	else
		_name = (char *)"root-0";

	nvdev = zpool_find_vdev(zhp, _name, NULL, &l2c, NULL);

	fnvlist_add_string(list, "name", name);
	if (addtype)
		fnvlist_add_string(list, "type", "VDEV");
	if (nvdev) {
		const char *type = fnvlist_lookup_string(nvdev,
		    ZPOOL_CONFIG_TYPE);
		if (type)
			fnvlist_add_string(list, "vdev_type", type);
		uint64_t guid = fnvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_GUID);
		if (guid) {
			if (as_int) {
				fnvlist_add_uint64(list, "guid", guid);
			} else {
				char buf[ZFS_MAXPROPLEN];
				(void) snprintf(buf, ZFS_MAXPROPLEN, "%llu",
				    (u_longlong_t)guid);
				fnvlist_add_string(list, "guid", buf);
			}
		}
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PATH, &path) == 0)
			fnvlist_add_string(list, "path", path);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_PHYS_PATH,
		    &phys) == 0)
			fnvlist_add_string(list, "phys_path", phys);
		if (nvlist_lookup_string(nvdev, ZPOOL_CONFIG_DEVID,
		    &devid) == 0)
			fnvlist_add_string(list, "devid", devid);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_LOG, &log);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_SPARE,
		    &spare);
		(void) nvlist_lookup_uint64(nvdev, ZPOOL_CONFIG_IS_HOLE, &hole);
		if (hole)
			fnvlist_add_string(list, "class", VDEV_TYPE_HOLE);
		else if (l2c)
			fnvlist_add_string(list, "class", VDEV_TYPE_L2CACHE);
		else if (spare)
			fnvlist_add_string(list, "class", VDEV_TYPE_SPARE);
		else if (log)
			fnvlist_add_string(list, "class", VDEV_TYPE_LOG);
		else {
			(void) nvlist_lookup_string(nvdev,
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
			if (bias != NULL)
				fnvlist_add_string(list, "class", bias);
			else {
				nvdev_parent = NULL;
				nvdev_parent = zpool_find_parent_vdev(zhp,
				    _name, NULL, NULL, NULL);

				/*
				 * With a mirrored special device, the parent
				 * "mirror" vdev will have
				 * ZPOOL_CONFIG_ALLOCATION_BIAS set to
				 * "special", not the leaf vdevs. If we are a
				 * leaf vdev in that case, we need to look at
				 * our parent to see whether it is "special"
				 * to know whether we are "special" too.
				 */
				if (nvdev_parent) {
					(void) nvlist_lookup_string(
					    nvdev_parent,
					    ZPOOL_CONFIG_ALLOCATION_BIAS,
					    &bias);
				}
				if (bias != NULL)
					fnvlist_add_string(list, "class", bias);
				else
					fnvlist_add_string(list, "class",
					    "normal");
			}
		}
		if (nvlist_lookup_uint64_array(nvdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c) == 0) {
			fnvlist_add_string(list, "state",
			    vdev_state_str[vs->vs_state]);
		}
	}
}

static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
	nvpair_t *nvp;
	for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
	    nvp = nvlist_next_nvpair(proplist, nvp)) {
		if (zpool_prop_feature(nvpair_name(nvp)))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Add a property pair (name, string-value) into a property nvlist.
 */
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
    boolean_t poolprop)
{
	zpool_prop_t prop = ZPOOL_PROP_INVAL;
	nvlist_t *proplist;
	const char *normnm;
	const char *strval;

	if (*props == NULL &&
	    nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		return (1);
	}

	proplist = *props;

	if (poolprop) {
		const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
		const char *cname =
		    zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);

		if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
		    (!zpool_prop_feature(propname) &&
		    !zpool_prop_vdev(propname))) {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid pool or vdev property\n"), propname);
			return (2);
		}

		/*
		 * feature@ properties and version should not be specified
		 * at the same time.
		 */
		if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    prop_list_contains_feature(proplist))) {
			(void) fprintf(stderr, gettext("'feature@' and "
			    "'version' properties cannot be specified "
			    "together\n"));
			return (2);
		}

		/*
		 * if version is specified, only "legacy" compatibility
		 * may be requested
		 */
		if ((prop == ZPOOL_PROP_COMPATIBILITY &&
		    strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    nvlist_exists(proplist, cname) &&
		    strcmp(fnvlist_lookup_string(proplist, cname),
		    ZPOOL_COMPAT_LEGACY) != 0)) {
			(void) fprintf(stderr, gettext("when 'version' is "
			    "specified, the 'compatibility' feature may only "
			    "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
			return (2);
		}

		if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
			normnm = propname;
		else
			normnm = zpool_prop_to_name(prop);
	} else {
		zfs_prop_t fsprop = zfs_name_to_prop(propname);

		if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
		    B_FALSE)) {
			normnm = zfs_prop_to_name(fsprop);
		} else if (zfs_prop_user(propname) ||
		    zfs_prop_userquota(propname)) {
			normnm = propname;
		} else {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid filesystem property\n"), propname);
			return (2);
		}
	}

	if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
	    prop != ZPOOL_PROP_CACHEFILE) {
		(void) fprintf(stderr, gettext("property '%s' "
		    "specified multiple times\n"), propname);
		return (2);
	}

	if (nvlist_add_string(proplist, normnm, propval) != 0) {
		(void) fprintf(stderr, gettext("internal "
		    "error: out of memory\n"));
		return (1);
	}

	return (0);
}

/*
 * Set a default property pair (name, string-value) in a property nvlist
 */
static int
add_prop_list_default(const char *propname, const char *propval,
    nvlist_t **props)
{
	const char *pval;

	if (nvlist_lookup_string(*props, propname, &pval) == 0)
		return (0);

	return (add_prop_list(propname, propval, props, B_TRUE));
}

/*
 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
 *
 * -a Disable the ashift validation checks
 * -f Force addition of devices, even if they appear in use
 * -g Display guid for individual vdev name.
 * -L Follow links when resolving vdev path name.
 * -n Do not add the devices, but display the resulting layout if
 *    they were to be added.
 * -o Set property=value.
 * -P Display full path for vdev name.
 *
 * Adds the given vdevs to 'pool'. As with create, the bulk of this work is
 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
 * libzfs.
 */
int
zpool_do_add(int argc, char **argv)
{
	boolean_t check_replication = B_TRUE;
	boolean_t check_inuse = B_TRUE;
	boolean_t dryrun = B_FALSE;
	boolean_t check_ashift = B_TRUE;
	boolean_t force = B_FALSE;
	int name_flags = 0;
	int c;
	nvlist_t *nvroot;
	char *poolname;
	int ret;
	zpool_handle_t *zhp;
	nvlist_t *config;
	nvlist_t *props = NULL;
	char *propval;

	struct option long_options[] = {
		{"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
		{"allow-replication-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
		{"allow-ashift-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
	    != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'g':
			name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				usage(B_FALSE);
			}
			*propval = '\0';
			propval++;

			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
			    (add_prop_list(optarg, propval, &props, B_TRUE)))
				usage(B_FALSE);
			break;
		case 'P':
			name_flags |= VDEV_NAME_PATH;
			break;
		case ZPOOL_OPTION_ALLOW_INUSE:
			check_inuse = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
			check_replication = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
			check_ashift = B_FALSE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		usage(B_FALSE);
	}

	if (force) {
		if (!check_inuse || !check_replication || !check_ashift) {
			(void) fprintf(stderr, gettext("'-f' option is not "
			    "allowed with '--allow-replication-mismatch', "
			    "'--allow-ashift-mismatch', or "
			    "'--allow-in-use'\n"));
			usage(B_FALSE);
		}
		check_inuse = B_FALSE;
		check_replication = B_FALSE;
		check_ashift = B_FALSE;
	}

	poolname = argv[0];

	argc--;
	argv++;

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
		    poolname);
		zpool_close(zhp);
		return (1);
	}

	/* unless manually specified use "ashift" pool property (if set) */
	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
		int intval;
		zprop_source_t src;
		char strval[ZPOOL_MAXPROPLEN];

		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
		if (src != ZPROP_SRC_DEFAULT) {
			(void) sprintf(strval, "%" PRId32, intval);
			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
			    &props, B_TRUE) == 0);
		}
	}

	/* pass off to make_root_vdev for processing */
	nvroot = make_root_vdev(zhp, props, !check_inuse,
	    check_replication, B_FALSE, dryrun, argc, argv);
	if (nvroot == NULL) {
		zpool_close(zhp);
		return (1);
	}

	if (dryrun) {
		nvlist_t *poolnvroot;
		nvlist_t **l2child, **sparechild;
		uint_t l2children, sparechildren, c;
		char *vname;
		boolean_t hadcache = B_FALSE, hadspare = B_FALSE;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &poolnvroot) == 0);

		(void) printf(gettext("would update '%s' to the following "
		    "configuration:\n\n"), zpool_get_name(zhp));

		/* print original main pool and new tree */
		print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
		    name_flags | VDEV_NAME_TYPE_ID);
		print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);

		/* print other classes: 'dedup', 'special', and 'log' */
		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		}

		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		}

		if (num_logs(poolnvroot) > 0) {
			print_vdev_tree(zhp, "logs", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		} else if (num_logs(nvroot) > 0) {
			print_vdev_tree(zhp, "logs", nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		}

		/* Do the same for the caches */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			hadcache = B_TRUE;
			(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			if (!hadcache)
				(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		/* And finally the spares */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			hadspare = B_TRUE;
			(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			if (!hadspare)
				(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}

		ret = 0;
	} else {
		ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
	}

	nvlist_free(props);
	nvlist_free(nvroot);
	zpool_close(zhp);

	return (ret);
}

/*
 * zpool remove [-npsw] <pool> <vdev> ...
 *
 * Removes the given vdev from the pool.
 */
int
zpool_do_remove(int argc, char **argv)
{
	char *poolname;
	int i, ret = 0;
	zpool_handle_t *zhp = NULL;
	boolean_t stop = B_FALSE;
	int c;
	boolean_t noop = B_FALSE;
	boolean_t parsable = B_FALSE;
	boolean_t wait = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "npsw")) != -1) {
		switch (c) {
		case 'n':
			noop = B_TRUE;
			break;
		case 'p':
			parsable = B_TRUE;
			break;
		case 's':
			stop = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if (stop && noop) {
		zpool_close(zhp);
		(void) fprintf(stderr, gettext("stop request ignored\n"));
		return (0);
	}

	if (stop) {
		if (argc > 1) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
		if (zpool_vdev_remove_cancel(zhp) != 0)
			ret = 1;
		if (wait) {
			(void) fprintf(stderr, gettext("invalid option "
			    "combination: -w cannot be used with -s\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc < 2) {
			(void) fprintf(stderr, gettext("missing device\n"));
			usage(B_FALSE);
		}

		for (i = 1; i < argc; i++) {
			if (noop) {
				uint64_t size;

				if (zpool_vdev_indirect_size(zhp, argv[i],
				    &size) != 0) {
					ret = 1;
					break;
				}
				if (parsable) {
					(void) printf("%s %llu\n",
					    argv[i], (unsigned long long)size);
				} else {
					char valstr[32];
					zfs_nicenum(size, valstr,
					    sizeof (valstr));
					(void) printf("Memory that will be "
					    "used after removing %s: %s\n",
					    argv[i], valstr);
				}
			} else {
				if (zpool_vdev_remove(zhp, argv[i]) != 0)
					ret = 1;
			}
		}

		if (ret == 0 && wait)
			ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
	}
	zpool_close(zhp);

	return (ret);
}

/*
 * Return 1 if a vdev is active (being used in a pool).
 * Return 0 if a vdev is inactive (offlined, faulted, or not in an active
 * pool).
 *
 * This is useful for checking if a disk in an active pool is offlined or
 * faulted.
 */
static int
vdev_is_active(char *vdev_path)
{
	int fd;
	fd = open(vdev_path, O_EXCL);
	if (fd < 0) {
		return (1); /* can't open O_EXCL - disk is active */
	}

	(void) close(fd);
	return (0); /* disk is inactive in the pool */
}
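
/*
 * The O_EXCL open above relies on exclusive block-device semantics: on
 * Linux, opening a block device with O_EXCL fails with EBUSY while
 * another holder (such as an imported pool) has the device open.
 */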
1769
1770
/*
1771
* zpool labelclear [-f] <vdev>
1772
*
1773
* -f Force clearing the label for the vdevs which are members of
1774
* the exported or foreign pools.
1775
*
1776
* Verifies that the vdev is not active and zeros out the label information
1777
* on the device.
1778
*/
1779
int
1780
zpool_do_labelclear(int argc, char **argv)
1781
{
1782
char vdev[MAXPATHLEN];
1783
char *name = NULL;
1784
int c, fd, ret = 0;
1785
nvlist_t *config;
1786
pool_state_t state;
1787
boolean_t inuse = B_FALSE;
1788
boolean_t force = B_FALSE;
1789
1790
/* check options */
1791
while ((c = getopt(argc, argv, "f")) != -1) {
1792
switch (c) {
1793
case 'f':
1794
force = B_TRUE;
1795
break;
1796
default:
1797
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
1798
optopt);
1799
usage(B_FALSE);
1800
}
1801
}
1802
1803
argc -= optind;
1804
argv += optind;
1805
1806
/* get vdev name */
1807
if (argc < 1) {
1808
(void) fprintf(stderr, gettext("missing vdev name\n"));
1809
usage(B_FALSE);
1810
}
1811
if (argc > 1) {
1812
(void) fprintf(stderr, gettext("too many arguments\n"));
1813
usage(B_FALSE);
1814
}
1815
1816
(void) strlcpy(vdev, argv[0], sizeof (vdev));
1817
1818
/*
1819
* If we cannot open an absolute path, we quit.
1820
* Otherwise if the provided vdev name doesn't point to a file,
1821
* try prepending expected disk paths and partition numbers.
1822
*/
1823
if ((fd = open(vdev, O_RDWR)) < 0) {
1824
int error;
1825
if (vdev[0] == '/') {
1826
(void) fprintf(stderr, gettext("failed to open "
1827
"%s: %s\n"), vdev, strerror(errno));
1828
return (1);
1829
}
1830
1831
error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
1832
if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
1833
if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
1834
error = ENOENT;
1835
}
1836
1837
if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
1838
if (errno == ENOENT) {
1839
(void) fprintf(stderr, gettext(
1840
"failed to find device %s, try "
1841
"specifying absolute path instead\n"),
1842
argv[0]);
1843
return (1);
1844
}
1845
1846
(void) fprintf(stderr, gettext("failed to open %s:"
1847
" %s\n"), vdev, strerror(errno));
1848
return (1);
1849
}
1850
}
1851
1852
/*
1853
* Flush all dirty pages for the block device. This should not be
1854
* fatal when the device does not support BLKFLSBUF as would be the
1855
* case for a file vdev.
1856
*/
1857
if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
1858
(void) fprintf(stderr, gettext("failed to invalidate "
1859
"cache for %s: %s\n"), vdev, strerror(errno));
1860
1861
if (zpool_read_label(fd, &config, NULL) != 0) {
1862
(void) fprintf(stderr,
1863
gettext("failed to read label from %s\n"), vdev);
1864
ret = 1;
1865
goto errout;
1866
}
1867
nvlist_free(config);
1868
1869
ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
1870
if (ret != 0) {
1871
(void) fprintf(stderr,
1872
gettext("failed to check state for %s\n"), vdev);
1873
ret = 1;
1874
goto errout;
1875
}
1876
1877
if (!inuse)
1878
goto wipe_label;
1879
1880
switch (state) {
1881
default:
1882
case POOL_STATE_ACTIVE:
1883
case POOL_STATE_SPARE:
1884
case POOL_STATE_L2CACHE:
1885
/*
1886
* We allow the user to call 'zpool offline -f'
1887
* on an offlined disk in an active pool. We can check if
1888
* the disk is online by calling vdev_is_active().
1889
*/
1890
if (force && !vdev_is_active(vdev))
1891
break;
1892
1893
(void) fprintf(stderr, gettext(
1894
"%s is a member (%s) of pool \"%s\""),
1895
vdev, zpool_pool_state_to_name(state), name);
1896
1897
if (force) {
1898
(void) fprintf(stderr, gettext(
1899
". Offline the disk first to clear its label."));
1900
}
1901
printf("\n");
1902
ret = 1;
1903
goto errout;
1904
1905
case POOL_STATE_EXPORTED:
1906
if (force)
1907
break;
1908
(void) fprintf(stderr, gettext(
1909
"use '-f' to override the following error:\n"
1910
"%s is a member of exported pool \"%s\"\n"),
1911
vdev, name);
1912
ret = 1;
1913
goto errout;
1914
1915
case POOL_STATE_POTENTIALLY_ACTIVE:
1916
if (force)
1917
break;
1918
(void) fprintf(stderr, gettext(
1919
"use '-f' to override the following error:\n"
1920
"%s is a member of potentially active pool \"%s\"\n"),
1921
vdev, name);
1922
ret = 1;
1923
goto errout;
1924
1925
case POOL_STATE_DESTROYED:
1926
/* inuse should never be set for a destroyed pool */
1927
assert(0);
1928
break;
1929
}
1930
1931
wipe_label:
1932
ret = zpool_clear_label(fd);
1933
if (ret != 0) {
1934
(void) fprintf(stderr,
1935
gettext("failed to clear label for %s\n"), vdev);
1936
}
1937
1938
errout:
1939
free(name);
1940
(void) close(fd);
1941
1942
return (ret);
1943
}
1944
1945
/*
1946
* zpool create [-fnd] [-o property=value] ...
1947
* [-O file-system-property=value] ...
1948
* [-R root] [-m mountpoint] <pool> <dev> ...
1949
*
1950
* -f Force creation, even if devices appear in use
1951
* -n Do not create the pool, but display the resulting layout if it
1952
* were to be created.
1953
* -R Create a pool under an alternate root
1954
* -m Set default mountpoint for the root dataset. By default it's
1955
* '/<pool>'
1956
* -o Set property=value.
1957
* -o Set feature@feature=enabled|disabled.
1958
* -d Don't automatically enable all supported pool features
1959
* (individual features can be enabled with -o).
1960
* -O Set fsproperty=value in the pool's root file system
1961
*
1962
* Creates the named pool according to the given vdev specification. The
1963
* bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
1964
* Once we get the nvlist back from make_root_vdev(), we either print out the
1965
* contents (if '-n' was specified), or pass it to libzfs to do the creation.
1966
*/
1967
int
zpool_do_create(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	boolean_t dryrun = B_FALSE;
	boolean_t enable_pool_features = B_TRUE;

	int c;
	nvlist_t *nvroot = NULL;
	char *poolname;
	char *tname = NULL;
	int ret = 1;
	char *altroot = NULL;
	char *compat = NULL;
	char *mountpoint = NULL;
	nvlist_t *fsprops = NULL;
	nvlist_t *props = NULL;
	char *propval;

	/* check options */
	while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'd':
			enable_pool_features = B_FALSE;
			break;
		case 'R':
			altroot = optarg;
			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
				goto errout;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto errout;
			break;
		case 'm':
			/* Equivalent to -O mountpoint=optarg */
			mountpoint = optarg;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			if (add_prop_list(optarg, propval, &props, B_TRUE))
				goto errout;

			/*
			 * If the user is creating a pool that doesn't support
			 * feature flags, don't enable any features.
			 */
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
				char *end;
				u_longlong_t ver;

				ver = strtoull(propval, &end, 0);
				if (*end == '\0' &&
				    ver < SPA_VERSION_FEATURES) {
					enable_pool_features = B_FALSE;
				}
			}
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
				altroot = propval;
			if (zpool_name_to_prop(optarg) ==
			    ZPOOL_PROP_COMPATIBILITY)
				compat = propval;
			break;
		case 'O':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -O option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			/*
			 * Mountpoints are checked and then added later.
			 * Uniquely among properties, they can be specified
			 * more than once, to avoid conflict with -m.
			 */
			if (0 == strcmp(optarg,
			    zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
				mountpoint = propval;
			} else if (add_prop_list(optarg, propval, &fsprops,
			    B_FALSE)) {
				goto errout;
			}
			break;
		case 't':
			/*
			 * Sanity check temporary pool name.
			 */
			if (strchr(optarg, '/') != NULL) {
				(void) fprintf(stderr, gettext("cannot create "
				    "'%s': invalid character '/' in temporary "
				    "name\n"), optarg);
				(void) fprintf(stderr, gettext("use 'zfs "
				    "create' to create a dataset\n"));
				goto errout;
			}

			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
				goto errout;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto errout;
			tname = optarg;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			goto badusage;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			goto badusage;
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		goto badusage;
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		goto badusage;
	}

	poolname = argv[0];

	/*
	 * As a special case, check for use of '/' in the name, and direct the
	 * user to use 'zfs create' instead.
	 */
	if (strchr(poolname, '/') != NULL) {
		(void) fprintf(stderr, gettext("cannot create '%s': invalid "
		    "character '/' in pool name\n"), poolname);
		(void) fprintf(stderr, gettext("use 'zfs create' to "
		    "create a dataset\n"));
		goto errout;
	}

	/* pass off to make_root_vdev for bulk processing */
	nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
	    argc - 1, argv + 1);
	if (nvroot == NULL)
		goto errout;

	/* make_root_vdev() allows 0 toplevel children if there are spares */
	if (!zfs_allocatable_devs(nvroot)) {
		(void) fprintf(stderr, gettext("invalid vdev "
		    "specification: at least one toplevel vdev must be "
		    "specified\n"));
		goto errout;
	}

	if (altroot != NULL && altroot[0] != '/') {
		(void) fprintf(stderr, gettext("invalid alternate root '%s': "
		    "must be an absolute path\n"), altroot);
		goto errout;
	}

	/*
	 * Check the validity of the mountpoint and direct the user to use the
	 * '-m' mountpoint option if it looks like it's in use.
	 */
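	/*
	 * For example (illustrative): 'zpool create -m none tank sda' skips
	 * these checks entirely, while 'zpool create -R /a tank sda' checks
	 * '/a/tank' rather than '/tank'.
	 */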
	if (mountpoint == NULL ||
	    (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
	    strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
		char buf[MAXPATHLEN];
		DIR *dirp;

		if (mountpoint && mountpoint[0] != '/') {
			(void) fprintf(stderr, gettext("invalid mountpoint "
			    "'%s': must be an absolute path, 'legacy', or "
			    "'none'\n"), mountpoint);
			goto errout;
		}

		if (mountpoint == NULL) {
			if (altroot != NULL)
				(void) snprintf(buf, sizeof (buf), "%s/%s",
				    altroot, poolname);
			else
				(void) snprintf(buf, sizeof (buf), "/%s",
				    poolname);
		} else {
			if (altroot != NULL)
				(void) snprintf(buf, sizeof (buf), "%s%s",
				    altroot, mountpoint);
			else
				(void) snprintf(buf, sizeof (buf), "%s",
				    mountpoint);
		}

		if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
			(void) fprintf(stderr, gettext("mountpoint '%s' : "
			    "%s\n"), buf, strerror(errno));
			(void) fprintf(stderr, gettext("use '-m' "
			    "option to provide a different default\n"));
			goto errout;
		} else if (dirp) {
			int count = 0;

			while (count < 3 && readdir(dirp) != NULL)
				count++;
			(void) closedir(dirp);

			if (count > 2) {
				(void) fprintf(stderr, gettext("mountpoint "
				    "'%s' exists and is not empty\n"), buf);
				(void) fprintf(stderr, gettext("use '-m' "
				    "option to provide a "
				    "different default\n"));
				goto errout;
			}
		}
	}

	/*
	 * Now that the mountpoint's validity has been checked, ensure that
	 * the property is set appropriately prior to creating the pool.
	 */
	if (mountpoint != NULL) {
		ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    mountpoint, &fsprops, B_FALSE);
		if (ret != 0)
			goto errout;
	}

	ret = 1;
	if (dryrun) {
		/*
		 * For a dry run, print a basic message, then walk the vdev
		 * list and print it as an appropriately indented hierarchy.
		 */
(void) printf(gettext("would create '%s' with the "
2220
"following layout:\n\n"), poolname);
2221
2222
print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
2223
print_vdev_tree(NULL, "dedup", nvroot, 0,
2224
VDEV_ALLOC_BIAS_DEDUP, 0);
2225
print_vdev_tree(NULL, "special", nvroot, 0,
2226
VDEV_ALLOC_BIAS_SPECIAL, 0);
2227
print_vdev_tree(NULL, "logs", nvroot, 0,
2228
VDEV_ALLOC_BIAS_LOG, 0);
2229
print_cache_list(nvroot, 0);
2230
print_spare_list(nvroot, 0);
2231
2232
ret = 0;
2233
} else {
2234
/*
2235
* Load in feature set.
2236
* Note: if compatibility property not given, we'll have
2237
* NULL, which means 'all features'.
2238
*/
2239
boolean_t requested_features[SPA_FEATURES];
2240
if (zpool_do_load_compat(compat, requested_features) !=
2241
ZPOOL_COMPATIBILITY_OK)
2242
goto errout;
2243
2244
/*
2245
* props contains list of features to enable.
2246
* For each feature:
2247
* - remove it if feature@name=disabled
2248
* - leave it there if feature@name=enabled
2249
* - add it if:
2250
* - enable_pool_features (ie: no '-d' or '-o version')
2251
* - it's supported by the kernel module
2252
* - it's in the requested feature set
2253
* - warn if it's enabled but not in compat
2254
*/
2255
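		/*
		 * For example (illustrative only): given '-o
		 * feature@async_destroy=disabled' on the command line, the
		 * property is dropped from 'props' below, so the pool is
		 * created with that feature left disabled.
		 */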
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
2256
char propname[MAXPATHLEN];
2257
const char *propval;
2258
zfeature_info_t *feat = &spa_feature_table[i];
2259
2260
(void) snprintf(propname, sizeof (propname),
2261
"feature@%s", feat->fi_uname);
2262
2263
if (!nvlist_lookup_string(props, propname, &propval)) {
2264
if (strcmp(propval,
2265
ZFS_FEATURE_DISABLED) == 0) {
2266
(void) nvlist_remove_all(props,
2267
propname);
2268
} else if (strcmp(propval,
2269
ZFS_FEATURE_ENABLED) == 0 &&
2270
!requested_features[i]) {
2271
(void) fprintf(stderr, gettext(
2272
"Warning: feature \"%s\" enabled "
2273
"but is not in specified "
2274
"'compatibility' feature set.\n"),
2275
feat->fi_uname);
2276
}
2277
} else if (
2278
enable_pool_features &&
2279
feat->fi_zfs_mod_supported &&
2280
requested_features[i]) {
2281
ret = add_prop_list(propname,
2282
ZFS_FEATURE_ENABLED, &props, B_TRUE);
2283
if (ret != 0)
2284
goto errout;
2285
}
2286
}
2287
2288
ret = 1;
2289
if (zpool_create(g_zfs, poolname,
2290
nvroot, props, fsprops) == 0) {
2291
zfs_handle_t *pool = zfs_open(g_zfs,
2292
tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
2293
if (pool != NULL) {
2294
if (zfs_mount(pool, NULL, 0) == 0) {
2295
ret = zfs_share(pool, NULL);
2296
zfs_commit_shares(NULL);
2297
}
2298
zfs_close(pool);
2299
}
2300
} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
2301
(void) fprintf(stderr, gettext("pool name may have "
2302
"been omitted\n"));
2303
}
2304
}
2305
2306
errout:
2307
nvlist_free(nvroot);
2308
nvlist_free(fsprops);
2309
nvlist_free(props);
2310
return (ret);
2311
badusage:
2312
nvlist_free(fsprops);
2313
nvlist_free(props);
2314
usage(B_FALSE);
2315
return (2);
2316
}
2317
2318
/*
 * zpool destroy <pool>
 *
 *	-f	Forcefully unmount any datasets
 *
 * Destroy the given pool.  Automatically unmounts any datasets in the pool.
 */
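/*
 * Example (illustrative only): 'zpool destroy -f tank' forcefully unmounts
 * tank's datasets and then destroys the pool.
 */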
int
zpool_do_destroy(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	int c;
	char *pool;
	zpool_handle_t *zhp;
	int ret;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* check arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	pool = argv[0];

	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
		/*
		 * As a special case, check for use of '/' in the name, and
		 * direct the user to use 'zfs destroy' instead.
		 */
		if (strchr(pool, '/') != NULL)
			(void) fprintf(stderr, gettext("use 'zfs destroy' to "
			    "destroy a dataset\n"));
		return (1);
	}

	if (zpool_disable_datasets(zhp, force) != 0) {
		(void) fprintf(stderr, gettext("could not destroy '%s': "
		    "could not unmount datasets\n"), zpool_get_name(zhp));
		zpool_close(zhp);
		return (1);
	}

	/* The history must be logged as part of the export */
	log_history = B_FALSE;

	ret = (zpool_destroy(zhp, history_str) != 0);

	zpool_close(zhp);

	return (ret);
}

typedef struct export_cbdata {
	taskq_t *taskq;
	pthread_mutex_t mnttab_lock;
	boolean_t force;
	boolean_t hardforce;
	int retval;
} export_cbdata_t;


typedef struct {
	char *aea_poolname;
	export_cbdata_t *aea_cbdata;
} async_export_args_t;

/*
 * Export one pool
 */
static int
zpool_export_one(zpool_handle_t *zhp, void *data)
{
	export_cbdata_t *cb = data;

	/*
	 * zpool_disable_datasets() is not thread-safe for mnttab access.
	 * So we serialize access here for 'zpool export -a' parallel case.
	 */
	if (cb->taskq != NULL)
		(void) pthread_mutex_lock(&cb->mnttab_lock);

	int retval = zpool_disable_datasets(zhp, cb->force);

	if (cb->taskq != NULL)
		(void) pthread_mutex_unlock(&cb->mnttab_lock);

	if (retval)
		return (1);

	if (cb->hardforce) {
		if (zpool_export_force(zhp, history_str) != 0)
			return (1);
	} else if (zpool_export(zhp, cb->force, history_str) != 0) {
		return (1);
	}

	return (0);
}

/*
 * Asynchronous export request
 */
static void
zpool_export_task(void *arg)
{
	async_export_args_t *aea = arg;

	zpool_handle_t *zhp = zpool_open(g_zfs, aea->aea_poolname);
	if (zhp != NULL) {
		int ret = zpool_export_one(zhp, aea->aea_cbdata);
		if (ret != 0)
			aea->aea_cbdata->retval = ret;
		zpool_close(zhp);
	} else {
		aea->aea_cbdata->retval = 1;
	}

	free(aea->aea_poolname);
	free(aea);
}

/*
 * Process an export request in parallel
 */
static int
zpool_export_one_async(zpool_handle_t *zhp, void *data)
{
	taskq_t *tq = ((export_cbdata_t *)data)->taskq;
	async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t));

	/* save pool name since zhp will go out of scope */
	aea->aea_poolname = strdup(zpool_get_name(zhp));
	aea->aea_cbdata = data;

	/* ship off actual export to another thread */
	if (taskq_dispatch(tq, zpool_export_task, (void *)aea,
	    TQ_SLEEP) == TASKQID_INVALID)
		return (errno);	/* unlikely */
	else
		return (0);
}

/*
 * zpool export [-af] [-F] <pool> ...
 *
 *	-a	Export all pools
 *	-f	Forcefully unmount datasets
 *	-F	Hard-force the export
 *
 * Export the given pools.  By default, the command will attempt to cleanly
 * unmount any active datasets within the pool.  If the '-f' flag is specified,
 * then the datasets will be forcefully unmounted.
 */
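/*
 * Illustrative invocations (examples only):
 *
 *	zpool export tank
 *	zpool export -f tank
 *	zpool export -a
 */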
int
zpool_do_export(int argc, char **argv)
{
	export_cbdata_t cb;
	boolean_t do_all = B_FALSE;
	boolean_t force = B_FALSE;
	boolean_t hardforce = B_FALSE;
	int c, ret;

	/* check options */
	while ((c = getopt(argc, argv, "afF")) != -1) {
		switch (c) {
		case 'a':
			do_all = B_TRUE;
			break;
		case 'f':
			force = B_TRUE;
			break;
		case 'F':
			hardforce = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	cb.force = force;
	cb.hardforce = hardforce;
	cb.taskq = NULL;
	cb.retval = 0;
	argc -= optind;
	argv += optind;

	/* The history will be logged as part of the export itself */
	log_history = B_FALSE;

	if (do_all) {
		if (argc != 0) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}

		cb.taskq = taskq_create("zpool_export",
		    5 * sysconf(_SC_NPROCESSORS_ONLN), minclsyspri, 1, INT_MAX,
		    TASKQ_DYNAMIC);
		(void) pthread_mutex_init(&cb.mnttab_lock, NULL);

		/* Asynchronously call zpool_export_one using thread pool */
		ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
		    B_FALSE, zpool_export_one_async, &cb);

		taskq_wait(cb.taskq);
		taskq_destroy(cb.taskq);
		(void) pthread_mutex_destroy(&cb.mnttab_lock);

		return (ret | cb.retval);
	}

	/* check arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	}

	ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, zpool_export_one, &cb);

	return (ret);
}

/*
 * Given a vdev configuration, determine the maximum width needed for the
 * device name column.
 */
static int
max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
    int name_flags)
{
	static const char *const subtypes[] =
	    {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};

	char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
	max = MAX(strlen(name) + depth, max);
	free(name);

	nvlist_t **child;
	uint_t children;
	for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
		if (nvlist_lookup_nvlist_array(nv, subtypes[i],
		    &child, &children) == 0)
			for (uint_t c = 0; c < children; ++c)
				max = MAX(max_width(zhp, child[c], depth + 2,
				    max, name_flags), max);

	return (max);
}

typedef struct status_cbdata {
	int		cb_count;
	int		cb_name_flags;
	int		cb_namewidth;
	boolean_t	cb_allpools;
	boolean_t	cb_verbose;
	boolean_t	cb_literal;
	boolean_t	cb_explain;
	boolean_t	cb_first;
	boolean_t	cb_dedup_stats;
	boolean_t	cb_print_unhealthy;
	boolean_t	cb_print_status;
	boolean_t	cb_print_slow_ios;
	boolean_t	cb_print_dio_verify;
	boolean_t	cb_print_vdev_init;
	boolean_t	cb_print_vdev_trim;
	vdev_cmd_data_list_t	*vcdl;
	boolean_t	cb_print_power;
	boolean_t	cb_json;
	boolean_t	cb_flat_vdevs;
	nvlist_t	*cb_jsobj;
	boolean_t	cb_json_as_int;
	boolean_t	cb_json_pool_key_guid;
} status_cbdata_t;

/* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */
static boolean_t
is_blank_str(const char *str)
{
	for (; str != NULL && *str != '\0'; ++str)
		if (!isblank(*str))
			return (B_FALSE);
	return (B_TRUE);
}

static void
zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path,
    nvlist_t *item)
{
	vdev_cmd_data_t *data;
	int i, j, k = 1;
	char tmp[256];
	const char *val;

	for (i = 0; i < vcdl->count; i++) {
		if ((strcmp(vcdl->data[i].path, path) != 0) ||
		    (strcmp(vcdl->data[i].pool, pool) != 0))
			continue;

		data = &vcdl->data[i];
		for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
			val = NULL;
			for (int k = 0; k < data->cols_cnt; k++) {
				if (strcmp(data->cols[k],
				    vcdl->uniq_cols[j]) == 0) {
					val = data->lines[k];
					break;
				}
			}
			if (val == NULL || is_blank_str(val))
				val = "-";
			fnvlist_add_string(item, vcdl->uniq_cols[j], val);
		}

		for (j = data->cols_cnt; j < data->lines_cnt; j++) {
			if (data->lines[j]) {
				(void) snprintf(tmp, 256, "extra_%d", k++);
				fnvlist_add_string(item, tmp,
				    data->lines[j]);
			}
		}
		break;
	}
}

/* Print command output lines for specific vdev in a specific pool */
static void
zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
{
	vdev_cmd_data_t *data;
	int i, j;
	const char *val;

	for (i = 0; i < vcdl->count; i++) {
		if ((strcmp(vcdl->data[i].path, path) != 0) ||
		    (strcmp(vcdl->data[i].pool, pool) != 0)) {
			/* Not the vdev we're looking for */
			continue;
		}

		data = &vcdl->data[i];
		/* Print out all the output values for this vdev */
		for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
			val = NULL;
			/* Does this vdev have values for this column? */
			for (int k = 0; k < data->cols_cnt; k++) {
				if (strcmp(data->cols[k],
				    vcdl->uniq_cols[j]) == 0) {
					/* yes it does, record the value */
					val = data->lines[k];
					break;
				}
			}
			/*
			 * Mark empty values with dashes to make output
			 * awk-able.
			 */
			if (val == NULL || is_blank_str(val))
				val = "-";

			printf("%*s", vcdl->uniq_cols_width[j], val);
			if (j < vcdl->uniq_cols_cnt - 1)
				(void) fputs("  ", stdout);
		}

		/* Print out any values that aren't in a column at the end */
		for (j = data->cols_cnt; j < data->lines_cnt; j++) {
			/* Did we have any columns?  If so print a spacer. */
			if (vcdl->uniq_cols_cnt > 0)
				(void) fputs("  ", stdout);

			val = data->lines[j];
			(void) fputs(val ?: "", stdout);
		}
		break;
	}
}

/*
 * Print vdev initialization status for leaves
 */
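/*
 * Example output fragments (illustrative only; the timestamp comes from
 * ctime_r() and is truncated to 24 characters):
 *
 *	(44% initialized, started at Tue Oct  1 12:00:00 2024)
 *	(uninitialized)
 *	(initializing)
 */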
static void
print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
{
	if (verbose) {
		if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
		    vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
		    vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
		    !vs->vs_scan_removing) {
			char zbuf[1024];
			char tbuf[256];

			time_t t = vs->vs_initialize_action_time;
			int initialize_pct = 100;
			if (vs->vs_initialize_state !=
			    VDEV_INITIALIZE_COMPLETE) {
				initialize_pct = (vs->vs_initialize_bytes_done *
				    100 / (vs->vs_initialize_bytes_est + 1));
			}

			(void) ctime_r(&t, tbuf);
			tbuf[24] = 0;

			switch (vs->vs_initialize_state) {
			case VDEV_INITIALIZE_SUSPENDED:
				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
				    gettext("suspended, started at"), tbuf);
				break;
			case VDEV_INITIALIZE_ACTIVE:
				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
				    gettext("started at"), tbuf);
				break;
			case VDEV_INITIALIZE_COMPLETE:
				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
				    gettext("completed at"), tbuf);
				break;
			}

			(void) printf(gettext("  (%d%% initialized%s)"),
			    initialize_pct, zbuf);
		} else {
			(void) printf(gettext("  (uninitialized)"));
		}
	} else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
		(void) printf(gettext("  (initializing)"));
	}
}

/*
 * Print vdev TRIM status for leaves
 */
static void
print_status_trim(vdev_stat_t *vs, boolean_t verbose)
{
	if (verbose) {
		if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
		    vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
		    vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
		    !vs->vs_scan_removing) {
			char zbuf[1024];
			char tbuf[256];

			time_t t = vs->vs_trim_action_time;
			int trim_pct = 100;
			if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
				trim_pct = (vs->vs_trim_bytes_done *
				    100 / (vs->vs_trim_bytes_est + 1));
			}

			(void) ctime_r(&t, tbuf);
			tbuf[24] = 0;

			switch (vs->vs_trim_state) {
			case VDEV_TRIM_SUSPENDED:
				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
				    gettext("suspended, started at"), tbuf);
				break;
			case VDEV_TRIM_ACTIVE:
				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
				    gettext("started at"), tbuf);
				break;
			case VDEV_TRIM_COMPLETE:
				(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
				    gettext("completed at"), tbuf);
				break;
			}

			(void) printf(gettext("  (%d%% trimmed%s)"),
			    trim_pct, zbuf);
		} else if (vs->vs_trim_notsup) {
			(void) printf(gettext("  (trim unsupported)"));
		} else {
			(void) printf(gettext("  (untrimmed)"));
		}
	} else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
		(void) printf(gettext("  (trimming)"));
	}
}

/*
 * Return the color associated with a health string.  This includes returning
 * NULL for no color change.
 */
static const char *
health_str_to_color(const char *health)
{
	if (strcmp(health, gettext("FAULTED")) == 0 ||
	    strcmp(health, gettext("SUSPENDED")) == 0 ||
	    strcmp(health, gettext("UNAVAIL")) == 0) {
		return (ANSI_RED);
	}

	if (strcmp(health, gettext("OFFLINE")) == 0 ||
	    strcmp(health, gettext("DEGRADED")) == 0 ||
	    strcmp(health, gettext("REMOVED")) == 0) {
		return (ANSI_YELLOW);
	}

	return (NULL);
}

/*
 * Called for each leaf vdev.  Returns 0 if the vdev is healthy.
 * A vdev is unhealthy if any of the following are true:
 * 1) there are read, write, or checksum errors,
 * 2) its state is not ONLINE, or
 * 3) slow IO reporting was requested (-s) and there are slow IOs.
 */
static int
vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data)
{
	status_cbdata_t *cb = data;
	vdev_stat_t *vs;
	uint_t vsc;
	(void) hdl_data;

	if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) != 0)
		return (1);

	if (vs->vs_checksum_errors || vs->vs_read_errors ||
	    vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY)
		return (1);

	if (cb->cb_print_slow_ios && vs->vs_slow_ios)
		return (1);

	return (0);
}

/*
 * Print out configuration state as requested by status_callback.
 */
static void
print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
    nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
{
	nvlist_t **child, *root;
	uint_t c, i, vsc, children;
	pool_scan_stat_t *ps = NULL;
	vdev_stat_t *vs;
	char rbuf[6], wbuf[6], cbuf[6], dbuf[6];
	char *vname;
	uint64_t notpresent;
	spare_cbdata_t spare_cb;
	const char *state;
	const char *type;
	const char *path = NULL;
	const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL,
	    *scolor = NULL;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);

	if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
		return;

	state = zpool_state_to_name(vs->vs_state, vs->vs_aux);

	if (isspare) {
		/*
		 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
		 * online drives.
		 */
		if (vs->vs_aux == VDEV_AUX_SPARED)
			state = gettext("INUSE");
		else if (vs->vs_state == VDEV_STATE_HEALTHY)
			state = gettext("AVAIL");
	}

	/*
	 * If '-e' is specified then top-level vdevs and their children
	 * can be pruned if all of their leaves are healthy.
	 */
	if (cb->cb_print_unhealthy && depth > 0 &&
	    for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
		return;
	}

	(void) printf_color(health_str_to_color(state),
	    "\t%*s%-*s  %-8s", depth, "", cb->cb_namewidth - depth,
	    name, state);

	if (!isspare) {
		if (vs->vs_read_errors)
			rcolor = ANSI_RED;

		if (vs->vs_write_errors)
			wcolor = ANSI_RED;

		if (vs->vs_checksum_errors)
			ccolor = ANSI_RED;

		if (vs->vs_slow_ios)
			scolor = ANSI_BLUE;

		if (cb->cb_literal) {
			(void) fputc(' ', stdout);
			(void) printf_color(rcolor, "%5llu",
			    (u_longlong_t)vs->vs_read_errors);
			(void) fputc(' ', stdout);
			(void) printf_color(wcolor, "%5llu",
			    (u_longlong_t)vs->vs_write_errors);
			(void) fputc(' ', stdout);
			(void) printf_color(ccolor, "%5llu",
			    (u_longlong_t)vs->vs_checksum_errors);
		} else {
			zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
			zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
			zfs_nicenum(vs->vs_checksum_errors, cbuf,
			    sizeof (cbuf));
			(void) fputc(' ', stdout);
			(void) printf_color(rcolor, "%5s", rbuf);
			(void) fputc(' ', stdout);
			(void) printf_color(wcolor, "%5s", wbuf);
			(void) fputc(' ', stdout);
			(void) printf_color(ccolor, "%5s", cbuf);
		}
		if (cb->cb_print_slow_ios) {
			if (children == 0) {
				/* Only leaf vdevs have slow IOs */
zfs_nicenum(vs->vs_slow_ios, rbuf,
2967
sizeof (rbuf));
2968
} else {
2969
(void) snprintf(rbuf, sizeof (rbuf), "-");
2970
}
2971
2972
if (cb->cb_literal)
2973
(void) printf_color(scolor, " %5llu",
2974
(u_longlong_t)vs->vs_slow_ios);
2975
else
2976
(void) printf_color(scolor, " %5s", rbuf);
2977
}
2978
if (cb->cb_print_power) {
2979
if (children == 0) {
2980
/* Only leaf vdevs have physical slots */
2981
switch (zpool_power_current_state(zhp, (char *)
2982
fnvlist_lookup_string(nv,
2983
ZPOOL_CONFIG_PATH))) {
2984
case 0:
2985
(void) printf_color(ANSI_RED, " %5s",
2986
gettext("off"));
2987
break;
2988
case 1:
2989
printf(" %5s", gettext("on"));
2990
break;
2991
default:
2992
printf(" %5s", "-");
2993
}
2994
} else {
2995
printf(" %5s", "-");
2996
}
2997
}
2998
if (VDEV_STAT_VALID(vs_dio_verify_errors, vsc) &&
2999
cb->cb_print_dio_verify) {
3000
zfs_nicenum(vs->vs_dio_verify_errors, dbuf,
3001
sizeof (dbuf));
3002
3003
if (cb->cb_literal)
3004
printf(" %5llu",
3005
(u_longlong_t)vs->vs_dio_verify_errors);
3006
else
3007
printf(" %5s", dbuf);
3008
}
3009
}
3010
3011
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3012
&notpresent) == 0) {
3013
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
3014
(void) printf(" %s %s", gettext("was"), path);
3015
} else if (vs->vs_aux != 0) {
3016
(void) printf(" ");
3017
color_start(ANSI_RED);
3018
switch (vs->vs_aux) {
3019
case VDEV_AUX_OPEN_FAILED:
3020
(void) printf(gettext("cannot open"));
3021
break;
3022
3023
case VDEV_AUX_BAD_GUID_SUM:
3024
(void) printf(gettext("missing device"));
3025
break;
3026
3027
case VDEV_AUX_NO_REPLICAS:
3028
(void) printf(gettext("insufficient replicas"));
3029
break;
3030
3031
case VDEV_AUX_VERSION_NEWER:
3032
(void) printf(gettext("newer version"));
3033
break;
3034
3035
case VDEV_AUX_UNSUP_FEAT:
3036
(void) printf(gettext("unsupported feature(s)"));
3037
break;
3038
3039
case VDEV_AUX_ASHIFT_TOO_BIG:
3040
(void) printf(gettext("unsupported minimum blocksize"));
3041
break;
3042
3043
case VDEV_AUX_SPARED:
3044
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3045
&spare_cb.cb_guid) == 0);
3046
if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
3047
if (strcmp(zpool_get_name(spare_cb.cb_zhp),
3048
zpool_get_name(zhp)) == 0)
3049
(void) printf(gettext("currently in "
3050
"use"));
3051
else
3052
(void) printf(gettext("in use by "
3053
"pool '%s'"),
3054
zpool_get_name(spare_cb.cb_zhp));
3055
zpool_close(spare_cb.cb_zhp);
3056
} else {
3057
(void) printf(gettext("currently in use"));
3058
}
3059
break;
3060
3061
case VDEV_AUX_ERR_EXCEEDED:
3062
if (vs->vs_read_errors + vs->vs_write_errors +
3063
vs->vs_checksum_errors == 0 && children == 0 &&
3064
vs->vs_slow_ios > 0) {
3065
(void) printf(gettext("too many slow I/Os"));
3066
} else {
3067
(void) printf(gettext("too many errors"));
3068
}
3069
break;
3070
3071
case VDEV_AUX_IO_FAILURE:
3072
(void) printf(gettext("experienced I/O failures"));
3073
break;
3074
3075
case VDEV_AUX_BAD_LOG:
3076
(void) printf(gettext("bad intent log"));
3077
break;
3078
3079
case VDEV_AUX_EXTERNAL:
3080
(void) printf(gettext("external device fault"));
3081
break;
3082
3083
case VDEV_AUX_SPLIT_POOL:
3084
(void) printf(gettext("split into new pool"));
3085
break;
3086
3087
case VDEV_AUX_ACTIVE:
3088
(void) printf(gettext("currently in use"));
3089
break;
3090
3091
case VDEV_AUX_CHILDREN_OFFLINE:
3092
(void) printf(gettext("all children offline"));
3093
break;
3094
3095
case VDEV_AUX_BAD_LABEL:
3096
(void) printf(gettext("invalid label"));
3097
break;
3098
3099
default:
3100
(void) printf(gettext("corrupted data"));
3101
break;
3102
}
3103
color_end();
3104
} else if (children == 0 && !isspare &&
3105
getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
3106
VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
3107
vs->vs_configured_ashift < vs->vs_physical_ashift) {
3108
(void) printf(
3109
gettext(" block size: %dB configured, %dB native"),
3110
1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
3111
}
3112
3113
if (vs->vs_scan_removing != 0) {
3114
(void) printf(gettext(" (removing)"));
3115
} else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
3116
(void) printf(gettext(" (non-allocating)"));
3117
}
3118
3119
/* The root vdev has the scrub/resilver stats */
3120
root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3121
ZPOOL_CONFIG_VDEV_TREE);
3122
(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
3123
(uint64_t **)&ps, &c);
3124
3125
/*
3126
* If you force fault a drive that's resilvering, its scan stats can
3127
* get frozen in time, giving the false impression that it's
3128
* being resilvered. That's why we check the state to see if the vdev
3129
* is healthy before reporting "resilvering" or "repairing".
3130
*/
3131
if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
3132
vs->vs_state == VDEV_STATE_HEALTHY) {
3133
if (vs->vs_scan_processed != 0) {
3134
(void) printf(gettext(" (%s)"),
3135
(ps->pss_func == POOL_SCAN_RESILVER) ?
3136
"resilvering" : "repairing");
3137
} else if (vs->vs_resilver_deferred) {
3138
(void) printf(gettext(" (awaiting resilver)"));
3139
}
3140
}
3141
3142
/* The top-level vdevs have the rebuild stats */
3143
if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
3144
children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
3145
if (vs->vs_rebuild_processed != 0) {
3146
(void) printf(gettext(" (resilvering)"));
3147
}
3148
}
3149
3150
if (cb->vcdl != NULL) {
3151
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3152
printf(" ");
3153
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
3154
}
3155
}
3156
3157
/* Display vdev initialization and trim status for leaves. */
3158
if (children == 0) {
3159
print_status_initialize(vs, cb->cb_print_vdev_init);
3160
print_status_trim(vs, cb->cb_print_vdev_trim);
3161
}
3162
3163
(void) printf("\n");
3164
3165
for (c = 0; c < children; c++) {
3166
uint64_t islog = B_FALSE, ishole = B_FALSE;
3167
3168
/* Don't print logs or holes here */
3169
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3170
&islog);
3171
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3172
&ishole);
3173
if (islog || ishole)
3174
continue;
3175
/* Only print normal classes here */
3176
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
3177
continue;
3178
3179
/* Provide vdev_rebuild_stats to children if available */
3180
if (vrs == NULL) {
3181
(void) nvlist_lookup_uint64_array(nv,
3182
ZPOOL_CONFIG_REBUILD_STATS,
3183
(uint64_t **)&vrs, &i);
3184
}
3185
3186
vname = zpool_vdev_name(g_zfs, zhp, child[c],
3187
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3188
print_status_config(zhp, cb, vname, child[c], depth + 2,
3189
isspare, vrs);
3190
free(vname);
3191
}
3192
}
3193
3194
/*
3195
* Print the configuration of an exported pool. Iterate over all vdevs in the
3196
* pool, printing out the name and status for each one.
3197
*/
3198
static void
3199
print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
3200
int depth)
3201
{
3202
nvlist_t **child;
3203
uint_t c, children;
3204
vdev_stat_t *vs;
3205
const char *type;
3206
char *vname;
3207
3208
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3209
if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
3210
strcmp(type, VDEV_TYPE_HOLE) == 0)
3211
return;
3212
3213
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3214
(uint64_t **)&vs, &c) == 0);
3215
3216
(void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
3217
(void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
3218
3219
if (vs->vs_aux != 0) {
3220
(void) printf(" ");
3221
3222
switch (vs->vs_aux) {
3223
case VDEV_AUX_OPEN_FAILED:
3224
(void) printf(gettext("cannot open"));
3225
break;
3226
3227
case VDEV_AUX_BAD_GUID_SUM:
3228
(void) printf(gettext("missing device"));
3229
break;
3230
3231
case VDEV_AUX_NO_REPLICAS:
3232
(void) printf(gettext("insufficient replicas"));
3233
break;
3234
3235
case VDEV_AUX_VERSION_NEWER:
3236
(void) printf(gettext("newer version"));
3237
break;
3238
3239
case VDEV_AUX_UNSUP_FEAT:
3240
(void) printf(gettext("unsupported feature(s)"));
3241
break;
3242
3243
case VDEV_AUX_ERR_EXCEEDED:
3244
(void) printf(gettext("too many errors"));
3245
break;
3246
3247
case VDEV_AUX_ACTIVE:
3248
(void) printf(gettext("currently in use"));
3249
break;
3250
3251
case VDEV_AUX_CHILDREN_OFFLINE:
3252
(void) printf(gettext("all children offline"));
3253
break;
3254
3255
case VDEV_AUX_BAD_LABEL:
3256
(void) printf(gettext("invalid label"));
3257
break;
3258
3259
default:
3260
(void) printf(gettext("corrupted data"));
3261
break;
3262
}
3263
}
3264
(void) printf("\n");
3265
3266
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
3267
&child, &children) != 0)
3268
return;
3269
3270
for (c = 0; c < children; c++) {
3271
uint64_t is_log = B_FALSE;
3272
3273
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3274
&is_log);
3275
if (is_log)
3276
continue;
3277
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
3278
continue;
3279
3280
vname = zpool_vdev_name(g_zfs, NULL, child[c],
3281
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
3282
print_import_config(cb, vname, child[c], depth + 2);
3283
free(vname);
3284
}
3285
3286
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
3287
&child, &children) == 0) {
3288
(void) printf(gettext("\tcache\n"));
3289
for (c = 0; c < children; c++) {
3290
vname = zpool_vdev_name(g_zfs, NULL, child[c],
3291
cb->cb_name_flags);
3292
(void) printf("\t %s\n", vname);
3293
free(vname);
3294
}
3295
}
3296
3297
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
3298
&child, &children) == 0) {
3299
(void) printf(gettext("\tspares\n"));
3300
for (c = 0; c < children; c++) {
3301
vname = zpool_vdev_name(g_zfs, NULL, child[c],
3302
cb->cb_name_flags);
3303
(void) printf("\t %s\n", vname);
3304
free(vname);
3305
}
3306
}
3307
}
3308
3309
/*
 * Print specialized class vdevs.
 *
 * These are recorded as top level vdevs in the main pool child array
 * but with "is_log" set to 1 or an "alloc_bias" string.  We use either
 * print_status_config() or print_import_config() to print the top level
 * class vdevs, and any of their children (eg mirrored slogs) are then
 * printed recursively - which works because only the top level vdev is
 * marked.
 */
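/*
 * For example (illustrative output), a pool with a mirrored slog prints:
 *
 *	logs
 *	  mirror-1
 *	    sdc
 *	    sdd
 */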
static void
print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    const char *class)
{
	uint_t c, children;
	nvlist_t **child;
	boolean_t printed = B_FALSE;

	assert(zhp != NULL || !cb->cb_verbose);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE;
		const char *bias = NULL;
		const char *type = NULL;

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);

		if (is_log) {
			bias = (char *)VDEV_ALLOC_CLASS_LOGS;
		} else {
			(void) nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
			(void) nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_TYPE, &type);
		}

		if (bias == NULL || strcmp(bias, class) != 0)
			continue;
		if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
			continue;

		if (!printed) {
			(void) printf("\t%s\t\n", gettext(class));
			printed = B_TRUE;
		}

		char *name = zpool_vdev_name(g_zfs, zhp, child[c],
		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
		if (cb->cb_print_status)
			print_status_config(zhp, cb, name, child[c], 2,
			    B_FALSE, NULL);
		else
			print_import_config(cb, name, child[c], 2);
		free(name);
	}
}

/*
 * Display the status for the given pool.
 */
static int
show_import(nvlist_t *config, boolean_t report_error)
{
	uint64_t pool_state;
	vdev_stat_t *vs;
	const char *name;
	uint64_t guid;
	uint64_t hostid = 0;
	const char *msgid;
	const char *hostname = "unknown";
	nvlist_t *nvroot, *nvinfo;
	zpool_status_t reason;
	zpool_errata_t errata;
	const char *health;
	uint_t vsc;
	const char *comment;
	const char *indent;
	char buf[2048];
	status_cbdata_t cb = { 0 };

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &name) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &pool_state) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	health = zpool_state_to_name(vs->vs_state, vs->vs_aux);

	reason = zpool_import_status(config, &msgid, &errata);

	/*
	 * If we're importing using a cachefile, then we won't report any
	 * errors unless we are in the scan phase of the import.
	 */
	if (reason != ZPOOL_STATUS_OK && !report_error)
		return (reason);

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) {
		indent = " ";
	} else {
		comment = NULL;
		indent = "";
	}

	(void) printf(gettext("%s  pool: %s\n"), indent, name);
	(void) printf(gettext("%s    id: %llu\n"), indent, (u_longlong_t)guid);
	(void) printf(gettext("%s state: %s"), indent, health);
	if (pool_state == POOL_STATE_DESTROYED)
		(void) printf(gettext(" (DESTROYED)"));
	(void) printf("\n");

	if (reason != ZPOOL_STATUS_OK) {
		(void) printf("%s", indent);
		(void) printf_color(ANSI_BOLD, gettext("status: "));
	}
	switch (reason) {
	case ZPOOL_STATUS_MISSING_DEV_R:
	case ZPOOL_STATUS_MISSING_DEV_NR:
	case ZPOOL_STATUS_BAD_GUID_SUM:
		(void) printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "are missing from the system.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_LABEL_R:
	case ZPOOL_STATUS_CORRUPT_LABEL_NR:
		(void) printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "contains corrupted data.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_DATA:
		(void) printf_color(ANSI_YELLOW, gettext("The pool data is "
		    "corrupted.\n"));
		break;

	case ZPOOL_STATUS_OFFLINE_DEV:
		(void) printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "are offlined.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_POOL:
		(void) printf_color(ANSI_YELLOW, gettext("The pool metadata is "
		    "corrupted.\n"));
		break;

	case ZPOOL_STATUS_VERSION_OLDER:
		(void) printf_color(ANSI_YELLOW, gettext("The pool is "
		    "formatted using a legacy on-disk version.\n"));
		break;

	case ZPOOL_STATUS_VERSION_NEWER:
		(void) printf_color(ANSI_YELLOW, gettext("The pool is "
		    "formatted using an incompatible version.\n"));
		break;

	case ZPOOL_STATUS_FEAT_DISABLED:
		(void) printf_color(ANSI_YELLOW, gettext("Some supported "
		    "features are not enabled on the pool.\n"
		    "\t%s(Note that they may be intentionally disabled if the\n"
		    "\t%s'compatibility' property is set.)\n"), indent, indent);
		break;

	case ZPOOL_STATUS_COMPATIBILITY_ERR:
		(void) printf_color(ANSI_YELLOW, gettext("Error reading or "
		    "parsing the file(s) indicated by the 'compatibility'\n"
		    "\t%sproperty.\n"), indent);
		break;

	case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
		(void) printf_color(ANSI_YELLOW, gettext("One or more features "
		    "are enabled on the pool despite not being\n"
		    "\t%srequested by the 'compatibility' property.\n"),
		    indent);
		break;

	case ZPOOL_STATUS_UNSUP_FEAT_READ:
		(void) printf_color(ANSI_YELLOW, gettext("The pool uses the "
		    "following feature(s) not supported on this system:\n"));
		color_start(ANSI_YELLOW);
		zpool_collect_unsup_feat(config, buf, 2048);
		(void) printf("%s", buf);
		color_end();
		break;

	case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
		(void) printf_color(ANSI_YELLOW, gettext("The pool can only be "
		    "accessed in read-only mode on this system. It\n"
		    "\t%scannot be accessed in read-write mode because it uses "
		    "the following\n"
		    "\t%sfeature(s) not supported on this system:\n"),
		    indent, indent);
		color_start(ANSI_YELLOW);
		zpool_collect_unsup_feat(config, buf, 2048);
		(void) printf("%s", buf);
		color_end();
		break;

	case ZPOOL_STATUS_HOSTID_ACTIVE:
		(void) printf_color(ANSI_YELLOW, gettext("The pool is "
		    "currently imported by another system.\n"));
		break;

	case ZPOOL_STATUS_HOSTID_REQUIRED:
		(void) printf_color(ANSI_YELLOW, gettext("The pool has the "
		    "multihost property on.  It cannot\n"
		    "\t%sbe safely imported when the system hostid is not "
		    "set.\n"), indent);
		break;

	case ZPOOL_STATUS_HOSTID_MISMATCH:
		(void) printf_color(ANSI_YELLOW, gettext("The pool was last "
		    "accessed by another system.\n"));
		break;

	case ZPOOL_STATUS_FAULTED_DEV_R:
	case ZPOOL_STATUS_FAULTED_DEV_NR:
		(void) printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "are faulted.\n"));
		break;

	case ZPOOL_STATUS_BAD_LOG:
		(void) printf_color(ANSI_YELLOW, gettext("An intent log record "
		    "cannot be read.\n"));
		break;

	case ZPOOL_STATUS_RESILVERING:
	case ZPOOL_STATUS_REBUILDING:
		(void) printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "were being resilvered.\n"));
		break;

	case ZPOOL_STATUS_ERRATA:
		(void) printf_color(ANSI_YELLOW,
		    gettext("Errata #%d detected.\n"),
		    errata);
		break;

	case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
		(void) printf_color(ANSI_YELLOW, gettext("One or more devices "
		    "are configured to use a non-native block size.\n"
		    "\t%sExpect reduced performance.\n"), indent);
		break;

	default:
		/*
		 * No other status can be seen when importing pools.
		 */
		assert(reason == ZPOOL_STATUS_OK);
	}

	/*
	 * Print out an action according to the overall state of the pool.
	 */
	if (vs->vs_state != VDEV_STATE_HEALTHY ||
	    reason != ZPOOL_STATUS_ERRATA || errata != ZPOOL_ERRATA_NONE) {
		(void) printf("%s", indent);
		(void) printf(gettext("action: "));
	}
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		if (reason == ZPOOL_STATUS_VERSION_OLDER ||
		    reason == ZPOOL_STATUS_FEAT_DISABLED) {
			(void) printf(gettext("The pool can be imported using "
			    "its name or numeric identifier, though\n"
			    "\t%ssome features will not be available without "
			    "an explicit 'zpool upgrade'.\n"), indent);
		} else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
			(void) printf(gettext("The pool can be imported using "
			    "its name or numeric\n"
			    "\t%sidentifier, though the file(s) indicated by "
			    "its 'compatibility'\n"
			    "\t%sproperty cannot be parsed at this time.\n"),
			    indent, indent);
		} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
			(void) printf(gettext("The pool can be imported using "
			    "its name or numeric identifier and\n"
			    "\t%sthe '-f' flag.\n"), indent);
		} else if (reason == ZPOOL_STATUS_ERRATA) {
			switch (errata) {
			case ZPOOL_ERRATA_ZOL_2094_SCRUB:
				(void) printf(gettext("The pool can be "
				    "imported using its name or numeric "
				    "identifier,\n"
				    "\t%showever there is a compatibility "
				    "issue which should be corrected\n"
				    "\t%sby running 'zpool scrub'\n"),
				    indent, indent);
				break;

			case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
				(void) printf(gettext("The pool cannot be "
				    "imported with this version of ZFS due to\n"
				    "\t%san active asynchronous destroy. "
				    "Revert to an earlier version\n"
				    "\t%sand allow the destroy to complete "
				    "before updating.\n"), indent, indent);
				break;

			case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
				(void) printf(gettext("Existing encrypted "
				    "datasets contain an on-disk "
				    "incompatibility, which\n"
				    "\t%sneeds to be corrected. Backup these "
				    "datasets to new encrypted datasets\n"
				    "\t%sand destroy the old ones.\n"),
				    indent, indent);
				break;

			case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
				(void) printf(gettext("Existing encrypted "
				    "snapshots and bookmarks contain an "
				    "on-disk\n"
				    "\t%sincompatibility. This may cause "
				    "on-disk corruption if they are used\n"
				    "\t%swith 'zfs recv'. To correct the "
				    "issue, enable the bookmark_v2 feature.\n"
				    "\t%sNo additional action is needed if "
				    "there are no encrypted snapshots or\n"
				    "\t%sbookmarks. If preserving the "
				    "encrypted snapshots and bookmarks is\n"
				    "\t%srequired, use a non-raw send to "
				    "backup and restore them. Alternately,\n"
				    "\t%sthey may be removed to resolve the "
				    "incompatibility.\n"), indent, indent,
				    indent, indent, indent, indent);
				break;
			default:
				/*
				 * All errata must contain an action message.
				 */
				assert(errata == ZPOOL_ERRATA_NONE);
			}
		} else {
			(void) printf(gettext("The pool can be imported using "
			    "its name or numeric identifier.\n"));
		}
	} else if (vs->vs_state == VDEV_STATE_DEGRADED) {
		(void) printf(gettext("The pool can be imported despite "
		    "missing or damaged devices.  The\n"
		    "\t%sfault tolerance of the pool may be compromised if "
		    "imported.\n"), indent);
	} else {
		switch (reason) {
		case ZPOOL_STATUS_VERSION_NEWER:
			(void) printf(gettext("The pool cannot be imported. "
			    "Access the pool on a system running newer\n"
			    "\t%ssoftware, or recreate the pool from "
			    "backup.\n"), indent);
			break;
		case ZPOOL_STATUS_UNSUP_FEAT_READ:
			(void) printf(gettext("The pool cannot be imported. "
			    "Access the pool on a system that supports\n"
			    "\t%sthe required feature(s), or recreate the pool "
			    "from backup.\n"), indent);
			break;
		case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
			(void) printf(gettext("The pool cannot be imported in "
			    "read-write mode. Import the pool with\n"
			    "\t%s'-o readonly=on', access the pool on a system "
			    "that supports the\n"
			    "\t%srequired feature(s), or recreate the pool "
			    "from backup.\n"), indent, indent);
			break;
		case ZPOOL_STATUS_MISSING_DEV_R:
		case ZPOOL_STATUS_MISSING_DEV_NR:
		case ZPOOL_STATUS_BAD_GUID_SUM:
			(void) printf(gettext("The pool cannot be imported. "
			    "Attach the missing\n"
			    "\t%sdevices and try again.\n"), indent);
			break;
		case ZPOOL_STATUS_HOSTID_ACTIVE:
			VERIFY0(nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo));

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
				hostname = fnvlist_lookup_string(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME);

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
				hostid = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID);

			(void) printf(gettext("The pool must be exported from "
			    "%s (hostid=%"PRIx64")\n"
			    "\t%sbefore it can be safely imported.\n"),
			    hostname, hostid, indent);
			break;
		case ZPOOL_STATUS_HOSTID_REQUIRED:
			(void) printf(gettext("Set a unique system hostid with "
			    "the zgenhostid(8) command.\n"));
			break;
		default:
			(void) printf(gettext("The pool cannot be imported due "
			    "to damaged devices or data.\n"));
		}
	}

	/* Print the comment attached to the pool. */
	if (comment != NULL)
		(void) printf(gettext("comment: %s\n"), comment);

	/*
	 * If the state is "closed" or "can't open", and the aux state
	 * is "corrupt data":
	 */
	if ((vs->vs_state == VDEV_STATE_CLOSED ||
	    vs->vs_state == VDEV_STATE_CANT_OPEN) &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA) {
		if (pool_state == POOL_STATE_DESTROYED)
			(void) printf(gettext("\t%sThe pool was destroyed, "
			    "but can be imported using the '-Df' flags.\n"),
			    indent);
		else if (pool_state != POOL_STATE_EXPORTED)
			(void) printf(gettext("\t%sThe pool may be active on "
			    "another system, but can be imported using\n"
			    "\t%sthe '-f' flag.\n"), indent, indent);
	}

	if (msgid != NULL) {
		(void) printf(gettext("%s   see: "
		    "https://openzfs.github.io/openzfs-docs/msg/%s\n"),
		    indent, msgid);
	}

	(void) printf(gettext("%sconfig:\n\n"), indent);

	cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
	    VDEV_NAME_TYPE_ID);
	if (cb.cb_namewidth < 10)
		cb.cb_namewidth = 10;

	print_import_config(&cb, name, nvroot, 0);

	print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
	print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
	print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);

	if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
		(void) printf(gettext("\n\t%sAdditional devices are known to "
		    "be part of this pool, though their\n"
		    "\t%sexact configuration cannot be determined.\n"),
		    indent, indent);
	}
	return (0);
}

static boolean_t
zfs_force_import_required(nvlist_t *config)
{
	uint64_t state;
	uint64_t hostid = 0;
	nvlist_t *nvinfo;

	state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
	nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);

	/*
	 * The hostid on LOAD_INFO comes from the MOS label via
	 * spa_tryimport().  If it's not there then we're likely talking to an
	 * older kernel, so use the top one, which will be from the label
	 * discovered in zpool_find_import(), or if a cachefile is in use, the
	 * local hostid.
	 */
if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0)
3780
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
3781
&hostid);
3782
3783
if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
3784
return (B_TRUE);
3785
3786
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
3787
mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
3788
ZPOOL_CONFIG_MMP_STATE);
3789
3790
if (mmp_state != MMP_STATE_INACTIVE)
3791
return (B_TRUE);
3792
}
3793
3794
return (B_FALSE);
3795
}
3796

/*
 * Perform the import for the given configuration. This passes the heavy
 * lifting off to zpool_import_props(), and then mounts the datasets contained
 * within the pool.
 */
static int
do_import(nvlist_t *config, const char *newname, const char *mntopts,
    nvlist_t *props, int flags, uint_t mntthreads)
{
	int ret = 0;
	int ms_status = 0;
	zpool_handle_t *zhp;
	const char *name;
	uint64_t version;

	name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
	version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);

	if (!SPA_VERSION_IS_SUPPORTED(version)) {
		(void) fprintf(stderr, gettext("cannot import '%s': pool "
		    "is formatted using an unsupported ZFS version\n"), name);
		return (1);
	} else if (zfs_force_import_required(config) &&
	    !(flags & ZFS_IMPORT_ANY_HOST)) {
		mmp_state_t mmp_state = MMP_STATE_INACTIVE;
		nvlist_t *nvinfo;

		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
			mmp_state = fnvlist_lookup_uint64(nvinfo,
			    ZPOOL_CONFIG_MMP_STATE);

		if (mmp_state == MMP_STATE_ACTIVE) {
			const char *hostname = "<unknown>";
			uint64_t hostid = 0;

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
				hostname = fnvlist_lookup_string(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME);

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
				hostid = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID);

			(void) fprintf(stderr, gettext("cannot import '%s': "
			    "pool is imported on %s (hostid: "
			    "0x%"PRIx64")\nExport the pool on the other "
			    "system, then run 'zpool import'.\n"),
			    name, hostname, hostid);
		} else if (mmp_state == MMP_STATE_NO_HOSTID) {
			(void) fprintf(stderr, gettext("Cannot import '%s': "
			    "pool has the multihost property on and the\n"
			    "system's hostid is not set. Set a unique hostid "
			    "with the zgenhostid(8) command.\n"), name);
		} else {
			const char *hostname = "<unknown>";
			time_t timestamp = 0;
			uint64_t hostid = 0;

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME))
				hostname = fnvlist_lookup_string(nvinfo,
				    ZPOOL_CONFIG_HOSTNAME);
			else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
				hostname = fnvlist_lookup_string(config,
				    ZPOOL_CONFIG_HOSTNAME);

			if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
				timestamp = fnvlist_lookup_uint64(config,
				    ZPOOL_CONFIG_TIMESTAMP);

			if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID))
				hostid = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_HOSTID);
			else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
				hostid = fnvlist_lookup_uint64(config,
				    ZPOOL_CONFIG_HOSTID);

			(void) fprintf(stderr, gettext("cannot import '%s': "
			    "pool was previously in use from another system.\n"
			    "Last accessed by %s (hostid=%"PRIx64") at %s"
			    "The pool can be imported, use 'zpool import -f' "
			    "to import the pool.\n"), name, hostname,
			    hostid, ctime(&timestamp));
		}

		return (1);
	}

	if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
		return (1);

	if (newname != NULL)
		name = newname;

	if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
		return (1);

	/*
	 * Loading keys is best effort. We don't want to return immediately
	 * if it fails but we do want to give the error to the caller.
	 */
	if (flags & ZFS_IMPORT_LOAD_KEYS &&
	    zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
		ret = 1;

	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
	    !(flags & ZFS_IMPORT_ONLY)) {
		ms_status = zpool_enable_datasets(zhp, mntopts, 0, mntthreads);
		if (ms_status == EZFS_SHAREFAILED) {
			(void) fprintf(stderr, gettext("Import was "
			    "successful, but unable to share some datasets\n"));
		} else if (ms_status == EZFS_MOUNTFAILED) {
			(void) fprintf(stderr, gettext("Import was "
			    "successful, but unable to mount some datasets\n"));
		}
	}

	zpool_close(zhp);
	return (ret);
}
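
/*
 * Bundle of arguments for one do_import() call, dispatched on the import
 * taskq when importing all pools in parallel.
 */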
typedef struct import_parameters {
	nvlist_t *ip_config;
	const char *ip_mntopts;
	nvlist_t *ip_props;
	int ip_flags;
	uint_t ip_mntthreads;
	int *ip_err;
} import_parameters_t;
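
/*
 * Taskq callback: run one queued import and fold its status into the
 * shared error flag, then free the dispatched import_parameters_t.
 */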
static void
do_import_task(void *arg)
{
	import_parameters_t *ip = arg;
	*ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts,
	    ip->ip_props, ip->ip_flags, ip->ip_mntthreads);
	free(ip);
}
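
/*
 * Walk the list of import candidates and act on each one: display it,
 * import it (in parallel on a taskq for -a), or remember the config that
 * matches the requested pool name or GUID and import that one at the end.
 */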
static int
import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
    char *orig_name, char *new_name, importargs_t *import)
{
	nvlist_t *config = NULL;
	nvlist_t *found_config = NULL;
	uint64_t pool_state;
	boolean_t pool_specified = (import->poolname != NULL ||
	    import->guid != 0);
	uint_t npools = 0;

	taskq_t *tq = NULL;
	if (import->do_all) {
		tq = taskq_create("zpool_import_all",
		    5 * sysconf(_SC_NPROCESSORS_ONLN), minclsyspri, 1, INT_MAX,
		    TASKQ_DYNAMIC);
	}

	/*
	 * At this point we have a list of import candidate configs. Even if
	 * we were searching by pool name or guid, we still need to
	 * post-process the list to deal with pool state and possible
	 * duplicate names.
	 */
	int err = 0;
	nvpair_t *elem = NULL;
	boolean_t first = B_TRUE;
	if (!pool_specified && import->do_all) {
		while ((elem = nvlist_next_nvpair(pools, elem)) != NULL)
			npools++;
	}
	while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {

		verify(nvpair_value_nvlist(elem, &config) == 0);

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &pool_state) == 0);
		if (!import->do_destroyed &&
		    pool_state == POOL_STATE_DESTROYED)
			continue;
		if (import->do_destroyed &&
		    pool_state != POOL_STATE_DESTROYED)
			continue;

		verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
		    import->policy) == 0);

		if (!pool_specified) {
			if (first)
				first = B_FALSE;
			else if (!import->do_all)
				(void) fputc('\n', stdout);

			if (import->do_all) {
				import_parameters_t *ip = safe_malloc(
				    sizeof (import_parameters_t));

				ip->ip_config = config;
				ip->ip_mntopts = mntopts;
				ip->ip_props = props;
				ip->ip_flags = flags;
				ip->ip_mntthreads = mount_tp_nthr / npools;
				ip->ip_err = &err;

				(void) taskq_dispatch(tq, do_import_task,
				    (void *)ip, TQ_SLEEP);
			} else {
				/*
				 * If we're importing from cachefile, then
				 * we don't want to report errors until we
				 * are in the scan phase of the import. If
				 * we get an error, then we return that error
				 * to invoke the scan phase.
				 */
				if (import->cachefile && !import->scan)
					err = show_import(config, B_FALSE);
				else
					(void) show_import(config, B_TRUE);
			}
		} else if (import->poolname != NULL) {
			const char *name;

			/*
			 * We are searching for a pool based on name.
			 */
			verify(nvlist_lookup_string(config,
			    ZPOOL_CONFIG_POOL_NAME, &name) == 0);

			if (strcmp(name, import->poolname) == 0) {
				if (found_config != NULL) {
					(void) fprintf(stderr, gettext(
					    "cannot import '%s': more than "
					    "one matching pool\n"),
					    import->poolname);
					(void) fprintf(stderr, gettext(
					    "import by numeric ID instead\n"));
					err = B_TRUE;
				}
				found_config = config;
			}
		} else {
			uint64_t guid;

			/*
			 * Search for a pool by guid.
			 */
			verify(nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);

			if (guid == import->guid)
				found_config = config;
		}
	}
	if (import->do_all) {
		taskq_wait(tq);
		taskq_destroy(tq);
	}

	/*
	 * If we were searching for a specific pool, verify that we found a
	 * pool, and then do the import.
	 */
	if (pool_specified && err == 0) {
		if (found_config == NULL) {
			(void) fprintf(stderr, gettext("cannot import '%s': "
			    "no such pool available\n"), orig_name);
			err = B_TRUE;
		} else {
			err |= do_import(found_config, new_name,
			    mntopts, props, flags, mount_tp_nthr);
		}
	}

	/*
	 * If we were just looking for pools, report an error if none were
	 * found.
	 */
	if (!pool_specified && first)
		(void) fprintf(stderr,
		    gettext("no pools available to import\n"));
	return (err);
}

typedef struct target_exists_args {
	const char *poolname;
	uint64_t poolguid;
} target_exists_args_t;
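
/*
 * zpool_iter() callback: report whether an already-imported pool matches
 * the requested name or GUID from target_exists_args_t.
 */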
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	target_exists_args_t *args = data;
	nvlist_t *config = zpool_get_config(zhp, NULL);
	int found = 0;

	if (config == NULL)
		return (0);

	if (args->poolname != NULL) {
		const char *pool_name;

		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &pool_name) == 0);
		if (strcmp(pool_name, args->poolname) == 0)
			found = 1;
	} else {
		uint64_t pool_guid;

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0);
		if (pool_guid == args->poolguid)
			found = 1;
	}
	zpool_close(zhp);

	return (found);
}

/*
 * zpool checkpoint <pool>
 *       checkpoint --discard <pool>
 *
 *	-d	Discard the checkpoint from a checkpointed
 *	--discard	pool.
 *
 *	-w	Wait for discarding a checkpoint to complete.
 *	--wait
 *
 * Checkpoints the specified pool by taking a "snapshot" of its
 * current state. A pool can only have one checkpoint at a time.
 */
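/*
 * Illustrative invocations (assumed, matching the options above):
 *
 *	# zpool checkpoint tank
 *	# zpool checkpoint --discard --wait tank
 */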
int
zpool_do_checkpoint(int argc, char **argv)
{
	boolean_t discard, wait;
	char *pool;
	zpool_handle_t *zhp;
	int c, err;

	struct option long_options[] = {
		{"discard", no_argument, NULL, 'd'},
		{"wait", no_argument, NULL, 'w'},
		{0, 0, 0, 0}
	};

	discard = B_FALSE;
	wait = B_FALSE;
	while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
		switch (c) {
		case 'd':
			discard = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	if (wait && !discard) {
		(void) fprintf(stderr, gettext("--wait only valid when "
		    "--discard also specified\n"));
		usage(B_FALSE);
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	}

	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	pool = argv[0];

	if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
		/* As a special case, check for use of '/' in the name */
		if (strchr(pool, '/') != NULL)
			(void) fprintf(stderr, gettext("'zpool checkpoint' "
			    "doesn't work on datasets. To save the state "
			    "of a dataset from a specific point in time "
			    "please use 'zfs snapshot'\n"));
		return (1);
	}

	if (discard) {
		err = (zpool_discard_checkpoint(zhp) != 0);
		if (err == 0 && wait)
			err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
	} else {
		err = (zpool_checkpoint(zhp) != 0);
	}

	zpool_close(zhp);

	return (err);
}

#define	CHECKPOINT_OPT	1024

/*
 * zpool prefetch [-t <type>] <pool>
 *
 * Prefetches a particular type of data (the DDT or the BRT; both when
 * no -t is given) in the specified pool.
 */
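/*
 * Illustrative invocation (assumed): prefetch just the dedup tables:
 *
 *	# zpool prefetch -t ddt tank
 */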
int
zpool_do_prefetch(int argc, char **argv)
{
	int c;
	char *poolname;
	char *typestr = NULL;
	zpool_prefetch_type_t type;
	zpool_handle_t *zhp;
	int err = 0;

	while ((c = getopt(argc, argv, "t:")) != -1) {
		switch (c) {
		case 't':
			typestr = optarg;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if (typestr == NULL) {
		/* Prefetch all types */
		err = zpool_prefetch(zhp, ZPOOL_PREFETCH_DDT);
		if (err == 0)
			err = zpool_prefetch(zhp, ZPOOL_PREFETCH_BRT);
	} else {
		if (strcmp(typestr, "ddt") == 0) {
			type = ZPOOL_PREFETCH_DDT;
		} else if (strcmp(typestr, "brt") == 0) {
			type = ZPOOL_PREFETCH_BRT;
		} else {
			(void) fprintf(stderr,
			    gettext("unsupported prefetch type\n"));
			zpool_close(zhp);
			usage(B_FALSE);
		}
		err = zpool_prefetch(zhp, type);
	}

	zpool_close(zhp);

	return (err);
}

/*
 * zpool import [-d dir] [-D]
 *       import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
 *              [-d dir | -c cachefile | -s] [-f] -a
 *       import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
 *              [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
 *              [newpool]
 *
 *	-c	Read pool information from a cachefile instead of searching
 *		devices. If importing from a cachefile config fails, then
 *		fall back to searching for devices only in the directories
 *		that exist in the cachefile.
 *
 *	-d	Scan in a specific directory, other than /dev/. More than
 *		one directory can be specified using multiple '-d' options.
 *
 *	-D	Scan for previously destroyed pools or import all or only
 *		specified destroyed pools.
 *
 *	-R	Temporarily import the pool, with all mountpoints relative to
 *		the given root. The pool will remain exported when the machine
 *		is rebooted.
 *
 *	-V	Import even in the presence of faulted vdevs. This is an
 *		intentionally undocumented option for testing purposes, and
 *		treats the pool configuration as complete, leaving any bad
 *		vdevs in the FAULTED state. In other words, it does a verbatim
 *		import.
 *
 *	-f	Force import, even if it appears that the pool is active.
 *
 *	-F	Attempt rewind if necessary.
 *
 *	-n	See if rewind would work, but don't actually rewind.
 *
 *	-N	Import the pool but don't mount datasets.
 *
 *	-T	Specify a starting txg to use for import. This is an
 *		intentionally undocumented option for testing purposes.
 *
 *	-a	Import all pools found.
 *
 *	-l	Load encryption keys while importing.
 *
 *	-o	Set property=value and/or temporary mount options (without
 *		'=').
 *
 *	-s	Scan using the default search path; the libblkid cache will
 *		not be consulted.
 *
 *	--rewind-to-checkpoint
 *		Import the pool and revert back to the checkpoint.
 *
 * The import command scans for pools to import, and imports pools based on
 * pool name and GUID. The pool can also be renamed as part of the import
 * process.
 */
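/*
 * Illustrative invocations (assumed): list candidates, import everything
 * found under a directory, and import one pool under a new name:
 *
 *	# zpool import
 *	# zpool import -d /dev/disk/by-id -a
 *	# zpool import tank newtank
 */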
int
zpool_do_import(int argc, char **argv)
{
	char **searchdirs = NULL;
	char *env, *envdup = NULL;
	int nsearch = 0;
	int c;
	int err = 0;
	nvlist_t *pools = NULL;
	boolean_t do_all = B_FALSE;
	boolean_t do_destroyed = B_FALSE;
	char *mntopts = NULL;
	uint64_t searchguid = 0;
	char *searchname = NULL;
	char *propval;
	nvlist_t *policy = NULL;
	nvlist_t *props = NULL;
	int flags = ZFS_IMPORT_NORMAL;
	uint32_t rewind_policy = ZPOOL_NO_REWIND;
	boolean_t dryrun = B_FALSE;
	boolean_t do_rewind = B_FALSE;
	boolean_t xtreme_rewind = B_FALSE;
	boolean_t do_scan = B_FALSE;
	boolean_t pool_exists = B_FALSE;
	uint64_t txg = -1ULL;
	char *cachefile = NULL;
	importargs_t idata = { 0 };
	char *endptr;

	struct option long_options[] = {
		{"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
	    long_options, NULL)) != -1) {
		switch (c) {
		case 'a':
			do_all = B_TRUE;
			break;
		case 'c':
			cachefile = optarg;
			break;
		case 'd':
			searchdirs = safe_realloc(searchdirs,
			    (nsearch + 1) * sizeof (char *));
			searchdirs[nsearch++] = optarg;
			break;
		case 'D':
			do_destroyed = B_TRUE;
			break;
		case 'f':
			flags |= ZFS_IMPORT_ANY_HOST;
			break;
		case 'F':
			do_rewind = B_TRUE;
			break;
		case 'l':
			flags |= ZFS_IMPORT_LOAD_KEYS;
			break;
		case 'm':
			flags |= ZFS_IMPORT_MISSING_LOG;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'N':
			flags |= ZFS_IMPORT_ONLY;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) != NULL) {
				*propval = '\0';
				propval++;
				if (add_prop_list(optarg, propval,
				    &props, B_TRUE))
					goto error;
			} else {
				mntopts = optarg;
			}
			break;
		case 'R':
			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
				goto error;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto error;
			break;
		case 's':
			do_scan = B_TRUE;
			break;
		case 't':
			flags |= ZFS_IMPORT_TEMP_NAME;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto error;
			break;

		case 'T':
			errno = 0;
			txg = strtoull(optarg, &endptr, 0);
			if (errno != 0 || *endptr != '\0') {
				(void) fprintf(stderr,
				    gettext("invalid txg value\n"));
				usage(B_FALSE);
			}
			rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
			break;
		case 'V':
			flags |= ZFS_IMPORT_VERBATIM;
			break;
		case 'X':
			xtreme_rewind = B_TRUE;
			break;
		case CHECKPOINT_OPT:
			flags |= ZFS_IMPORT_CHECKPOINT;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	if (cachefile && nsearch != 0) {
		(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
		usage(B_FALSE);
	}

	if (cachefile && do_scan) {
		(void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
		usage(B_FALSE);
	}

	if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
		(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
		usage(B_FALSE);
	}

	if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
		(void) fprintf(stderr, gettext("-l is only meaningful during "
		    "an import\n"));
		usage(B_FALSE);
	}

	if ((dryrun || xtreme_rewind) && !do_rewind) {
		(void) fprintf(stderr,
		    gettext("-n or -X only meaningful with -F\n"));
		usage(B_FALSE);
	}
	if (dryrun)
		rewind_policy = ZPOOL_TRY_REWIND;
	else if (do_rewind)
		rewind_policy = ZPOOL_DO_REWIND;
	if (xtreme_rewind)
		rewind_policy |= ZPOOL_EXTREME_REWIND;

	/* In the future, we can capture further policy and include it here */
	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
	    nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
	    rewind_policy) != 0)
		goto error;

	/* check argument count */
	if (do_all) {
		if (argc != 0) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc > 2) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
	}

	/*
	 * Check for the effective uid. We do this explicitly here because
	 * otherwise any attempt to discover pools will silently fail.
	 */
	if (argc == 0 && geteuid() != 0) {
		(void) fprintf(stderr, gettext("cannot "
		    "discover pools: permission denied\n"));

		free(searchdirs);
		nvlist_free(props);
		nvlist_free(policy);
		return (1);
	}

	/*
	 * Depending on the arguments given, we do one of the following:
	 *
	 *	<none>	Iterate through all pools and display information about
	 *		each one.
	 *
	 *	-a	Iterate through all pools and try to import each one.
	 *
	 *	<id>	Find the pool that corresponds to the given GUID/pool
	 *		name and import that one.
	 *
	 *	-D	The above options apply only to destroyed pools.
	 */
	if (argc != 0) {
		char *endptr;

		errno = 0;
		searchguid = strtoull(argv[0], &endptr, 10);
		if (errno != 0 || *endptr != '\0') {
			searchname = argv[0];
			searchguid = 0;
		}

		/*
		 * User specified a name or guid. Ensure it's unique.
		 */
		target_exists_args_t search = {searchname, searchguid};
		pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
	}

	/*
	 * Check the environment for the preferred search path.
	 */
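	/*
	 * ZPOOL_IMPORT_PATH is a colon-separated directory list, e.g.
	 * (illustrative value) /dev/disk/by-vdev:/dev/disk/by-id.
	 */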
	if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
		char *dir, *tmp = NULL;

		envdup = strdup(env);

		for (dir = strtok_r(envdup, ":", &tmp);
		    dir != NULL;
		    dir = strtok_r(NULL, ":", &tmp)) {
			searchdirs = safe_realloc(searchdirs,
			    (nsearch + 1) * sizeof (char *));
			searchdirs[nsearch++] = dir;
		}
	}

	idata.path = searchdirs;
	idata.paths = nsearch;
	idata.poolname = searchname;
	idata.guid = searchguid;
	idata.cachefile = cachefile;
	idata.scan = do_scan;
	idata.policy = policy;
	idata.do_destroyed = do_destroyed;
	idata.do_all = do_all;

	libpc_handle_t lpch = {
		.lpc_lib_handle = g_zfs,
		.lpc_ops = &libzfs_config_ops,
		.lpc_printerr = B_TRUE
	};
	pools = zpool_search_import(&lpch, &idata);

	if (pools != NULL && pool_exists &&
	    (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
		(void) fprintf(stderr, gettext("cannot import '%s': "
		    "a pool with that name already exists\n"),
		    argv[0]);
		(void) fprintf(stderr, gettext("use the form '%s "
		    "<pool | id> <newpool>' to give it a new name\n"),
		    "zpool import");
		err = 1;
	} else if (pools == NULL && pool_exists) {
		(void) fprintf(stderr, gettext("cannot import '%s': "
		    "a pool with that name is already created/imported,\n"),
		    argv[0]);
		(void) fprintf(stderr, gettext("and no additional pools "
		    "with that name were found\n"));
		err = 1;
	} else if (pools == NULL) {
		if (argc != 0) {
			(void) fprintf(stderr, gettext("cannot import '%s': "
			    "no such pool available\n"), argv[0]);
		}
		err = 1;
	}

	if (err == 1) {
		free(searchdirs);
		free(envdup);
		nvlist_free(policy);
		nvlist_free(pools);
		nvlist_free(props);
		return (1);
	}

	err = import_pools(pools, props, mntopts, flags,
	    argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata);

	/*
	 * If we're using the cachefile and we failed to import, then
	 * fall back to scanning the directory for pools that match
	 * those in the cachefile.
	 */
	if (err != 0 && cachefile != NULL) {
		(void) printf(gettext("cachefile import failed, retrying\n"));

		/*
		 * We use the scan flag to gather the directories that exist
		 * in the cachefile. If we need to fall back to searching for
		 * the pool config, we will only search devices in these
		 * directories.
		 */
		idata.scan = B_TRUE;
		nvlist_free(pools);
		pools = zpool_search_import(&lpch, &idata);

		err = import_pools(pools, props, mntopts, flags,
		    argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL,
		    &idata);
	}

error:
	nvlist_free(props);
	nvlist_free(pools);
	nvlist_free(policy);
	free(searchdirs);
	free(envdup);

	return (err ? 1 : 0);
}

/*
 * zpool sync [-f] [pool] ...
 *
 *	-f	(undocumented) force uberblock (and config including zpool
 *		cache file) update.
 *
 * Sync the specified pool(s).
 * Without arguments "zpool sync" will sync all pools.
 * This command initiates TXG sync(s) and will return after the TXG(s) commit.
 */
static int
zpool_do_sync(int argc, char **argv)
{
	int ret;
	boolean_t force = B_FALSE;

	/* check options */
	while ((ret = getopt(argc, argv, "f")) != -1) {
		switch (ret) {
		case 'f':
			force = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* if argc == 0 we will execute zpool_sync_one on all pools */
	ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, zpool_sync_one, &force);

	return (ret);
}
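
/*
 * Per-invocation state shared by the 'zpool iostat' printing callbacks.
 */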
typedef struct iostat_cbdata {
	uint64_t cb_flags;
	int cb_namewidth;
	int cb_iteration;
	boolean_t cb_verbose;
	boolean_t cb_literal;
	boolean_t cb_scripted;
	zpool_list_t *cb_list;
	vdev_cmd_data_list_t *vcdl;
	vdev_cbdata_t cb_vdevs;
} iostat_cbdata_t;

/* iostat labels */
typedef struct name_and_columns {
	const char *name;	/* Column name */
	unsigned int columns;	/* Center name to this number of columns */
} name_and_columns_t;

#define	IOSTAT_MAX_LABELS	15	/* Max number of labels on one line */

static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
{
	[IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
	    {NULL}},
	[IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
	    {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
	    {NULL}},
	[IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
	    {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
	    {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
	[IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
	    {"asyncq_wait", 2}, {NULL}},
	[IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
	    {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
	    {"trim", 2}, {"rebuild", 2}, {NULL}},
};

/* Shorthand - if "columns" field not set, default to 1 column */
static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
{
	[IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
	    {"write"}, {NULL}},
	[IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
	    {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
	    {NULL}},
	[IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
	    {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
	    {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
	[IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
	    {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
	    {NULL}},
	[IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
	    {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
	    {"ind"}, {"agg"}, {NULL}},
};

static const char *histo_to_title[] = {
	[IOS_L_HISTO] = "latency",
	[IOS_RQ_HISTO] = "req_size",
};

/*
 * Return the number of labels in a null-terminated name_and_columns_t
 * array.
 */
static unsigned int
label_array_len(const name_and_columns_t *labels)
{
	int i = 0;

	while (labels[i].name)
		i++;

	return (i);
}
4783
/*
4784
* Return the number of strings in a null-terminated string array.
4785
* For example:
4786
*
4787
* const char foo[] = {"bar", "baz", NULL}
4788
*
4789
* returns 2
4790
*/
4791
static uint64_t
4792
str_array_len(const char *array[])
4793
{
4794
uint64_t i = 0;
4795
while (array[i])
4796
i++;
4797
4798
return (i);
4799
}
4800
4801
4802

/*
 * Return a default column width for default/latency/queue columns. This does
 * not include histograms, which have their columns autosized.
 */
static unsigned int
default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
{
	unsigned long column_width = 5; /* Normal niceprint */
	static unsigned long widths[] = {
		/*
		 * Choose some sane default column sizes for printing the
		 * raw numbers.
		 */
		[IOS_DEFAULT] = 15,	/* 1PB capacity */
		[IOS_LATENCY] = 10,	/* 1B ns = 10sec */
		[IOS_QUEUES] = 6,	/* 1M queue entries */
		[IOS_L_HISTO] = 10,	/* 1B ns = 10sec */
		[IOS_RQ_HISTO] = 6,	/* 1M queue entries */
	};

	if (cb->cb_literal)
		column_width = widths[type];

	return (column_width);
}

/*
 * Print the column labels, i.e.:
 *
 *    capacity     operations     bandwidth
 *  alloc   free   read  write   read  write  ...
 *
 * If force_column_width is set, use it for the column width. If not set, use
 * the default column width.
 */
static void
print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
    const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
{
	int i, idx, s;
	int text_start, rw_column_width, spaces_to_end;
	uint64_t flags = cb->cb_flags;
	uint64_t f;
	unsigned int column_width = force_column_width;

	/* For each bit set in flags */
	for (f = flags; f; f &= ~(1ULL << idx)) {
		idx = lowbit64(f) - 1;
		if (!force_column_width)
			column_width = default_column_width(cb, idx);
		/* Print our top labels centered over "read  write" label. */
		for (i = 0; i < label_array_len(labels[idx]); i++) {
			const char *name = labels[idx][i].name;
			/*
			 * We treat labels[][].columns == 0 as shorthand
			 * for one column. It makes writing out the label
			 * tables more concise.
			 */
			unsigned int columns = MAX(1, labels[idx][i].columns);
			unsigned int slen = strlen(name);

			rw_column_width = (column_width * columns) +
			    (2 * (columns - 1));

			text_start = (int)((rw_column_width) / columns -
			    slen / columns);
			if (text_start < 0)
				text_start = 0;

			printf("  ");	/* Two spaces between columns */

			/* Space from beginning of column to label */
			for (s = 0; s < text_start; s++)
				printf(" ");

			printf("%s", name);

			/* Print space after label to end of column */
			spaces_to_end = rw_column_width - text_start - slen;
			if (spaces_to_end < 0)
				spaces_to_end = 0;

			for (s = 0; s < spaces_to_end; s++)
				printf(" ");
		}
	}
}

/*
 * print_cmd_columns - Print custom column titles from -c
 *
 * If the user specified the "zpool status|iostat -c" then print their custom
 * column titles in the header. For example, print_cmd_columns() would print
 * the "  col1  col2" part of this:
 *
 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
 * ...
 *	      capacity     operations     bandwidth
 * pool        alloc   free   read  write   read  write  col1  col2
 * ----------  -----  -----  -----  -----  -----  -----  ----  ----
 * mypool       269K  1008M      0      0    107    946
 *   mirror     269K  1008M      0      0    107    946
 *     sdb         -      -      0      0    102    473  val1  val2
 *     sdc         -      -      0      0      5    473  val1  val2
 * ----------  -----  -----  -----  -----  -----  -----  ----  ----
 */
static void
print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
{
	int i, j;
	vdev_cmd_data_t *data = &vcdl->data[0];

	if (vcdl->count == 0 || data == NULL)
		return;

	/*
	 * Each vdev cmd should have the same column names unless the user did
	 * something weird with their cmd. Just take the column names from the
	 * first vdev and assume it works for all of them.
	 */
	for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
		printf("  ");
		if (use_dashes) {
			for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
				printf("-");
		} else {
			(void) printf_color(ANSI_BOLD, "%*s",
			    vcdl->uniq_cols_width[i],
			    vcdl->uniq_cols[i]);
		}
	}
}

/*
 * Utility function to print out a line of dashes like:
 *
 *	--------------------------------  -----  -----  -----  -----  -----
 *
 * ...or a dashed named-row line like:
 *
 *	logs                                  -      -      -      -      -
 *
 * @cb:			iostat data
 *
 * @force_column_width	If non-zero, use the value as the column width.
 *			Otherwise use the default column widths.
 *
 * @name:		Print a dashed named-row line starting
 *			with @name. Otherwise, print a regular
 *			dashed line.
 */
static void
print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
    const char *name)
{
	int i;
	unsigned int namewidth;
	uint64_t flags = cb->cb_flags;
	uint64_t f;
	int idx;
	const name_and_columns_t *labels;
	const char *title;

	if (cb->cb_flags & IOS_ANYHISTO_M) {
		title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
	} else if (cb->cb_vdevs.cb_names_count) {
		title = "vdev";
	} else {
		title = "pool";
	}

	namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
	    name ? strlen(name) : 0);

	if (name) {
		printf("%-*s", namewidth, name);
	} else {
		for (i = 0; i < namewidth; i++)
			(void) printf("-");
	}

	/* For each bit in flags */
	for (f = flags; f; f &= ~(1ULL << idx)) {
		unsigned int column_width;
		idx = lowbit64(f) - 1;
		if (force_column_width)
			column_width = force_column_width;
		else
			column_width = default_column_width(cb, idx);

		labels = iostat_bottom_labels[idx];
		for (i = 0; i < label_array_len(labels); i++) {
			if (name)
				printf("  %*s-", column_width - 1, " ");
			else
				printf("  %.*s", column_width,
				    "--------------------");
		}
	}
}

static void
print_iostat_separator_impl(iostat_cbdata_t *cb,
    unsigned int force_column_width)
{
	print_iostat_dashes(cb, force_column_width, NULL);
}

static void
print_iostat_separator(iostat_cbdata_t *cb)
{
	print_iostat_separator_impl(cb, 0);
}

static void
print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
    const char *histo_vdev_name)
{
	unsigned int namewidth;
	const char *title;

	color_start(ANSI_BOLD);

	if (cb->cb_flags & IOS_ANYHISTO_M) {
		title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
	} else if (cb->cb_vdevs.cb_names_count) {
		title = "vdev";
	} else {
		title = "pool";
	}

	namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
	    histo_vdev_name ? strlen(histo_vdev_name) : 0);

	if (histo_vdev_name)
		printf("%-*s", namewidth, histo_vdev_name);
	else
		printf("%*s", namewidth, "");

	print_iostat_labels(cb, force_column_width, iostat_top_labels);
	printf("\n");

	printf("%-*s", namewidth, title);

	print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
	if (cb->vcdl != NULL)
		print_cmd_columns(cb->vcdl, 0);

	printf("\n");

	print_iostat_separator_impl(cb, force_column_width);

	if (cb->vcdl != NULL)
		print_cmd_columns(cb->vcdl, 1);

	color_end();

	printf("\n");
}

static void
print_iostat_header(iostat_cbdata_t *cb)
{
	print_iostat_header_impl(cb, 0, NULL);
}

/*
 * Prints a size string (i.e. 120M) with the suffix ("M") colored
 * by order of magnitude. Uses column_size to add padding.
 */
static void
print_stat_color(const char *statbuf, unsigned int column_size)
{
	(void) fputs("  ", stdout);
	size_t len = strlen(statbuf);
	while (len < column_size) {
		(void) fputc(' ', stdout);
		column_size--;
	}
	if (*statbuf == '0') {
		color_start(ANSI_GRAY);
		(void) fputc('0', stdout);
	} else {
		for (; *statbuf; statbuf++) {
			if (*statbuf == 'K') color_start(ANSI_GREEN);
			else if (*statbuf == 'M') color_start(ANSI_YELLOW);
			else if (*statbuf == 'G') color_start(ANSI_RED);
			else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
			else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
			else if (*statbuf == 'E') color_start(ANSI_CYAN);
			(void) fputc(*statbuf, stdout);
			if (--column_size <= 0)
				break;
		}
	}
	color_end();
}

/*
 * Display a single statistic.
 */
static void
print_one_stat(uint64_t value, enum zfs_nicenum_format format,
    unsigned int column_size, boolean_t scripted)
{
	char buf[64];

	zfs_nicenum_format(value, buf, sizeof (buf), format);

	if (scripted)
		printf("\t%s", buf);
	else
		print_stat_color(buf, column_size);
}

/*
 * Calculate the default vdev stats
 *
 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting
 * stats into calcvs.
 */
static void
calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
    vdev_stat_t *calcvs)
{
	int i;

	memcpy(calcvs, newvs, sizeof (*calcvs));
	for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
		calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);

	for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
		calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
}

/*
 * Internal representation of the extended iostats data.
 *
 * The extended iostat stats are exported in nvlists as either uint64_t arrays
 * or single uint64_t's. We make both look like arrays to make them easier
 * to process. In order to make single uint64_t's look like arrays, we set
 * __data to the stat data, and then set *data = &__data with count = 1. Then,
 * we can just use *data and count.
 */
struct stat_array {
	uint64_t *data;
	uint_t count;	/* Number of entries in data[] */
	uint64_t __data; /* Only used when data is a single uint64_t */
};

static uint64_t
stat_histo_max(struct stat_array *nva, unsigned int len)
{
	uint64_t max = 0;
	int i;
	for (i = 0; i < len; i++)
		max = MAX(max, array64_max(nva[i].data, nva[i].count));

	return (max);
}

/*
 * Helper function to lookup a uint64_t array or uint64_t value and store its
 * data as a stat_array. If the nvpair is a single uint64_t value, then we make
 * it look like a one element array to make it easier to process.
 */
static int
nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
    struct stat_array *nva)
{
	nvpair_t *tmp;
	int ret;

	verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
	switch (nvpair_type(tmp)) {
	case DATA_TYPE_UINT64_ARRAY:
		ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
		break;
	case DATA_TYPE_UINT64:
		ret = nvpair_value_uint64(tmp, &nva->__data);
		nva->data = &nva->__data;
		nva->count = 1;
		break;
	default:
		/* Not a uint64_t */
		ret = EINVAL;
		break;
	}

	return (ret);
}

/*
 * Given a list of nvlist names, look up the extended stats in newnv and oldnv,
 * subtract them, and return the results in a newly allocated stat_array.
 * You must free the returned array after you are done with it with
 * free_calc_stats().
 *
 * Additionally, you can set "oldnv" to NULL if you simply want the newnv
 * values.
 */
static struct stat_array *
calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
    nvlist_t *newnv)
{
	nvlist_t *oldnvx = NULL, *newnvx;
	struct stat_array *oldnva, *newnva, *calcnva;
	int i, j;
	unsigned int alloc_size = (sizeof (struct stat_array)) * len;

	/* Extract our extended stats nvlist from the main list */
	verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
	    &newnvx) == 0);
	if (oldnv) {
		verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
		    &oldnvx) == 0);
	}

	newnva = safe_malloc(alloc_size);
	oldnva = safe_malloc(alloc_size);
	calcnva = safe_malloc(alloc_size);

	for (j = 0; j < len; j++) {
		verify(nvpair64_to_stat_array(newnvx, names[j],
		    &newnva[j]) == 0);
		calcnva[j].count = newnva[j].count;
		alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
		calcnva[j].data = safe_malloc(alloc_size);
		memcpy(calcnva[j].data, newnva[j].data, alloc_size);

		if (oldnvx) {
			verify(nvpair64_to_stat_array(oldnvx, names[j],
			    &oldnva[j]) == 0);
			for (i = 0; i < oldnva[j].count; i++)
				calcnva[j].data[i] -= oldnva[j].data[i];
		}
	}
	free(newnva);
	free(oldnva);
	return (calcnva);
}

static void
free_calc_stats(struct stat_array *nva, unsigned int len)
{
	int i;
	for (i = 0; i < len; i++)
		free(nva[i].data);

	free(nva);
}

static void
print_iostat_histo(struct stat_array *nva, unsigned int len,
    iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
    double scale)
{
	int i, j;
	char buf[6];
	uint64_t val;
	enum zfs_nicenum_format format;
	unsigned int buckets;
	unsigned int start_bucket;

	if (cb->cb_literal)
		format = ZFS_NICENUM_RAW;
	else
		format = ZFS_NICENUM_1024;

	/* All these histos are the same size, so just use nva[0].count */
	buckets = nva[0].count;

	if (cb->cb_flags & IOS_RQ_HISTO_M) {
		/* Start at 512 - req size should never be lower than this */
		start_bucket = 9;
	} else {
		start_bucket = 0;
	}

	for (j = start_bucket; j < buckets; j++) {
		/* Print histogram bucket label */
		if (cb->cb_flags & IOS_L_HISTO_M) {
			/* Ending range of this bucket */
			val = (1UL << (j + 1)) - 1;
			zfs_nicetime(val, buf, sizeof (buf));
		} else {
			/* Request size (starting range of bucket) */
			val = (1UL << j);
			zfs_nicenum(val, buf, sizeof (buf));
		}

		if (cb->cb_scripted)
			printf("%llu", (u_longlong_t)val);
		else
			printf("%-*s", namewidth, buf);

		/* Print the values on the line */
		for (i = 0; i < len; i++) {
			print_one_stat(nva[i].data[j] * scale, format,
			    column_width, cb->cb_scripted);
		}
		printf("\n");
	}
}

static void
print_solid_separator(unsigned int length)
{
	while (length--)
		printf("-");
	printf("\n");
}

static void
print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
    nvlist_t *newnv, double scale, const char *name)
{
	unsigned int column_width;
	unsigned int namewidth;
	unsigned int entire_width;
	enum iostat_type type;
	struct stat_array *nva;
	const char **names;
	unsigned int names_len;

	/* What type of histo are we? */
	type = IOS_HISTO_IDX(cb->cb_flags);

	/* Get NULL-terminated array of nvlist names for our histo */
	names = vsx_type_to_nvlist[type];
	names_len = str_array_len(names);	/* num of names */

	nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);

	if (cb->cb_literal) {
		column_width = MAX(5,
		    (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
	} else {
		column_width = 5;
	}

	namewidth = MAX(cb->cb_namewidth,
	    strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));

	/*
	 * Calculate the entire line width of what we're printing. The
	 * +2 is for the two spaces between columns:
	 */
	/*	 read  write			*/
	/*	----- -----			*/
	/*	|___|  <---------- column_width	*/
	/*					*/
	/*	|__________|  <--- entire_width	*/
	/*					*/
	entire_width = namewidth + (column_width + 2) *
	    label_array_len(iostat_bottom_labels[type]);

	if (cb->cb_scripted)
		printf("%s\n", name);
	else
		print_iostat_header_impl(cb, column_width, name);

	print_iostat_histo(nva, names_len, cb, column_width,
	    namewidth, scale);

	free_calc_stats(nva, names_len);
	if (!cb->cb_scripted)
		print_solid_separator(entire_width);
}

/*
 * Calculate the average latency of a power-of-two latency histogram
 */
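/*
 * Worked example (illustrative): four I/Os in bucket 3 (the 8ns-15ns
 * range) and nothing else average to the bucket midpoint,
 * (1 << 3) + ((1 << 3) / 2) = 12ns.
 */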
static uint64_t
single_histo_average(uint64_t *histo, unsigned int buckets)
{
	int i;
	uint64_t count = 0, total = 0;

	for (i = 0; i < buckets; i++) {
		/*
		 * Our buckets are power-of-two latency ranges. Use the
		 * midpoint latency of each bucket to calculate the average.
		 * For example:
		 *
		 * Bucket          Midpoint
		 * 8ns-15ns:       12ns
		 * 16ns-31ns:      24ns
		 * ...
		 */
		if (histo[i] != 0) {
			total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
			count += histo[i];
		}
	}

	/* Prevent divide by zero */
	return (count == 0 ? 0 : total / count);
}

static void
print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
{
	const char *names[] = {
		ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
		ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
		ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
		ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
		ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
		ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
		ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
		ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
		ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
		ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
		ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
		ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
		ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
		ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
	};

	struct stat_array *nva;

	unsigned int column_width = default_column_width(cb, IOS_QUEUES);
	enum zfs_nicenum_format format;

	nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);

	if (cb->cb_literal)
		format = ZFS_NICENUM_RAW;
	else
		format = ZFS_NICENUM_1024;

	for (int i = 0; i < ARRAY_SIZE(names); i++) {
		uint64_t val = nva[i].data[0];
		print_one_stat(val, format, column_width, cb->cb_scripted);
	}

	free_calc_stats(nva, ARRAY_SIZE(names));
}

static void
print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
    nvlist_t *newnv)
{
	int i;
	uint64_t val;
	const char *names[] = {
		ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
		ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
		ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
		ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
		ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
		ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
		ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
		ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
		ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
		ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
		ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	};
	struct stat_array *nva;

	unsigned int column_width = default_column_width(cb, IOS_LATENCY);
	enum zfs_nicenum_format format;

	nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);

	if (cb->cb_literal)
		format = ZFS_NICENUM_RAWTIME;
	else
		format = ZFS_NICENUM_TIME;

	/* Print our avg latencies on the line */
	for (i = 0; i < ARRAY_SIZE(names); i++) {
		/* Compute average latency for a latency histo */
		val = single_histo_average(nva[i].data, nva[i].count);
		print_one_stat(val, format, column_width, cb->cb_scripted);
	}
	free_calc_stats(nva, ARRAY_SIZE(names));
}

/*
 * Print default statistics (capacity/operations/bandwidth)
 */
static void
print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
{
	unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
	enum zfs_nicenum_format format;
	char na;	/* char to print for "not applicable" values */

	if (cb->cb_literal) {
		format = ZFS_NICENUM_RAW;
		na = '0';
	} else {
		format = ZFS_NICENUM_1024;
		na = '-';
	}

	/* only toplevel vdevs have capacity stats */
	if (vs->vs_space == 0) {
		if (cb->cb_scripted)
			printf("\t%c\t%c", na, na);
		else
			printf("  %*c  %*c", column_width, na, column_width,
			    na);
	} else {
		print_one_stat(vs->vs_alloc, format, column_width,
		    cb->cb_scripted);
		print_one_stat(vs->vs_space - vs->vs_alloc, format,
		    column_width, cb->cb_scripted);
	}

	print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
	    format, column_width, cb->cb_scripted);
	print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
	    format, column_width, cb->cb_scripted);
	print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
	    format, column_width, cb->cb_scripted);
	print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
	    format, column_width, cb->cb_scripted);
}
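/*
 * Allocation-class names, in the order their vdev groups are printed
 * after the regular top-level vdevs.
 */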
static const char *const class_name[] = {
	VDEV_ALLOC_BIAS_DEDUP,
	VDEV_ALLOC_BIAS_SPECIAL,
	VDEV_ALLOC_CLASS_LOGS
};

/*
 * Print out all the statistics for the given vdev. This can either be the
 * toplevel configuration, or called recursively. If 'name' is NULL, then this
 * is a verbose output, and we don't want to display the toplevel pool stats.
 *
 * Returns the number of stat lines printed.
 */
static unsigned int
print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
    nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
{
	nvlist_t **oldchild, **newchild;
	uint_t c, children, oldchildren;
	vdev_stat_t *oldvs, *newvs, *calcvs;
	vdev_stat_t zerovs = { 0 };
	char *vname;
	int i;
	int ret = 0;
	uint64_t tdelta;
	double scale;

	if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
		return (ret);

	calcvs = safe_malloc(sizeof (*calcvs));

	if (oldnv != NULL) {
		verify(nvlist_lookup_uint64_array(oldnv,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
	} else {
		oldvs = &zerovs;
	}

	/* Do we only want to see a specific vdev? */
	for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
		/* Yes we do. Is this the vdev? */
		if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
			/*
			 * This is our vdev. Since it is the only vdev we
			 * will be displaying, make depth = 0 so that it
			 * doesn't get indented.
			 */
			depth = 0;
			break;
		}
	}

	if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
		/* Couldn't match the name */
		goto children;
	}

	verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&newvs, &c) == 0);

	/*
	 * Print the vdev name unless it's a histogram. Histograms
	 * display the vdev name in the header itself.
	 */
	if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
		if (cb->cb_scripted) {
			printf("%s", name);
		} else {
			if (strlen(name) + depth > cb->cb_namewidth)
				(void) printf("%*s%s", depth, "", name);
			else
				(void) printf("%*s%s%*s", depth, "", name,
				    (int)(cb->cb_namewidth - strlen(name) -
				    depth), "");
		}
	}

	/* Calculate our scaling factor */
	tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
	if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
		/*
		 * If we specify printing histograms with no time interval, then
		 * print the histogram numbers over the entire lifetime of the
		 * vdev.
		 */
		scale = 1;
	} else {
		if (tdelta == 0)
			scale = 1.0;
		else
			scale = (double)NANOSEC / tdelta;
	}

	if (cb->cb_flags & IOS_DEFAULT_M) {
		calc_default_iostats(oldvs, newvs, calcvs);
		print_iostat_default(calcvs, cb, scale);
	}
	if (cb->cb_flags & IOS_LATENCY_M)
		print_iostat_latency(cb, oldnv, newnv);
	if (cb->cb_flags & IOS_QUEUES_M)
		print_iostat_queues(cb, newnv);
	if (cb->cb_flags & IOS_ANYHISTO_M) {
		printf("\n");
		print_iostat_histos(cb, oldnv, newnv, scale, name);
	}

	if (cb->vcdl != NULL) {
		const char *path;
		if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
		    &path) == 0) {
			printf("  ");
			zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
		}
	}

	if (!(cb->cb_flags & IOS_ANYHISTO_M))
		printf("\n");

	ret++;

children:

	free(calcvs);

	if (!cb->cb_verbose)
		return (ret);

	if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &children) != 0)
		return (ret);

	if (oldnv) {
		if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
		    &oldchild, &oldchildren) != 0)
			return (ret);

		children = MIN(oldchildren, children);
	}

	/*
	 * print normal top-level devices
	 */
	for (c = 0; c < children; c++) {
		uint64_t ishole = B_FALSE, islog = B_FALSE;

		(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
		    &ishole);

		(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
		    &islog);

		if (ishole || islog)
			continue;

		if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
			continue;

		vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
		    cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
		ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
		    newchild[c], cb, depth + 2);
		free(vname);
	}

	/*
	 * print all other top-level devices
	 */
	for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
		boolean_t printed = B_FALSE;

		for (c = 0; c < children; c++) {
			uint64_t islog = B_FALSE;
			const char *bias = NULL;
			const char *type = NULL;

			(void) nvlist_lookup_uint64(newchild[c],
			    ZPOOL_CONFIG_IS_LOG, &islog);
			if (islog) {
				bias = VDEV_ALLOC_CLASS_LOGS;
			} else {
				(void) nvlist_lookup_string(newchild[c],
				    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
				(void) nvlist_lookup_string(newchild[c],
				    ZPOOL_CONFIG_TYPE, &type);
			}
			if (bias == NULL || strcmp(bias, class_name[n]) != 0)
				continue;
			if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
				continue;

			if (!printed) {
				if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
				    !cb->cb_scripted &&
				    !cb->cb_vdevs.cb_names) {
					print_iostat_dashes(cb, 0,
					    class_name[n]);
				}
				printf("\n");
				printed = B_TRUE;
			}

			vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
			    cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
			ret += print_vdev_stats(zhp, vname, oldnv ?
			    oldchild[c] : NULL, newchild[c], cb, depth + 2);
			free(vname);
		}
	}

	/*
	 * Include level 2 ARC devices in iostat output
	 */
	if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
	    &newchild, &children) != 0)
		return (ret);

	if (oldnv) {
		if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
		    &oldchild, &oldchildren) != 0)
			return (ret);

		children = MIN(oldchildren, children);
	}

	if (children > 0) {
		if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
		    !cb->cb_vdevs.cb_names) {
			print_iostat_dashes(cb, 0, "cache");
		}
		printf("\n");

		for (c = 0; c < children; c++) {
			vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
			    cb->cb_vdevs.cb_name_flags);
			ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
			    : NULL, newchild[c], cb, depth + 2);
			free(vname);
		}
	}

	return (ret);
}

/*
 * Callback to print out the iostats for the given pool.
 */
static int
print_iostat(zpool_handle_t *zhp, void *data)
{
	iostat_cbdata_t *cb = data;
	nvlist_t *oldconfig, *newconfig;
	nvlist_t *oldnvroot, *newnvroot;
	int ret;

	newconfig = zpool_get_config(zhp, &oldconfig);

	if (cb->cb_iteration == 1)
		oldconfig = NULL;

	verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
	    &newnvroot) == 0);

	if (oldconfig == NULL)
		oldnvroot = NULL;
	else
		verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
		    &oldnvroot) == 0);

	ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
	    cb, 0);
	if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
	    !cb->cb_scripted && cb->cb_verbose &&
	    !cb->cb_vdevs.cb_names_count) {
		print_iostat_separator(cb);
		if (cb->vcdl != NULL) {
			print_cmd_columns(cb->vcdl, 1);
		}
		printf("\n");
	}

	return (ret);
}

static int
get_columns(void)
{
	struct winsize ws;
	int columns = 80;
	int error;

	if (isatty(STDOUT_FILENO)) {
		error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
		if (error == 0)
			columns = ws.ws_col;
	} else {
		columns = 999;
	}

	return (columns);
}

/*
 * Return the required length of the pool/vdev name column. The minimum
 * allowed width and output formatting flags must be provided.
 */
static int
get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
{
	nvlist_t *config, *nvroot;
	int width = min_width;

	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		size_t poolname_len = strlen(zpool_get_name(zhp));
		if (verbose == B_FALSE) {
			width = MAX(poolname_len, min_width);
		} else {
			width = MAX(poolname_len,
			    max_width(zhp, nvroot, 0, min_width, flags));
		}
	}

	return (width);
}

/*
 * Parse the input, extracting the 'interval' and 'count' values if present.
 */
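/*
 * Illustrative parse (assumed): for "zpool iostat tank 2 5" the trailing
 * "2 5" yields interval = 2 (seconds) and count = 5 reports; a single
 * trailing number is taken as the interval with an unbounded count.
 */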
static void
5862
get_interval_count(int *argcp, char **argv, float *iv,
5863
unsigned long *cnt)
5864
{
5865
float interval = 0;
5866
unsigned long count = 0;
5867
int argc = *argcp;
5868
5869
/*
5870
* Determine if the last argument is an integer or a pool name
5871
*/
5872
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5873
char *end;
5874
5875
errno = 0;
5876
interval = strtof(argv[argc - 1], &end);
5877
5878
if (*end == '\0' && errno == 0) {
5879
if (interval == 0) {
5880
(void) fprintf(stderr, gettext(
5881
"interval cannot be zero\n"));
5882
usage(B_FALSE);
5883
}
5884
/*
5885
* Ignore the last parameter
5886
*/
5887
argc--;
5888
} else {
5889
/*
5890
* If this is not a valid number, just plow on. The
5891
* user will get a more informative error message later
5892
* on.
5893
*/
5894
interval = 0;
5895
}
5896
}
5897
5898
/*
5899
* If the last argument is also an integer, then we have both a count
5900
* and an interval.
5901
*/
5902
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5903
char *end;
5904
5905
errno = 0;
5906
count = interval;
5907
interval = strtof(argv[argc - 1], &end);
5908
5909
if (*end == '\0' && errno == 0) {
5910
if (interval == 0) {
5911
(void) fprintf(stderr, gettext(
5912
"interval cannot be zero\n"));
5913
usage(B_FALSE);
5914
}
5915
5916
/*
5917
* Ignore the last parameter
5918
*/
5919
argc--;
5920
} else {
5921
interval = 0;
5922
}
5923
}
5924
5925
*iv = interval;
5926
*cnt = count;
5927
*argcp = argc;
5928
}
5929
5930
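/*
 * For example, given trailing arguments "tank 2 5": the first pass above
 * parses "5" as the interval; the second pass then moves that value into
 * 'count' and parses "2" as the new interval, leaving "tank" in argv.
 * The caller ends up with interval = 2 and count = 5. ("tank" is an
 * illustrative pool name.)
 */
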
static void
get_timestamp_arg(char c)
{
	if (c == 'u')
		timestamp_fmt = UDATE;
	else if (c == 'd')
		timestamp_fmt = DDATE;
	else
		usage(B_FALSE);
}

/*
 * Return stat flags that are supported by all pools by both the module and
 * zpool iostat. "*data" should be initialized to all 0xFFs before running.
 * It will get ANDed down until only the flags that are supported on all pools
 * remain.
 */
static int
get_stat_flags_cb(zpool_handle_t *zhp, void *data)
{
	uint64_t *mask = data;
	nvlist_t *config, *nvroot, *nvx;
	uint64_t flags = 0;
	int i, j;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	/* Default stats are always supported, but for completeness.. */
	if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
		flags |= IOS_DEFAULT_M;

	/* Get our extended stats nvlist from the main list */
	if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
	    &nvx) != 0) {
		/*
		 * No extended stats; they're probably running an older
		 * module. No big deal, we support that too.
		 */
		goto end;
	}

	/* For each extended stat, make sure all its nvpairs are supported */
	for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
		if (!vsx_type_to_nvlist[j][0])
			continue;

		/* Start off by assuming the flag is supported, then check */
		flags |= (1ULL << j);
		for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
			if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
				/* flag isn't supported */
				flags = flags & ~(1ULL << j);
				break;
			}
		}
	}
end:
	*mask = *mask & flags;
	return (0);
}

/*
 * Return a bitmask of stats that are supported on all pools by both the module
 * and zpool iostat.
 */
static uint64_t
get_stat_flags(zpool_list_t *list)
{
	uint64_t mask = -1;

	/*
	 * get_stat_flags_cb() will lop off bits from "mask" until only the
	 * flags that are supported on all pools remain.
	 */
	(void) pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
	return (mask);
}

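/*
 * Illustrative example of the masking: if one pool reports default,
 * latency, and queue stats while another reports only default stats,
 * the per-pool callback ANDs the masks together and only the common
 * flag survives:
 *
 *	(IOS_DEFAULT_M | IOS_LATENCY_M | IOS_QUEUES_M) & IOS_DEFAULT_M
 *	    == IOS_DEFAULT_M
 */
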
/*
 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
 */
static int
is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
{
	uint64_t guid;
	vdev_cbdata_t *cb = cb_data;
	zpool_handle_t *zhp = zhp_data;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
		return (0);

	return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
}

/*
 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
 */
static int
is_vdev(zpool_handle_t *zhp, void *cb_data)
{
	return (for_each_vdev(zhp, is_vdev_cb, cb_data));
}

/*
 * Check if vdevs are in a pool
 *
 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
 * return 0. If pool_name is NULL, then search all pools.
 */
static int
are_vdevs_in_pool(int argc, char **argv, char *pool_name,
    vdev_cbdata_t *cb)
{
	char **tmp_name;
	int ret = 0;
	int i;
	int pool_count = 0;

	if ((argc == 0) || !*argv)
		return (0);

	if (pool_name)
		pool_count = 1;

	/* Temporarily hijack cb_names for a second... */
	tmp_name = cb->cb_names;

	/* Go through our list of prospective vdev names */
	for (i = 0; i < argc; i++) {
		cb->cb_names = argv + i;

		/* Is this name a vdev in our pools? */
		ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
		    ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
		if (!ret) {
			/* No match */
			break;
		}
	}

	cb->cb_names = tmp_name;

	return (ret);
}

static int
is_pool_cb(zpool_handle_t *zhp, void *data)
{
	char *name = data;
	if (strcmp(name, zpool_get_name(zhp)) == 0)
		return (1);

	return (0);
}

/*
 * Do we have a pool named *name? If so, return 1, otherwise 0.
 */
static int
is_pool(char *name)
{
	return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
	    is_pool_cb, name));
}

/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
static int
are_all_pools(int argc, char **argv)
{
	if ((argc == 0) || !*argv)
		return (0);

	while (--argc >= 0)
		if (!is_pool(argv[argc]))
			return (0);

	return (1);
}

/*
 * Helper function to print out vdev/pool names we can't resolve. Used for an
 * error message.
 */
static void
error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
    vdev_cbdata_t *cb)
{
	int i;
	char *name;
	char *str;
	for (i = 0; i < argc; i++) {
		name = argv[i];

		if (is_pool(name))
			str = gettext("pool");
		else if (are_vdevs_in_pool(1, &name, pool_name, cb))
			str = gettext("vdev in this pool");
		else if (are_vdevs_in_pool(1, &name, NULL, cb))
			str = gettext("vdev in another pool");
		else
			str = gettext("unknown");

		fprintf(stderr, "\t%s (%s)\n", name, str);
	}
}

/*
 * Same as get_interval_count(), but with additional checks to not misinterpret
 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in
 * cb.cb_vdevs.cb_name_flags.
 */
static void
get_interval_count_filter_guids(int *argc, char **argv, float *interval,
    unsigned long *count, iostat_cbdata_t *cb)
{
	int argc_for_interval = 0;

	/* Is the last arg an interval value? Or a guid? */
	if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
	    &cb->cb_vdevs)) {
		/*
		 * The last arg is not a guid, so it's probably an
		 * interval value.
		 */
		argc_for_interval++;

		if (*argc >= 2 &&
		    !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
		    &cb->cb_vdevs)) {
			/*
			 * The 2nd to last arg is not a guid, so it's probably
			 * an interval value.
			 */
			argc_for_interval++;
		}
	}

	/* Point to our list of possible intervals */
	char **tmpargv = &argv[*argc - argc_for_interval];

	*argc = *argc - argc_for_interval;
	get_interval_count(&argc_for_interval, tmpargv,
	    interval, count);
}

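/*
 * For example, in "zpool iostat -g 7423336594511877610 5" both trailing
 * arguments are numeric, but only "5" fails the vdev-guid lookup, so it
 * alone is handed to get_interval_count() as a candidate interval while
 * the guid stays in place as a vdev name. (The guid shown is purely
 * illustrative.)
 */
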
/*
 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
 * if we were unable to determine its size.
 */
static int
terminal_height(void)
{
	struct winsize win;

	if (isatty(STDOUT_FILENO) == 0)
		return (-1);

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
		return (win.ws_row);

	return (-1);
}

/*
 * Run one of the zpool status/iostat -c scripts with the help (-h) option and
 * print the result.
 *
 * name: Short name of the script ('iostat').
 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
 */
static void
print_zpool_script_help(char *name, char *path)
{
	char *argv[] = {path, (char *)"-h", NULL};
	char **lines = NULL;
	int lines_cnt = 0;
	int rc;

	rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
	    &lines_cnt);
	if (rc != 0 || lines == NULL || lines_cnt <= 0) {
		if (lines != NULL)
			libzfs_free_str_array(lines, lines_cnt);
		return;
	}

	for (int i = 0; i < lines_cnt; i++)
		if (!is_blank_str(lines[i]))
			printf(" %-14s %s\n", name, lines[i]);

	libzfs_free_str_array(lines, lines_cnt);
}

/*
 * Go through the zpool status/iostat -c scripts in the user's path, run their
 * help option (-h), and print out the results.
 */
static void
print_zpool_dir_scripts(char *dirpath)
{
	DIR *dir;
	struct dirent *ent;
	char fullpath[MAXPATHLEN];
	struct stat dir_stat;

	if ((dir = opendir(dirpath)) != NULL) {
		/* print all the files and directories within directory */
		while ((ent = readdir(dir)) != NULL) {
			if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
			    dirpath, ent->d_name) >= sizeof (fullpath)) {
				(void) fprintf(stderr,
				    gettext("internal error: "
				    "ZPOOL_SCRIPTS_PATH too large.\n"));
				exit(1);
			}

			/* Print the scripts */
			if (stat(fullpath, &dir_stat) == 0)
				if (dir_stat.st_mode & S_IXUSR &&
				    S_ISREG(dir_stat.st_mode))
					print_zpool_script_help(ent->d_name,
					    fullpath);
		}
		(void) closedir(dir);
	}
}

/*
 * Print out help text for all zpool status/iostat -c scripts.
 */
static void
print_zpool_script_list(const char *subcommand)
{
	char *dir, *sp, *tmp;

	printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);

	sp = zpool_get_cmd_search_path();
	if (sp == NULL)
		return;

	for (dir = strtok_r(sp, ":", &tmp);
	    dir != NULL;
	    dir = strtok_r(NULL, ":", &tmp))
		print_zpool_dir_scripts(dir);

	free(sp);
}

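/*
 * The search path is a colon-separated list of directories, e.g.
 * "/etc/zfs/zpool.d:/home/user/zpool.d" (paths shown for illustration
 * only); strtok_r() splits it above and each component is scanned in
 * turn for executable scripts.
 */
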
/*
 * Set the minimum pool/vdev name column width. The width must be at least 10,
 * but may be as large as the column width - 42 so it still fits on one line.
 * NOTE: 42 is the width of the default capacity/operations/bandwidth output
 */
static int
get_namewidth_iostat(zpool_handle_t *zhp, void *data)
{
	iostat_cbdata_t *cb = data;
	int width, available_width;

	/*
	 * get_namewidth() returns the maximum width of any name in that column
	 * for any pool/vdev/device line that will be output.
	 */
	width = get_namewidth(zhp, cb->cb_namewidth,
	    cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);

	/*
	 * The width we are calculating is the width of the header and also the
	 * padding width for names that are less than maximum width. The stats
	 * take up 42 characters, so the width available for names is:
	 */
	available_width = get_columns() - 42;

	/*
	 * If the maximum width fits on a screen, then great! Make everything
	 * line up by justifying all lines to the same width. If that max
	 * width is larger than what's available, the name plus stats won't fit
	 * on one line, and justifying to that width would cause every line to
	 * wrap on the screen. We only want lines with long names to wrap.
	 * Limit the padding to what won't wrap.
	 */
	if (width > available_width)
		width = available_width;

	/*
	 * And regardless of whatever the screen width is (get_columns can
	 * return 0 if the width is not known or less than 42 for a narrow
	 * terminal) have the width be a minimum of 10.
	 */
	if (width < 10)
		width = 10;

	/* Save the calculated width */
	cb->cb_namewidth = width;

	return (0);
}

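/*
 * Worked example: on an 80-column terminal, available_width is
 * 80 - 42 = 38. A longest vdev name of 20 characters yields a column
 * width of 20; a longest name of 50 characters is clamped to 38, so only
 * the over-long lines wrap. On a 40-column terminal the raw result
 * (40 - 42 = -2) is clamped up to the minimum of 10.
 */
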
/*
 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLnpPvy]
 *	[-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
 *	[interval [count]]
 *
 *	-c CMD	For each vdev, run command CMD
 *	-g	Display guid for individual vdev name.
 *	-L	Follow links when resolving vdev path name.
 *	-P	Display full path for vdev name.
 *	-v	Display statistics for individual vdevs
 *	-h	Display help
 *	-p	Display values in parsable (exact) format.
 *	-H	Scripted mode. Don't display headers, and separate properties
 *		by a single tab.
 *	-l	Display average latency
 *	-q	Display queue depths
 *	-w	Display latency histograms
 *	-r	Display request size histogram
 *	-T	Display a timestamp in date(1) or Unix format
 *	-n	Only print headers once
 *
 * This command can be tricky because we want to be able to deal with pool
 * creation/destruction as well as vdev configuration changes. The bulk of this
 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely
 * on pool_list_refresh() to detect the addition and removal of pools.
 * Configuration changes are all handled within libzfs.
 */
int
zpool_do_iostat(int argc, char **argv)
{
	int c;
	int ret;
	float interval = 0;
	unsigned long count = 0;
	zpool_list_t *list;
	boolean_t verbose = B_FALSE;
	boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
	boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
	boolean_t omit_since_boot = B_FALSE;
	boolean_t guid = B_FALSE;
	boolean_t follow_links = B_FALSE;
	boolean_t full_name = B_FALSE;
	boolean_t headers_once = B_FALSE;
	iostat_cbdata_t cb = { 0 };
	char *cmd = NULL;

	/* Used for printing error message */
	const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
	    [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};

	uint64_t unsupported_flags;

	/* check options */
	while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
		switch (c) {
		case 'c':
			if (cmd != NULL) {
				fprintf(stderr,
				    gettext("Can't set -c flag twice\n"));
				exit(1);
			}

			if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
			    !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
				fprintf(stderr, gettext(
				    "Can't run -c, disabled by "
				    "ZPOOL_SCRIPTS_ENABLED.\n"));
				exit(1);
			}

			if ((getuid() <= 0 || geteuid() <= 0) &&
			    !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
				fprintf(stderr, gettext(
				    "Can't run -c with root privileges "
				    "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
				exit(1);
			}
			cmd = optarg;
			verbose = B_TRUE;
			break;
		case 'g':
			guid = B_TRUE;
			break;
		case 'L':
			follow_links = B_TRUE;
			break;
		case 'P':
			full_name = B_TRUE;
			break;
		case 'T':
			get_timestamp_arg(*optarg);
			break;
		case 'v':
			verbose = B_TRUE;
			break;
		case 'p':
			parsable = B_TRUE;
			break;
		case 'l':
			latency = B_TRUE;
			break;
		case 'q':
			queues = B_TRUE;
			break;
		case 'H':
			scripted = B_TRUE;
			break;
		case 'w':
			l_histo = B_TRUE;
			break;
		case 'r':
			rq_histo = B_TRUE;
			break;
		case 'y':
			omit_since_boot = B_TRUE;
			break;
		case 'n':
			headers_once = B_TRUE;
			break;
		case 'h':
			usage(B_FALSE);
			break;
		case '?':
			if (optopt == 'c') {
				print_zpool_script_list("iostat");
				exit(0);
			} else {
				fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	cb.cb_literal = parsable;
	cb.cb_scripted = scripted;

	if (guid)
		cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
	if (follow_links)
		cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
	if (full_name)
		cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
	cb.cb_iteration = 0;
	cb.cb_namewidth = 0;
	cb.cb_verbose = verbose;

	/* Get our interval and count values (if any) */
	if (guid) {
		get_interval_count_filter_guids(&argc, argv, &interval,
		    &count, &cb);
	} else {
		get_interval_count(&argc, argv, &interval, &count);
	}

	if (argc == 0) {
		/* No args, so just print the defaults. */
	} else if (are_all_pools(argc, argv)) {
		/* All the args are pool names */
	} else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
		/* All the args are vdevs */
		cb.cb_vdevs.cb_names = argv;
		cb.cb_vdevs.cb_names_count = argc;
		argc = 0; /* No pools to process */
	} else if (are_all_pools(1, argv)) {
		/* The first arg is a pool name */
		if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
		    &cb.cb_vdevs)) {
			/* ...and the rest are vdev names */
			cb.cb_vdevs.cb_names = argv + 1;
			cb.cb_vdevs.cb_names_count = argc - 1;
			argc = 1; /* One pool to process */
		} else {
			fprintf(stderr, gettext("Expected either a list of "));
			fprintf(stderr, gettext("pools, or list of vdevs in"));
			fprintf(stderr, " \"%s\", ", argv[0]);
			fprintf(stderr, gettext("but got:\n"));
			error_list_unresolved_vdevs(argc - 1, argv + 1,
			    argv[0], &cb.cb_vdevs);
			fprintf(stderr, "\n");
			usage(B_FALSE);
		}
	} else {
		/*
		 * The args don't make sense. The first arg isn't a pool name,
		 * nor are all the args vdevs.
		 */
		fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
		fprintf(stderr, "\n");
		return (1);
	}

	if (cb.cb_vdevs.cb_names_count != 0) {
		/*
		 * If user specified vdevs, it implies verbose.
		 */
		cb.cb_verbose = B_TRUE;
	}

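	/*
	 * To make the cases above concrete: "zpool iostat" prints defaults
	 * for every pool; "zpool iostat tank dozer" treats both args as
	 * pools; "zpool iostat sda sdb" treats both as vdevs searched
	 * across all pools; and "zpool iostat tank sda 2" is one pool, one
	 * vdev, and a 2-second interval (the interval was already stripped
	 * by get_interval_count() before this classification). Pool and
	 * device names here are illustrative.
	 */
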
	/*
	 * Construct the list of all interesting pools.
	 */
	ret = 0;
	if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
	    &ret)) == NULL)
		return (1);

	if (pool_list_count(list) == 0 && argc != 0) {
		pool_list_free(list);
		return (1);
	}

	if (pool_list_count(list) == 0 && interval == 0) {
		pool_list_free(list);
		(void) fprintf(stderr, gettext("no pools available\n"));
		return (1);
	}

	if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
		pool_list_free(list);
		(void) fprintf(stderr,
		    gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
		usage(B_FALSE);
	}

	if (l_histo && rq_histo) {
		pool_list_free(list);
		(void) fprintf(stderr,
		    gettext("Only one of [-r|-w] can be passed at a time\n"));
		usage(B_FALSE);
	}

	/*
	 * Enter the main iostat loop.
	 */
	cb.cb_list = list;

	if (l_histo) {
		/*
		 * Histogram tables look out of place when you try to display
		 * them with the other stats, so make a rule that you can only
		 * print histograms by themselves.
		 */
		cb.cb_flags = IOS_L_HISTO_M;
	} else if (rq_histo) {
		cb.cb_flags = IOS_RQ_HISTO_M;
	} else {
		cb.cb_flags = IOS_DEFAULT_M;
		if (latency)
			cb.cb_flags |= IOS_LATENCY_M;
		if (queues)
			cb.cb_flags |= IOS_QUEUES_M;
	}

	/*
	 * See if the module supports all the stats we want to display.
	 */
	unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
	if (unsupported_flags) {
		uint64_t f;
		int idx;
		fprintf(stderr,
		    gettext("The loaded zfs module doesn't support:"));

		/* for each bit set in unsupported_flags */
		for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
			idx = lowbit64(f) - 1;
			fprintf(stderr, " -%c", flag_to_arg[idx]);
		}

		fprintf(stderr, ". Try running a newer module.\n");
		pool_list_free(list);

		return (1);
	}

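	/*
	 * For example, if the user asked for -l and -q but the module only
	 * reports the default stats, the loop above walks the set bits of
	 * unsupported_flags (via lowbit64()) and prints something like
	 * "The loaded zfs module doesn't support: -l -q. Try running a
	 * newer module."
	 */
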
	int last_npools = 0;
	for (;;) {
		/*
		 * Refresh all pools in list, adding or removing pools as
		 * necessary.
		 */
		int npools = pool_list_refresh(list);
		if (npools == 0) {
			(void) fprintf(stderr, gettext("no pools available\n"));
		} else {
			/*
			 * If the list of pools has changed since last time
			 * around, reset the iteration count to force the
			 * header to be redisplayed.
			 */
			if (last_npools != npools)
				cb.cb_iteration = 0;

			/*
			 * If this is the first iteration and -y was supplied
			 * we skip any printing.
			 */
			boolean_t skip = (omit_since_boot &&
			    cb.cb_iteration == 0);

			/*
			 * Iterate over all pools to determine the maximum width
			 * for the pool / device name column across all pools.
			 */
			cb.cb_namewidth = 0;
			(void) pool_list_iter(list, B_FALSE,
			    get_namewidth_iostat, &cb);

			if (timestamp_fmt != NODATE)
				print_timestamp(timestamp_fmt);

			if (cmd != NULL && cb.cb_verbose &&
			    !(cb.cb_flags & IOS_ANYHISTO_M)) {
				cb.vcdl = all_pools_for_each_vdev_run(argc,
				    argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
				    cb.cb_vdevs.cb_names_count,
				    cb.cb_vdevs.cb_name_flags);
			} else {
				cb.vcdl = NULL;
			}

			/*
			 * Check terminal size so we can print headers
			 * even when terminal window has its height
			 * changed.
			 */
			int winheight = terminal_height();
			/*
			 * Are we connected to TTY? If not, headers_once
			 * should be true, to avoid breaking scripts.
			 */
			if (winheight < 0)
				headers_once = B_TRUE;

			/*
			 * Print the header on the first iteration (unless
			 * we're skipping it), when exactly one of skip and
			 * verbose is set, or periodically as the output
			 * scrolls past the terminal height.
			 *
			 * The histogram code explicitly prints its header on
			 * every vdev, so skip this for histograms.
			 */
			if (((++cb.cb_iteration == 1 && !skip) ||
			    (skip != verbose) ||
			    (!headers_once &&
			    (cb.cb_iteration % winheight) == 0)) &&
			    (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
			    !cb.cb_scripted)
				print_iostat_header(&cb);

			if (skip) {
				(void) fflush(stdout);
				(void) fsleep(interval);
				last_npools = npools;
				continue;
			}

			(void) pool_list_iter(list, B_FALSE, print_iostat, &cb);

			/*
			 * If there's more than one pool, and we're not in
			 * verbose mode (which prints a separator for us),
			 * then print a separator.
			 *
			 * In addition, if we're printing specific vdevs then
			 * we also want an ending separator.
			 */
			if (((npools > 1 && !verbose &&
			    !(cb.cb_flags & IOS_ANYHISTO_M)) ||
			    (!(cb.cb_flags & IOS_ANYHISTO_M) &&
			    cb.cb_vdevs.cb_names_count)) &&
			    !cb.cb_scripted) {
				print_iostat_separator(&cb);
				if (cb.vcdl != NULL)
					print_cmd_columns(cb.vcdl, 1);
				printf("\n");
			}

			if (cb.vcdl != NULL)
				free_vdev_cmd_data_list(cb.vcdl);
		}

		if (interval == 0)
			break;

		if (count != 0 && --count == 0)
			break;

		(void) fflush(stdout);
		(void) fsleep(interval);

		last_npools = npools;
	}

	pool_list_free(list);

	return (ret);
}

typedef struct list_cbdata {
	boolean_t cb_verbose;
	int cb_name_flags;
	int cb_namewidth;
	boolean_t cb_json;
	boolean_t cb_scripted;
	zprop_list_t *cb_proplist;
	boolean_t cb_literal;
	nvlist_t *cb_jsobj;
	boolean_t cb_json_as_int;
	boolean_t cb_json_pool_key_guid;
} list_cbdata_t;

/*
 * Given a list of columns to display, print an appropriate line. If
 * `vdev_name` is not NULL, we print `vdev_name` followed by a line of dashes.
 * If `vdev_name` is NULL, we print the column headers.
 */
static void
print_line(list_cbdata_t *cb, const char *vdev_name)
{
	zprop_list_t *pl = cb->cb_proplist;
	char headerbuf[ZPOOL_MAXPROPLEN];
	const char *header;
	boolean_t first = B_TRUE;
	boolean_t right_justify;
	size_t width = 0;

	boolean_t print_header = (vdev_name == NULL);

	for (; pl != NULL; pl = pl->pl_next) {
		width = pl->pl_width;
		if (first && cb->cb_verbose) {
			/*
			 * Reset the width to accommodate the verbose listing
			 * of devices.
			 */
			width = cb->cb_namewidth;
		}

		if (!first)
			(void) fputs(" ", stdout);

		if (print_header) {
			right_justify = B_FALSE;
			if (pl->pl_prop != ZPROP_USERPROP) {
				header = zpool_prop_column_name(pl->pl_prop);
				right_justify = zpool_prop_align_right(
				    pl->pl_prop);
			} else {
				int i;

				for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
					headerbuf[i] = toupper(
					    pl->pl_user_prop[i]);
				headerbuf[i] = '\0';
				header = headerbuf;
			}

		}
		/*
		 * If `print_header` is false, we want to print a line of
		 * dashes.
		 */
		else {
			if (first) {
				header = vdev_name;
				right_justify = B_FALSE;
			} else {
				header = "-";
				right_justify = B_TRUE;
			}
		}

		if (pl->pl_next == NULL && !right_justify)
			(void) fputs(header, stdout);
		else if (right_justify)
			(void) printf("%*s", (int)width, header);
		else
			(void) printf("%-*s", (int)width, header);

		if (first)
			first = B_FALSE;
	}

	(void) fputc('\n', stdout);
}

/*
 * Given a pool and a list of properties, print out all the properties
 * according to the described layout. Used by zpool_do_list().
 */
static void
collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
{
	zprop_list_t *pl = cb->cb_proplist;
	boolean_t first = B_TRUE;
	char property[ZPOOL_MAXPROPLEN];
	const char *propstr;
	boolean_t right_justify;
	size_t width;
	zprop_source_t sourcetype = ZPROP_SRC_NONE;
	nvlist_t *item, *d, *props;
	item = d = props = NULL;

	if (cb->cb_json) {
		item = fnvlist_alloc();
		props = fnvlist_alloc();
		d = fnvlist_lookup_nvlist(cb->cb_jsobj, "pools");
		if (d == NULL) {
			fprintf(stderr, "pools obj not found.\n");
			exit(1);
		}
		fill_pool_info(item, zhp, B_TRUE, cb->cb_json_as_int);
	}

	for (; pl != NULL; pl = pl->pl_next) {

		width = pl->pl_width;
		if (first && cb->cb_verbose) {
			/*
			 * Reset the width to accommodate the verbose listing
			 * of devices.
			 */
			width = cb->cb_namewidth;
		}

		if (!cb->cb_json && !first) {
			if (cb->cb_scripted)
				(void) fputc('\t', stdout);
			else
				(void) fputs(" ", stdout);
		} else {
			first = B_FALSE;
		}

		right_justify = B_FALSE;
		if (pl->pl_prop != ZPROP_USERPROP) {
			if (zpool_get_prop(zhp, pl->pl_prop, property,
			    sizeof (property), &sourcetype,
			    cb->cb_literal) != 0)
				propstr = "-";
			else
				propstr = property;

			right_justify = zpool_prop_align_right(pl->pl_prop);
		} else if ((zpool_prop_feature(pl->pl_user_prop) ||
		    zpool_prop_unsupported(pl->pl_user_prop)) &&
		    zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
		    sizeof (property)) == 0) {
			propstr = property;
			sourcetype = ZPROP_SRC_LOCAL;
		} else if (zfs_prop_user(pl->pl_user_prop) &&
		    zpool_get_userprop(zhp, pl->pl_user_prop, property,
		    sizeof (property), &sourcetype) == 0) {
			propstr = property;
		} else {
			propstr = "-";
		}

		if (cb->cb_json) {
			if (pl->pl_prop == ZPOOL_PROP_NAME)
				continue;
			const char *prop_name;
			if (pl->pl_prop != ZPROP_USERPROP)
				prop_name = zpool_prop_to_name(pl->pl_prop);
			else
				prop_name = pl->pl_user_prop;
			(void) zprop_nvlist_one_property(
			    prop_name, propstr,
			    sourcetype, NULL, NULL, props, cb->cb_json_as_int);
		} else {
			/*
			 * If this is being called in scripted mode, or if this
			 * is the last column and it is left-justified, don't
			 * include a width format specifier.
			 */
			if (cb->cb_scripted || (pl->pl_next == NULL &&
			    !right_justify))
				(void) fputs(propstr, stdout);
			else if (right_justify)
				(void) printf("%*s", (int)width, propstr);
			else
				(void) printf("%-*s", (int)width, propstr);
		}
	}

	if (cb->cb_json) {
		fnvlist_add_nvlist(item, "properties", props);
		if (cb->cb_json_pool_key_guid) {
			char pool_guid[256];
			uint64_t guid = fnvlist_lookup_uint64(
			    zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_POOL_GUID);
			(void) snprintf(pool_guid, 256, "%llu",
			    (u_longlong_t)guid);
			fnvlist_add_nvlist(d, pool_guid, item);
		} else {
			fnvlist_add_nvlist(d, zpool_get_name(zhp),
			    item);
		}
		fnvlist_free(props);
		fnvlist_free(item);
	} else
		(void) fputc('\n', stdout);
}

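/*
 * Sketch of the JSON this produces (abbreviated and illustrative only):
 * each pool becomes an entry under "pools", keyed by name or, with
 * --json-pool-key-guid, by its numeric pool GUID:
 *
 *	"pools": { "tank": { ..., "properties": { "size": ..., ... } } }
 */
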
static void
collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str,
    boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format,
    boolean_t json, nvlist_t *nvl, boolean_t as_int)
{
	char propval[64];
	boolean_t fixed;
	size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);

	switch (prop) {
	case ZPOOL_PROP_SIZE:
	case ZPOOL_PROP_EXPANDSZ:
	case ZPOOL_PROP_CHECKPOINT:
	case ZPOOL_PROP_DEDUPRATIO:
	case ZPOOL_PROP_DEDUPCACHED:
		if (value == 0)
			(void) strlcpy(propval, "-", sizeof (propval));
		else
			zfs_nicenum_format(value, propval, sizeof (propval),
			    format);
		break;
	case ZPOOL_PROP_FRAGMENTATION:
		if (value == ZFS_FRAG_INVALID) {
			(void) strlcpy(propval, "-", sizeof (propval));
		} else if (format == ZFS_NICENUM_RAW) {
			(void) snprintf(propval, sizeof (propval), "%llu",
			    (unsigned long long)value);
		} else {
			(void) snprintf(propval, sizeof (propval), "%llu%%",
			    (unsigned long long)value);
		}
		break;
	case ZPOOL_PROP_CAPACITY:
		/* capacity value is in parts-per-10,000 (aka permyriad) */
		if (format == ZFS_NICENUM_RAW)
			(void) snprintf(propval, sizeof (propval), "%llu",
			    (unsigned long long)value / 100);
		else
			(void) snprintf(propval, sizeof (propval),
			    value < 1000 ? "%1.2f%%" : value < 10000 ?
			    "%2.1f%%" : "%3.0f%%", value / 100.0);
		break;
	case ZPOOL_PROP_HEALTH:
		width = 8;
		(void) strlcpy(propval, str, sizeof (propval));
		break;
	default:
		zfs_nicenum_format(value, propval, sizeof (propval), format);
	}

	if (!valid)
		(void) strlcpy(propval, "-", sizeof (propval));

	if (json) {
		(void) zprop_nvlist_one_property(zpool_prop_to_name(prop),
		    propval, ZPROP_SRC_NONE, NULL, NULL, nvl, as_int);
	} else {
		if (scripted)
			(void) printf("\t%s", propval);
		else
			(void) printf(" %*s", (int)width, propval);
	}
}

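/*
 * Capacity example: a permyriad value of 2534 renders as "25.3%" (the
 * %2.1f branch), 123 renders as "1.23%", and with ZFS_NICENUM_RAW the
 * integer percentage 2534 / 100 = 25 is printed instead.
 */
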
/*
 * Print the static default stats line for each vdev.
 */
static void
collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
    list_cbdata_t *cb, int depth, boolean_t isspare, nvlist_t *item)
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, children = 0;
	char *vname;
	boolean_t scripted = cb->cb_scripted;
	uint64_t islog = B_FALSE;
	nvlist_t *props, *ent, *ch, *obj, *l2c, *sp;
	props = ent = ch = obj = sp = l2c = NULL;

	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);

	if (name != NULL) {
		boolean_t toplevel = (vs->vs_space != 0);
		uint64_t cap;
		enum zfs_nicenum_format format;
		const char *state;

		if (cb->cb_literal)
			format = ZFS_NICENUM_RAW;
		else
			format = ZFS_NICENUM_1024;

		if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
			return;

		if (cb->cb_json) {
			props = fnvlist_alloc();
			ent = fnvlist_alloc();
			fill_vdev_info(ent, zhp, (char *)name, B_FALSE,
			    cb->cb_json_as_int);
		} else {
			if (scripted)
				(void) printf("\t%s", name);
			else if (strlen(name) + depth > cb->cb_namewidth)
				(void) printf("%*s%s", depth, "", name);
			else
				(void) printf("%*s%s%*s", depth, "", name,
				    (int)(cb->cb_namewidth - strlen(name) -
				    depth), "");
		}

		/*
		 * Print the properties for the individual vdevs. Some
		 * properties are only applicable to toplevel vdevs. The
		 * 'toplevel' boolean value is passed to collect_vdev_prop()
		 * to indicate that the value is valid.
		 */
		for (zprop_list_t *pl = cb->cb_proplist; pl != NULL;
		    pl = pl->pl_next) {
			switch (pl->pl_prop) {
			case ZPOOL_PROP_SIZE:
				if (VDEV_STAT_VALID(vs_pspace, c) &&
				    vs->vs_pspace) {
					collect_vdev_prop(
					    ZPOOL_PROP_SIZE, vs->vs_pspace,
					    NULL, scripted, B_TRUE, format,
					    cb->cb_json, props,
					    cb->cb_json_as_int);
				} else {
					collect_vdev_prop(
					    ZPOOL_PROP_SIZE, vs->vs_space, NULL,
					    scripted, toplevel, format,
					    cb->cb_json, props,
					    cb->cb_json_as_int);
				}
				break;
			case ZPOOL_PROP_ALLOCATED:
				collect_vdev_prop(ZPOOL_PROP_ALLOCATED,
				    vs->vs_alloc, NULL, scripted, toplevel,
				    format, cb->cb_json, props,
				    cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_FREE:
				collect_vdev_prop(ZPOOL_PROP_FREE,
				    vs->vs_space - vs->vs_alloc, NULL, scripted,
				    toplevel, format, cb->cb_json, props,
				    cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_CHECKPOINT:
				collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,
				    vs->vs_checkpoint_space, NULL, scripted,
				    toplevel, format, cb->cb_json, props,
				    cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_EXPANDSZ:
				collect_vdev_prop(ZPOOL_PROP_EXPANDSZ,
				    vs->vs_esize, NULL, scripted, B_TRUE,
				    format, cb->cb_json, props,
				    cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_FRAGMENTATION:
				collect_vdev_prop(
				    ZPOOL_PROP_FRAGMENTATION,
				    vs->vs_fragmentation, NULL, scripted,
				    (vs->vs_fragmentation != ZFS_FRAG_INVALID &&
				    toplevel),
				    format, cb->cb_json, props,
				    cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_CAPACITY:
				cap = (vs->vs_space == 0) ?
				    0 : (vs->vs_alloc * 10000 / vs->vs_space);
				collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap,
				    NULL, scripted, toplevel, format,
				    cb->cb_json, props, cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_HEALTH:
				state = zpool_state_to_name(vs->vs_state,
				    vs->vs_aux);
				if (isspare) {
					if (vs->vs_aux == VDEV_AUX_SPARED)
						state = "INUSE";
					else if (vs->vs_state ==
					    VDEV_STATE_HEALTHY)
						state = "AVAIL";
				}
				collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state,
				    scripted, B_TRUE, format, cb->cb_json,
				    props, cb->cb_json_as_int);
				break;

			case ZPOOL_PROP_NAME:
				break;

			default:
				collect_vdev_prop(pl->pl_prop, 0,
				    NULL, scripted, B_FALSE, format,
				    cb->cb_json, props, cb->cb_json_as_int);
			}
		}

		if (cb->cb_json) {
			fnvlist_add_nvlist(ent, "properties", props);
			fnvlist_free(props);
		} else
			(void) fputc('\n', stdout);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		if (cb->cb_json) {
			fnvlist_add_nvlist(item, name, ent);
			fnvlist_free(ent);
		}
		return;
	}

	if (cb->cb_json) {
		ch = fnvlist_alloc();
	}

	/* list the normal vdevs first */
	for (c = 0; c < children; c++) {
		uint64_t ishole = B_FALSE;

		if (nvlist_lookup_uint64(child[c],
		    ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
			continue;

		if (nvlist_lookup_uint64(child[c],
		    ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
			continue;

		if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
			continue;

		vname = zpool_vdev_name(g_zfs, zhp, child[c],
		    cb->cb_name_flags | VDEV_NAME_TYPE_ID);

		if (name == NULL || cb->cb_json != B_TRUE)
			collect_list_stats(zhp, vname, child[c], cb, depth + 2,
			    B_FALSE, item);
		else if (cb->cb_json) {
			collect_list_stats(zhp, vname, child[c], cb, depth + 2,
			    B_FALSE, ch);
		}
		free(vname);
	}

	if (cb->cb_json) {
		if (!nvlist_empty(ch))
			fnvlist_add_nvlist(ent, "vdevs", ch);
		fnvlist_free(ch);
	}

	/* list the classes: 'logs', 'dedup', and 'special' */
	for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
		boolean_t printed = B_FALSE;
		if (cb->cb_json)
			obj = fnvlist_alloc();
		for (c = 0; c < children; c++) {
			const char *bias = NULL;
			const char *type = NULL;

			if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
			    &islog) == 0 && islog) {
				bias = VDEV_ALLOC_CLASS_LOGS;
			} else {
				(void) nvlist_lookup_string(child[c],
				    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
				(void) nvlist_lookup_string(child[c],
				    ZPOOL_CONFIG_TYPE, &type);
			}
			if (bias == NULL || strcmp(bias, class_name[n]) != 0)
				continue;
			if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
				continue;

			if (!printed && !cb->cb_json) {
				print_line(cb, class_name[n]);
				printed = B_TRUE;
			}
			vname = zpool_vdev_name(g_zfs, zhp, child[c],
			    cb->cb_name_flags | VDEV_NAME_TYPE_ID);
			collect_list_stats(zhp, vname, child[c], cb, depth + 2,
			    B_FALSE, obj);
			free(vname);
		}
		if (cb->cb_json) {
			if (!nvlist_empty(obj))
				fnvlist_add_nvlist(item, class_name[n], obj);
			fnvlist_free(obj);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0 && children > 0) {
		if (cb->cb_json) {
			l2c = fnvlist_alloc();
		} else {
			print_line(cb, "cache");
		}
		for (c = 0; c < children; c++) {
			vname = zpool_vdev_name(g_zfs, zhp, child[c],
			    cb->cb_name_flags);
			collect_list_stats(zhp, vname, child[c], cb, depth + 2,
			    B_FALSE, l2c);
			free(vname);
		}
		if (cb->cb_json) {
			if (!nvlist_empty(l2c))
				fnvlist_add_nvlist(item, "l2cache", l2c);
			fnvlist_free(l2c);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
	    &children) == 0 && children > 0) {
		if (cb->cb_json) {
			sp = fnvlist_alloc();
		} else {
			print_line(cb, "spare");
		}
		for (c = 0; c < children; c++) {
			vname = zpool_vdev_name(g_zfs, zhp, child[c],
			    cb->cb_name_flags);
			collect_list_stats(zhp, vname, child[c], cb, depth + 2,
			    B_TRUE, sp);
			free(vname);
		}
		if (cb->cb_json) {
			if (!nvlist_empty(sp))
				fnvlist_add_nvlist(item, "spares", sp);
			fnvlist_free(sp);
		}
	}

	if (name != NULL && cb->cb_json) {
		fnvlist_add_nvlist(item, name, ent);
		fnvlist_free(ent);
	}
}

/*
 * Generic callback function to list a pool.
 */
static int
list_callback(zpool_handle_t *zhp, void *data)
{
	nvlist_t *p, *d, *nvdevs;
	uint64_t guid;
	char pool_guid[256];
	const char *pool_name = zpool_get_name(zhp);
	list_cbdata_t *cbp = data;
	p = d = nvdevs = NULL;

	collect_pool(zhp, cbp);

	if (cbp->cb_verbose) {
		nvlist_t *config, *nvroot;
		config = zpool_get_config(zhp, NULL);
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (cbp->cb_json) {
			d = fnvlist_lookup_nvlist(cbp->cb_jsobj,
			    "pools");
			if (cbp->cb_json_pool_key_guid) {
				guid = fnvlist_lookup_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID);
				(void) snprintf(pool_guid, 256, "%llu",
				    (u_longlong_t)guid);
				p = fnvlist_lookup_nvlist(d, pool_guid);
			} else {
				p = fnvlist_lookup_nvlist(d, pool_name);
			}
			nvdevs = fnvlist_alloc();
		}
		collect_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE, nvdevs);
		if (cbp->cb_json) {
			fnvlist_add_nvlist(p, "vdevs", nvdevs);
			if (cbp->cb_json_pool_key_guid)
				fnvlist_add_nvlist(d, pool_guid, p);
			else
				fnvlist_add_nvlist(d, pool_name, p);
			fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
			fnvlist_free(nvdevs);
		}
	}

	return (0);
}

/*
 * Set the minimum pool/vdev name column width. The width must be at least 9,
 * but may be as large as needed.
 */
static int
get_namewidth_list(zpool_handle_t *zhp, void *data)
{
	list_cbdata_t *cb = data;
	int width;

	width = get_namewidth(zhp, cb->cb_namewidth,
	    cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);

	if (width < 9)
		width = 9;

	cb->cb_namewidth = width;

	return (0);
}

/*
 * zpool list [-gjHLpPv] [-o prop[,prop]*] [-T d|u] [pool] ...
 *	[interval [count]]
 *
 *	-g	Display guid for individual vdev name.
 *	-H	Scripted mode. Don't display headers, and separate properties
 *		by a single tab.
 *	-L	Follow links when resolving vdev path name.
 *	-o	List of properties to display. Defaults to
 *		"name,size,allocated,free,expandsize,fragmentation,capacity,"
 *		"dedupratio,health,altroot"
 *	-p	Display values in parsable (exact) format.
 *	-P	Display full path for vdev name.
 *	-T	Display a timestamp in date(1) or Unix format
 *	-j	Display the output in JSON format
 *	--json-int	Display the numbers as integers instead of strings.
 *	--json-pool-key-guid	Set pool GUID as key for pool objects.
 *
 * List all pools in the system, whether or not they're healthy. Output space
 * statistics for each one, as well as a health status summary.
 */
int
zpool_do_list(int argc, char **argv)
{
	int c;
	int ret = 0;
	list_cbdata_t cb = { 0 };
	static char default_props[] =
	    "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
	    "capacity,dedupratio,health,altroot";
	char *props = default_props;
	float interval = 0;
	unsigned long count = 0;
	zpool_list_t *list;
	boolean_t first = B_TRUE;
	nvlist_t *data = NULL;
	current_prop_type = ZFS_TYPE_POOL;

	struct option long_options[] = {
		{"json", no_argument, NULL, 'j'},
		{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
		{"json-pool-key-guid", no_argument, NULL,
		    ZPOOL_OPTION_POOL_KEY_GUID},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, ":gjHLo:pPT:v", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'g':
			cb.cb_name_flags |= VDEV_NAME_GUID;
			break;
		case 'H':
			cb.cb_scripted = B_TRUE;
			break;
		case 'L':
			cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'o':
			props = optarg;
			break;
		case 'P':
			cb.cb_name_flags |= VDEV_NAME_PATH;
			break;
		case 'p':
			cb.cb_literal = B_TRUE;
			break;
		case 'j':
			cb.cb_json = B_TRUE;
			break;
		case ZPOOL_OPTION_JSON_NUMS_AS_INT:
			cb.cb_json_as_int = B_TRUE;
			cb.cb_literal = B_TRUE;
			break;
		case ZPOOL_OPTION_POOL_KEY_GUID:
			cb.cb_json_pool_key_guid = B_TRUE;
			break;
		case 'T':
			get_timestamp_arg(*optarg);
			break;
		case 'v':
			cb.cb_verbose = B_TRUE;
			cb.cb_namewidth = 8; /* 8 until precalc is avail */
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	if (!cb.cb_json && cb.cb_json_as_int) {
		(void) fprintf(stderr, gettext("'--json-int' only works with"
		    " '-j' option\n"));
		usage(B_FALSE);
	}

	if (!cb.cb_json && cb.cb_json_pool_key_guid) {
		(void) fprintf(stderr, gettext("'json-pool-key-guid' only"
		    " works with '-j' option\n"));
		usage(B_FALSE);
	}

	get_interval_count(&argc, argv, &interval, &count);

	if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
		usage(B_FALSE);

	for (;;) {
		if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
		    ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
			return (1);

		if (pool_list_count(list) == 0)
			break;

		if (cb.cb_json) {
			cb.cb_jsobj = zpool_json_schema(0, 1);
			data = fnvlist_alloc();
			fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
			fnvlist_free(data);
		}

		cb.cb_namewidth = 0;
		(void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);

		if (timestamp_fmt != NODATE) {
			if (cb.cb_json) {
				if (cb.cb_json_as_int) {
					fnvlist_add_uint64(cb.cb_jsobj, "time",
					    time(NULL));
				} else {
					char ts[128];
					get_timestamp(timestamp_fmt, ts, 128);
					fnvlist_add_string(cb.cb_jsobj, "time",
					    ts);
				}
			} else
				print_timestamp(timestamp_fmt);
		}

		if (!cb.cb_scripted && (first || cb.cb_verbose) &&
		    !cb.cb_json) {
			print_line(&cb, NULL);
			first = B_FALSE;
		}
		ret = pool_list_iter(list, B_TRUE, list_callback, &cb);

		if (ret == 0 && cb.cb_json)
			zcmd_print_json(cb.cb_jsobj);
		else if (ret != 0 && cb.cb_json)
			nvlist_free(cb.cb_jsobj);

		if (interval == 0)
			break;

		if (count != 0 && --count == 0)
			break;

		pool_list_free(list);

		(void) fflush(stdout);
		(void) fsleep(interval);
	}

	if (argc == 0 && !cb.cb_scripted && !cb.cb_json &&
	    pool_list_count(list) == 0) {
		(void) printf(gettext("no pools available\n"));
		ret = 0;
	}

	pool_list_free(list);
	zprop_free_list(cb.cb_proplist);
	return (ret);
}

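/*
 * Usage examples (pool name illustrative): "zpool list" prints one line
 * per pool; "zpool list -v tank" adds per-vdev lines; "zpool list tank
 * 5 3" reprints the listing every 5 seconds, 3 times, via the
 * interval/count loop above.
 */
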
static int
zpool_do_attach_or_replace(int argc, char **argv, int replacing)
{
	boolean_t force = B_FALSE;
	boolean_t rebuild = B_FALSE;
	boolean_t wait = B_FALSE;
	int c;
	nvlist_t *nvroot;
	char *poolname, *old_disk, *new_disk;
	zpool_handle_t *zhp;
	nvlist_t *props = NULL;
	char *propval;
	int ret;

	/* check options */
	while ((c = getopt(argc, argv, "fo:sw")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				usage(B_FALSE);
			}
			*propval = '\0';
			propval++;

			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
			    (add_prop_list(optarg, propval, &props, B_TRUE)))
				usage(B_FALSE);
			break;
		case 's':
			rebuild = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if (argc < 2) {
		(void) fprintf(stderr,
		    gettext("missing <device> specification\n"));
		usage(B_FALSE);
	}

	old_disk = argv[1];

	if (argc < 3) {
		if (!replacing) {
			(void) fprintf(stderr,
			    gettext("missing <new_device> specification\n"));
			usage(B_FALSE);
		}
		new_disk = old_disk;
		argc -= 1;
		argv += 1;
	} else {
		new_disk = argv[2];
		argc -= 2;
		argv += 2;
	}

	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
		nvlist_free(props);
		return (1);
	}

	if (zpool_get_config(zhp, NULL) == NULL) {
		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
		    poolname);
		zpool_close(zhp);
		nvlist_free(props);
		return (1);
	}

	/* unless manually specified use "ashift" pool property (if set) */
	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
		int intval;
		zprop_source_t src;
		char strval[ZPOOL_MAXPROPLEN];

		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
		if (src != ZPROP_SRC_DEFAULT) {
			(void) sprintf(strval, "%" PRId32, intval);
			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
			    &props, B_TRUE) == 0);
		}
	}

	nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
	    argc, argv);
	if (nvroot == NULL) {
		zpool_close(zhp);
		nvlist_free(props);
		return (1);
	}

	ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
	    rebuild);

	if (ret == 0 && wait) {
		zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;
		char raidz_prefix[] = "raidz";
		if (replacing) {
			activity = ZPOOL_WAIT_REPLACE;
		} else if (strncmp(old_disk,
		    raidz_prefix, strlen(raidz_prefix)) == 0) {
			activity = ZPOOL_WAIT_RAIDZ_EXPAND;
		}
		ret = zpool_wait(zhp, activity);
	}

	nvlist_free(props);
	nvlist_free(nvroot);
	zpool_close(zhp);

	return (ret);
}

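/*
 * Ashift inheritance example: if a pool was created with ashift=12 (a
 * non-default property source), a plain "zpool replace tank sda sdb"
 * passes ashift=12 along to the new vdev via the block above; an
 * explicit "-o ashift=..." on the command line takes precedence.
 * Device names are illustrative.
 */
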
/*
 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
 *
 *	-f	Force attach, even if <new_device> appears to be in use.
 *	-s	Use sequential instead of healing reconstruction for resilver.
 *	-o	Set property=value.
 *	-w	Wait for replacing to complete before returning
 *
 * Replace <device> with <new_device>.
 */
int
zpool_do_replace(int argc, char **argv)
{
	return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
}

/*
 * zpool attach [-fsw] [-o property=value] <pool> <vdev> <new_device>
 *
 *	-f	Force attach, even if <new_device> appears to be in use.
 *	-s	Use sequential instead of healing reconstruction for resilver.
 *	-o	Set property=value.
 *	-w	Wait for resilvering (mirror) or expansion (raidz) to complete
 *		before returning.
 *
 * Attach <new_device> to a <vdev>, where the vdev can be of type
 * device, mirror or raidz. If <vdev> is not part of a mirror, then <vdev> will
 * be transformed into a mirror of <vdev> and <new_device>. When a mirror
 * is involved, <new_device> will begin life with a DTL of [0, now], and will
 * immediately begin to resilver itself. For the raidz case, an expansion will
 * commence and reflow the raidz data across all the disks including the
 * <new_device>.
 */
int
zpool_do_attach(int argc, char **argv)
{
	return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
}

/*
 * zpool detach [-f] <pool> <device>
 *
 *	-f	Force detach of <device>, even if DTLs argue against it
 *		(not supported yet)
 *
 * Detach a device from a mirror. The operation will be refused if <device>
 * is the last device in the mirror, or if the DTLs indicate that this device
 * has the only valid copy of some data.
 */
int
zpool_do_detach(int argc, char **argv)
{
	int c;
	char *poolname, *path;
	zpool_handle_t *zhp;
	int ret;

	/* check options */
	while ((c = getopt(argc, argv, "")) != -1) {
		switch (c) {
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	if (argc < 2) {
		(void) fprintf(stderr,
		    gettext("missing <device> specification\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];
	path = argv[1];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	ret = zpool_vdev_detach(zhp, path);

	zpool_close(zhp);

	return (ret);
}

/*
 * zpool split [-gLnP] [-o prop=val] ...
 *		[-o mntopt] ...
 *		[-R altroot] <pool> <newpool> [<device> ...]
 *
 *	-g	Display guid for individual vdev name.
 *	-L	Follow links when resolving vdev path name.
 *	-n	Do not split the pool, but display the resulting layout if
 *		it were to be split.
 *	-o	Set property=value, or set mount options.
 *	-P	Display full path for vdev name.
 *	-R	Mount the split-off pool under an alternate root.
 *	-l	Load encryption keys while importing.
 *
 * Splits the named pool and gives it the new pool name. Devices to be split
 * off may be listed, provided that no more than one device is specified
 * per top-level vdev mirror. The newly split pool is left in an exported
 * state unless -R is specified.
 *
 * Restrictions: the top-level of the pool must only be made up of
 * mirrors; all devices in the pool must be healthy; no device may be
 * undergoing a resilvering operation.
 */
int
zpool_do_split(int argc, char **argv)
{
	char *srcpool, *newpool, *propval;
	char *mntopts = NULL;
	splitflags_t flags;
	int c, ret = 0;
	int ms_status = 0;
	boolean_t loadkeys = B_FALSE;
	zpool_handle_t *zhp;
	nvlist_t *config, *props = NULL;

	flags.dryrun = B_FALSE;
	flags.import = B_FALSE;
	flags.name_flags = 0;

	/* check options */
	while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
		switch (c) {
		case 'g':
			flags.name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'R':
			flags.import = B_TRUE;
			if (add_prop_list(
			    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
			    &props, B_TRUE) != 0) {
				nvlist_free(props);
				usage(B_FALSE);
			}
			break;
		case 'l':
			loadkeys = B_TRUE;
			break;
		case 'n':
			flags.dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) != NULL) {
				*propval = '\0';
				propval++;
				if (add_prop_list(optarg, propval,
				    &props, B_TRUE) != 0) {
					nvlist_free(props);
					usage(B_FALSE);
				}
			} else {
				mntopts = optarg;
			}
			break;
		case 'P':
			flags.name_flags |= VDEV_NAME_PATH;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
			break;
		}
	}

	if (!flags.import && mntopts != NULL) {
		(void) fprintf(stderr, gettext("setting mntopts is only "
		    "valid when importing the pool\n"));
		usage(B_FALSE);
	}

	if (!flags.import && loadkeys) {
		(void) fprintf(stderr, gettext("loading keys is only "
		    "valid when importing the pool\n"));
		usage(B_FALSE);
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("Missing pool name\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("Missing new pool name\n"));
		usage(B_FALSE);
	}

	srcpool = argv[0];
	newpool = argv[1];

	argc -= 2;
	argv += 2;

	if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
		nvlist_free(props);
		return (1);
	}

	config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
	if (config == NULL) {
		ret = 1;
	} else {
		if (flags.dryrun) {
			(void) printf(gettext("would create '%s' with the "
			    "following layout:\n\n"), newpool);
			print_vdev_tree(NULL, newpool, config, 0, "",
			    flags.name_flags);
			print_vdev_tree(NULL, "dedup", config, 0,
			    VDEV_ALLOC_BIAS_DEDUP, 0);
			print_vdev_tree(NULL, "special", config, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, 0);
		}
	}

	zpool_close(zhp);

	if (ret != 0 || flags.dryrun || !flags.import) {
		nvlist_free(config);
		nvlist_free(props);
		return (ret);
	}

	/*
	 * The split was successful. Now we need to open the new
	 * pool and import it.
	 */
	if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
		nvlist_free(config);
		nvlist_free(props);
		return (1);
	}

	if (loadkeys) {
		ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
		if (ret != 0)
			ret = 1;
	}

	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
		ms_status = zpool_enable_datasets(zhp, mntopts, 0,
		    mount_tp_nthr);
		if (ms_status == EZFS_SHAREFAILED) {
			(void) fprintf(stderr, gettext("Split was successful, "
			    "datasets are mounted but sharing of some datasets "
			    "has failed\n"));
		} else if (ms_status == EZFS_MOUNTFAILED) {
			(void) fprintf(stderr, gettext("Split was successful"
			    ", but some datasets could not be mounted\n"));
			(void) fprintf(stderr, gettext("Try doing '%s' with a "
			    "different altroot\n"), "zpool import");
		}
	}
	zpool_close(zhp);
	nvlist_free(config);
	nvlist_free(props);

	return (ret);
}

/*
 * zpool online [-e] [--power] <pool> <device> ...
 *
 * -e		Expand the device to use all available space.
 *
 * --power	Power on the enclosure slot to the drive (if possible).
 */
int
zpool_do_online(int argc, char **argv)
{
    int c, i;
    char *poolname;
    zpool_handle_t *zhp;
    int ret = 0;
    vdev_state_t newstate;
    int flags = 0;
    boolean_t is_power_on = B_FALSE;
    struct option long_options[] = {
        {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
        {0, 0, 0, 0}
    };

    /* check options */
    while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) {
        switch (c) {
        case 'e':
            flags |= ZFS_ONLINE_EXPAND;
            break;
        case ZPOOL_OPTION_POWER:
            is_power_on = B_TRUE;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
        is_power_on = B_TRUE;

    argc -= optind;
    argv += optind;

    /* get pool name and check number of arguments */
    if (argc < 1) {
        (void) fprintf(stderr, gettext("missing pool name\n"));
        usage(B_FALSE);
    }
    if (argc < 2) {
        (void) fprintf(stderr, gettext("missing device name\n"));
        usage(B_FALSE);
    }

    poolname = argv[0];

    if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
        (void) fprintf(stderr, gettext("failed to open pool "
            "\"%s\""), poolname);
        return (1);
    }

    for (i = 1; i < argc; i++) {
        vdev_state_t oldstate;
        boolean_t avail_spare, l2cache;
        int rc;

        if (is_power_on) {
            rc = zpool_power_on_and_disk_wait(zhp, argv[i]);
            if (rc == ENOTSUP) {
                (void) fprintf(stderr,
                    gettext("Power control not supported\n"));
            }
            if (rc != 0)
                return (rc);
        }

        nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
            &l2cache, NULL);
        if (tgt == NULL) {
            ret = 1;
            (void) fprintf(stderr, gettext("couldn't find device "
                "\"%s\" in pool \"%s\"\n"), argv[i], poolname);
            continue;
        }
        uint_t vsc;
        oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
            ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
        if ((rc = zpool_vdev_online(zhp, argv[i], flags,
            &newstate)) == 0) {
            if (newstate != VDEV_STATE_HEALTHY) {
                (void) printf(gettext("warning: device '%s' "
                    "onlined, but remains in faulted state\n"),
                    argv[i]);
                if (newstate == VDEV_STATE_FAULTED)
                    (void) printf(gettext("use 'zpool "
                        "clear' to restore a faulted "
                        "device\n"));
                else
                    (void) printf(gettext("use 'zpool "
                        "replace' to replace devices "
                        "that are no longer present\n"));
                if ((flags & ZFS_ONLINE_EXPAND)) {
                    (void) printf(gettext("%s: failed "
                        "to expand usable space on "
                        "unhealthy device '%s'\n"),
                        (oldstate >= VDEV_STATE_DEGRADED ?
                        "error" : "warning"), argv[i]);
                    if (oldstate >= VDEV_STATE_DEGRADED) {
                        ret = 1;
                        break;
                    }
                }
            }
        } else {
            (void) fprintf(stderr, gettext("Failed to online "
                "\"%s\" in pool \"%s\": %d\n"),
                argv[i], poolname, rc);
            ret = 1;
        }
    }

    zpool_close(zhp);

    return (ret);
}

/*
 * zpool offline [-ft]|[--power] <pool> <device> ...
 *
 * -f		Force the device into a faulted state.
 *
 * -t		Only take the device off-line temporarily. The offline/faulted
 *		state will not be persistent across reboots.
 *
 * --power	Power off the enclosure slot to the drive (if possible).
 */
int
zpool_do_offline(int argc, char **argv)
{
    int c, i;
    char *poolname;
    zpool_handle_t *zhp;
    int ret = 0;
    boolean_t istmp = B_FALSE;
    boolean_t fault = B_FALSE;
    boolean_t is_power_off = B_FALSE;

    struct option long_options[] = {
        {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
        {0, 0, 0, 0}
    };

    /* check options */
    while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) {
        switch (c) {
        case 'f':
            fault = B_TRUE;
            break;
        case 't':
            istmp = B_TRUE;
            break;
        case ZPOOL_OPTION_POWER:
            is_power_off = B_TRUE;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    if (is_power_off && fault) {
        (void) fprintf(stderr,
            gettext("--power and -f cannot be used together\n"));
        usage(B_FALSE);
    }

    if (is_power_off && istmp) {
        (void) fprintf(stderr,
            gettext("--power and -t cannot be used together\n"));
        usage(B_FALSE);
    }

    argc -= optind;
    argv += optind;

    /* get pool name and check number of arguments */
    if (argc < 1) {
        (void) fprintf(stderr, gettext("missing pool name\n"));
        usage(B_FALSE);
    }
    if (argc < 2) {
        (void) fprintf(stderr, gettext("missing device name\n"));
        usage(B_FALSE);
    }

    poolname = argv[0];

    if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
        (void) fprintf(stderr, gettext("failed to open pool "
            "\"%s\""), poolname);
        return (1);
    }

    for (i = 1; i < argc; i++) {
        uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
        if (is_power_off) {
            /*
             * Note: we have to power off first, then set REMOVED,
             * or else zpool_vdev_set_removed_state() returns
             * EAGAIN.
             */
            ret = zpool_power_off(zhp, argv[i]);
            if (ret != 0) {
                (void) fprintf(stderr, "%s %s %d\n",
                    gettext("unable to power off slot for"),
                    argv[i], ret);
            }
            (void) zpool_vdev_set_removed_state(zhp, guid,
                VDEV_AUX_NONE);

        } else if (fault) {
            vdev_aux_t aux;
            if (istmp == B_FALSE) {
                /* Force the fault to persist across imports */
                aux = VDEV_AUX_EXTERNAL_PERSIST;
            } else {
                aux = VDEV_AUX_EXTERNAL;
            }

            if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
                ret = 1;
        } else {
            if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
                ret = 1;
        }
    }

    zpool_close(zhp);

    return (ret);
}

/*
 * zpool clear [-nF]|[--power] <pool> [device]
 *
 * Clear all errors associated with a pool or a particular device.
 */
int
zpool_do_clear(int argc, char **argv)
{
    int c;
    int ret = 0;
    boolean_t dryrun = B_FALSE;
    boolean_t do_rewind = B_FALSE;
    boolean_t xtreme_rewind = B_FALSE;
    boolean_t is_power_on = B_FALSE;
    uint32_t rewind_policy = ZPOOL_NO_REWIND;
    nvlist_t *policy = NULL;
    zpool_handle_t *zhp;
    char *pool, *device;

    struct option long_options[] = {
        {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
        {0, 0, 0, 0}
    };

    /* check options */
    while ((c = getopt_long(argc, argv, "FnX", long_options,
        NULL)) != -1) {
        switch (c) {
        case 'F':
            do_rewind = B_TRUE;
            break;
        case 'n':
            dryrun = B_TRUE;
            break;
        case 'X':
            xtreme_rewind = B_TRUE;
            break;
        case ZPOOL_OPTION_POWER:
            is_power_on = B_TRUE;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT"))
        is_power_on = B_TRUE;

    argc -= optind;
    argv += optind;

    if (argc < 1) {
        (void) fprintf(stderr, gettext("missing pool name\n"));
        usage(B_FALSE);
    }

    if (argc > 2) {
        (void) fprintf(stderr, gettext("too many arguments\n"));
        usage(B_FALSE);
    }

    if ((dryrun || xtreme_rewind) && !do_rewind) {
        (void) fprintf(stderr,
            gettext("-n or -X only meaningful with -F\n"));
        usage(B_FALSE);
    }
    if (dryrun)
        rewind_policy = ZPOOL_TRY_REWIND;
    else if (do_rewind)
        rewind_policy = ZPOOL_DO_REWIND;
    if (xtreme_rewind)
        rewind_policy |= ZPOOL_EXTREME_REWIND;

    /* In future, further rewind policy choices can be passed along here */
    if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
        nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
        rewind_policy) != 0) {
        return (1);
    }

    pool = argv[0];
    device = argc == 2 ? argv[1] : NULL;

    if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
        nvlist_free(policy);
        return (1);
    }

    if (is_power_on) {
        if (device == NULL) {
            (void) zpool_power_on_pool_and_wait_for_devices(zhp);
        } else {
            (void) zpool_power_on_and_disk_wait(zhp, device);
        }
    }

    if (zpool_clear(zhp, device, policy) != 0)
        ret = 1;

    zpool_close(zhp);

    nvlist_free(policy);

    return (ret);
}

/*
 * zpool reguid [-g <guid>] <pool>
 */
int
zpool_do_reguid(int argc, char **argv)
{
    uint64_t guid;
    uint64_t *guidp = NULL;
    int c;
    char *endptr;
    char *poolname;
    zpool_handle_t *zhp;
    int ret = 0;

    /* check options */
    while ((c = getopt(argc, argv, "g:")) != -1) {
        switch (c) {
        case 'g':
            errno = 0;
            guid = strtoull(optarg, &endptr, 10);
            if (errno != 0 || *endptr != '\0') {
                (void) fprintf(stderr,
                    gettext("invalid GUID: %s\n"), optarg);
                usage(B_FALSE);
            }
            guidp = &guid;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    argc -= optind;
    argv += optind;

    /* get pool name and check number of arguments */
    if (argc < 1) {
        (void) fprintf(stderr, gettext("missing pool name\n"));
        usage(B_FALSE);
    }

    if (argc > 1) {
        (void) fprintf(stderr, gettext("too many arguments\n"));
        usage(B_FALSE);
    }

    poolname = argv[0];
    if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
        return (1);

    ret = zpool_set_guid(zhp, guidp);

    zpool_close(zhp);
    return (ret);
}

/*
 * zpool reopen [-n] <pool>
 *
 * Reopen the pool so that the kernel can update the sizes of all vdevs.
 *
 * -n	Do not restart an in-progress scrub operation.
 */
int
zpool_do_reopen(int argc, char **argv)
{
    int c;
    int ret = 0;
    boolean_t scrub_restart = B_TRUE;

    /* check options */
    while ((c = getopt(argc, argv, "n")) != -1) {
        switch (c) {
        case 'n':
            scrub_restart = B_FALSE;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    argc -= optind;
    argv += optind;

    /* if argc == 0 we will execute zpool_reopen_one on all pools */
    ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
        B_FALSE, zpool_reopen_one, &scrub_restart);

    return (ret);
}

typedef struct scrub_cbdata {
    int cb_type;
    pool_scrub_cmd_t cb_scrub_cmd;
    time_t cb_date_start;
    time_t cb_date_end;
} scrub_cbdata_t;

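/*
 * Return B_TRUE if the pool's config reports a checkpoint that exists or
 * is in the process of being discarded, B_FALSE otherwise.
 */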
static boolean_t
zpool_has_checkpoint(zpool_handle_t *zhp)
{
    nvlist_t *config, *nvroot;

    config = zpool_get_config(zhp, NULL);

    if (config != NULL) {
        pool_checkpoint_stat_t *pcs = NULL;
        uint_t c;

        nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
        (void) nvlist_lookup_uint64_array(nvroot,
            ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);

        if (pcs == NULL || pcs->pcs_state == CS_NONE)
            return (B_FALSE);

        assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
            pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
        return (B_TRUE);
    }

    return (B_FALSE);
}

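/*
 * Per-pool callback for 'zpool scrub' and 'zpool resilver': start, pause,
 * or stop a scan as requested in the scrub_cbdata_t. Unavailable pools
 * are skipped with an error, and a warning is printed when a scrub will
 * not touch blocks that belong to an existing checkpoint.
 */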
static int
scrub_callback(zpool_handle_t *zhp, void *data)
{
    scrub_cbdata_t *cb = data;
    int err;

    /*
     * Ignore faulted pools.
     */
    if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
        (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
            "currently unavailable\n"), zpool_get_name(zhp));
        return (1);
    }

    err = zpool_scan_range(zhp, cb->cb_type, cb->cb_scrub_cmd,
        cb->cb_date_start, cb->cb_date_end);
    if (err == 0 && zpool_has_checkpoint(zhp) &&
        cb->cb_type == POOL_SCAN_SCRUB) {
        (void) printf(gettext("warning: will not scrub state that "
            "belongs to the checkpoint of pool '%s'\n"),
            zpool_get_name(zhp));
    }

    return (err != 0);
}

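/*
 * Per-pool callback for the -w (wait) option: block until the given
 * activity has completed on the pool.
 */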
static int
wait_callback(zpool_handle_t *zhp, void *data)
{
    zpool_wait_activity_t *act = data;
    return (zpool_wait(zhp, *act));
}

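/*
 * Parse a "%Y-%m-%d[ %H:%M]" date string in the local timezone into
 * seconds since the Epoch. With 'rounding' set, round up by one minute
 * (or one day when no time of day was given) so the date is inclusive.
 */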
static time_t
date_string_to_sec(const char *timestr, boolean_t rounding)
{
    struct tm tm = {0};
    int adjustment = rounding ? 1 : 0;

    /* Allow mktime to determine timezone. */
    tm.tm_isdst = -1;

    if (strptime(timestr, "%Y-%m-%d %H:%M", &tm) == NULL) {
        if (strptime(timestr, "%Y-%m-%d", &tm) == NULL) {
            fprintf(stderr, gettext("Failed to parse the date.\n"));
            usage(B_FALSE);
        }
        adjustment *= 24 * 60 * 60;
    } else {
        adjustment *= 60;
    }

    return (mktime(&tm) + adjustment);
}

/*
 * zpool scrub [-e | -s | -p | -C] [-E <date>] [-S <date>] [-w]
 *	[-a | <pool> ...]
 *
 * -a	Scrub all pools.
 * -e	Only scrub blocks in the error log.
 * -E	End date of scrub ("YYYY-MM-DD [HH:MM]", inclusive).
 * -S	Start date of scrub ("YYYY-MM-DD [HH:MM]").
 * -s	Stop. Stops any in-progress scrub.
 * -p	Pause. Pause in-progress scrub.
 * -w	Wait. Blocks until scrub has completed.
 * -C	Scrub from last saved txg.
 */
int
zpool_do_scrub(int argc, char **argv)
{
    int c;
    scrub_cbdata_t cb;
    boolean_t wait = B_FALSE;
    int error;

    cb.cb_type = POOL_SCAN_SCRUB;
    cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
    cb.cb_date_start = cb.cb_date_end = 0;

    boolean_t is_error_scrub = B_FALSE;
    boolean_t is_pause = B_FALSE;
    boolean_t is_stop = B_FALSE;
    boolean_t is_txg_continue = B_FALSE;
    boolean_t scrub_all = B_FALSE;

    /* check options */
    while ((c = getopt(argc, argv, "aspweCE:S:")) != -1) {
        switch (c) {
        case 'a':
            scrub_all = B_TRUE;
            break;
        case 'e':
            is_error_scrub = B_TRUE;
            break;
        case 'E':
            /*
             * Round the date. It's better to scrub more data than
             * less. This also makes the date inclusive.
             */
            cb.cb_date_end = date_string_to_sec(optarg, B_TRUE);
            break;
        case 's':
            is_stop = B_TRUE;
            break;
        case 'S':
            cb.cb_date_start = date_string_to_sec(optarg, B_FALSE);
            break;
        case 'p':
            is_pause = B_TRUE;
            break;
        case 'w':
            wait = B_TRUE;
            break;
        case 'C':
            is_txg_continue = B_TRUE;
            break;
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    if (is_pause && is_stop) {
        (void) fprintf(stderr, gettext("invalid option "
            "combination: -s and -p are mutually exclusive\n"));
        usage(B_FALSE);
    } else if (is_pause && is_txg_continue) {
        (void) fprintf(stderr, gettext("invalid option "
            "combination: -p and -C are mutually exclusive\n"));
        usage(B_FALSE);
    } else if (is_stop && is_txg_continue) {
        (void) fprintf(stderr, gettext("invalid option "
            "combination: -s and -C are mutually exclusive\n"));
        usage(B_FALSE);
    } else if (is_error_scrub && is_txg_continue) {
        (void) fprintf(stderr, gettext("invalid option "
            "combination: -e and -C are mutually exclusive\n"));
        usage(B_FALSE);
    } else {
        if (is_error_scrub)
            cb.cb_type = POOL_SCAN_ERRORSCRUB;

        if (is_pause) {
            cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
        } else if (is_stop) {
            cb.cb_type = POOL_SCAN_NONE;
        } else if (is_txg_continue) {
            cb.cb_scrub_cmd = POOL_SCRUB_FROM_LAST_TXG;
        } else {
            cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
        }
    }

    if ((cb.cb_date_start != 0 || cb.cb_date_end != 0) &&
        cb.cb_scrub_cmd != POOL_SCRUB_NORMAL) {
        (void) fprintf(stderr, gettext("invalid option combination: "
            "start/end date is available only with normal scrub\n"));
        usage(B_FALSE);
    }
    if (cb.cb_date_start != 0 && cb.cb_date_end != 0 &&
        cb.cb_date_start > cb.cb_date_end) {
        (void) fprintf(stderr, gettext("invalid arguments: "
            "end date has to be later than start date\n"));
        usage(B_FALSE);
    }

    if (wait && (cb.cb_type == POOL_SCAN_NONE ||
        cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
        (void) fprintf(stderr, gettext("invalid option combination: "
            "-w cannot be used with -p or -s\n"));
        usage(B_FALSE);
    }

    argc -= optind;
    argv += optind;

    if (argc < 1 && !scrub_all) {
        (void) fprintf(stderr, gettext("missing pool name argument\n"));
        usage(B_FALSE);
    }

    error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
        B_FALSE, scrub_callback, &cb);

    if (wait && !error) {
        zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
        error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
            B_FALSE, wait_callback, &act);
    }

    return (error);
}

/*
 * zpool resilver <pool> ...
 *
 * Restarts any in-progress resilver.
 */
int
zpool_do_resilver(int argc, char **argv)
{
    int c;
    scrub_cbdata_t cb;

    cb.cb_type = POOL_SCAN_RESILVER;
    cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
    cb.cb_date_start = cb.cb_date_end = 0;

    /* check options */
    while ((c = getopt(argc, argv, "")) != -1) {
        switch (c) {
        case '?':
            (void) fprintf(stderr, gettext("invalid option '%c'\n"),
                optopt);
            usage(B_FALSE);
        }
    }

    argc -= optind;
    argv += optind;

    if (argc < 1) {
        (void) fprintf(stderr, gettext("missing pool name argument\n"));
        usage(B_FALSE);
    }

    return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
        B_FALSE, scrub_callback, &cb));
}

/*
 * zpool trim [-d] [-r <rate>] [-c | -s] [-w] <-a | pool> [<device> ...]
 *
 * -a		Trim all pools.
 * -c		Cancel. Ends any in-progress trim.
 * -d		Secure trim. Requires kernel and device support.
 * -r <rate>	Sets the TRIM rate in bytes (per second). Supports
 *		adding a multiplier suffix such as 'k' or 'm'.
 * -s		Suspend. TRIM can then be restarted with no flags.
 * -w		Wait. Blocks until trimming has completed.
 */
int
zpool_do_trim(int argc, char **argv)
{
    struct option long_options[] = {
        {"cancel", no_argument, NULL, 'c'},
        {"secure", no_argument, NULL, 'd'},
        {"rate", required_argument, NULL, 'r'},
        {"suspend", no_argument, NULL, 's'},
        {"wait", no_argument, NULL, 'w'},
        {"all", no_argument, NULL, 'a'},
        {0, 0, 0, 0}
    };

    pool_trim_func_t cmd_type = POOL_TRIM_START;
    uint64_t rate = 0;
    boolean_t secure = B_FALSE;
    boolean_t wait = B_FALSE;
    boolean_t trimall = B_FALSE;
    int error;

    int c;
    while ((c = getopt_long(argc, argv, "acdr:sw", long_options, NULL))
        != -1) {
        switch (c) {
        case 'a':
            trimall = B_TRUE;
            break;
        case 'c':
            if (cmd_type != POOL_TRIM_START &&
                cmd_type != POOL_TRIM_CANCEL) {
                (void) fprintf(stderr, gettext("-c cannot be "
                    "combined with other options\n"));
                usage(B_FALSE);
            }
            cmd_type = POOL_TRIM_CANCEL;
            break;
        case 'd':
            if (cmd_type != POOL_TRIM_START) {
                (void) fprintf(stderr, gettext("-d cannot be "
                    "combined with the -c or -s options\n"));
                usage(B_FALSE);
            }
            secure = B_TRUE;
            break;
        case 'r':
            if (cmd_type != POOL_TRIM_START) {
                (void) fprintf(stderr, gettext("-r cannot be "
                    "combined with the -c or -s options\n"));
                usage(B_FALSE);
            }
            if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
                (void) fprintf(stderr, "%s: %s\n",
                    gettext("invalid value for rate"),
                    libzfs_error_description(g_zfs));
                usage(B_FALSE);
            }
            break;
        case 's':
            if (cmd_type != POOL_TRIM_START &&
                cmd_type != POOL_TRIM_SUSPEND) {
                (void) fprintf(stderr, gettext("-s cannot be "
                    "combined with other options\n"));
                usage(B_FALSE);
            }
            cmd_type = POOL_TRIM_SUSPEND;
            break;
        case 'w':
            wait = B_TRUE;
            break;
        case '?':
            if (optopt != 0) {
                (void) fprintf(stderr,
                    gettext("invalid option '%c'\n"), optopt);
            } else {
                (void) fprintf(stderr,
                    gettext("invalid option '%s'\n"),
                    argv[optind - 1]);
            }
            usage(B_FALSE);
        }
    }

    argc -= optind;
    argv += optind;

    trimflags_t trim_flags = {
        .secure = secure,
        .rate = rate,
        .wait = wait,
    };

    trim_cbdata_t cbdata = {
        .trim_flags = trim_flags,
        .cmd_type = cmd_type
    };

    if (argc < 1 && !trimall) {
        (void) fprintf(stderr, gettext("missing pool name argument\n"));
        usage(B_FALSE);
    }

    if (wait && (cmd_type != POOL_TRIM_START)) {
        (void) fprintf(stderr, gettext("-w cannot be used with -c or "
            "-s options\n"));
        usage(B_FALSE);
    }

    if (trimall && argc > 0) {
        (void) fprintf(stderr, gettext("-a cannot be combined with "
            "individual zpools or vdevs\n"));
        usage(B_FALSE);
    }

    if (argc == 0 && trimall) {
        cbdata.trim_flags.fullpool = B_TRUE;
        /* Trim each pool recursively */
        error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
            B_FALSE, zpool_trim_one, &cbdata);
    } else if (argc == 1) {
        char *poolname = argv[0];
        zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
        if (zhp == NULL)
            return (-1);
        /* no individual leaf vdevs specified, so add them all */
        error = zpool_trim_one(zhp, &cbdata);
        zpool_close(zhp);
    } else {
        char *poolname = argv[0];
        zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
        if (zhp == NULL)
            return (-1);
        /* leaf vdevs specified, trim only those */
        cbdata.trim_flags.fullpool = B_FALSE;
        nvlist_t *vdevs = fnvlist_alloc();
        for (int i = 1; i < argc; i++) {
            fnvlist_add_boolean(vdevs, argv[i]);
        }
        error = zpool_trim(zhp, cbdata.cmd_type, vdevs,
            &cbdata.trim_flags);
        fnvlist_free(vdevs);
        zpool_close(zhp);
    }

    return (error);
}

/*
 * Converts a total number of seconds to a human readable string broken
 * down into days/hours/minutes/seconds.
 */
static void
secs_to_dhms(uint64_t total, char *buf)
{
    uint64_t days = total / 60 / 60 / 24;
    uint64_t hours = (total / 60 / 60) % 24;
    uint64_t mins = (total / 60) % 60;
    uint64_t secs = (total % 60);

    if (days > 0) {
        (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
            (u_longlong_t)days, (u_longlong_t)hours,
            (u_longlong_t)mins, (u_longlong_t)secs);
    } else {
        (void) sprintf(buf, "%02llu:%02llu:%02llu",
            (u_longlong_t)hours, (u_longlong_t)mins,
            (u_longlong_t)secs);
    }
}

/*
 * Print out detailed error scrub status.
 */
static void
print_err_scrub_status(pool_scan_stat_t *ps)
{
    time_t start, end, pause;
    uint64_t total_secs_left;
    uint64_t secs_left, mins_left, hours_left, days_left;
    uint64_t examined, to_be_examined;

    if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) {
        return;
    }

    (void) printf(gettext(" scrub: "));

    start = ps->pss_error_scrub_start;
    end = ps->pss_error_scrub_end;
    pause = ps->pss_pass_error_scrub_pause;
    examined = ps->pss_error_scrub_examined;
    to_be_examined = ps->pss_error_scrub_to_be_examined;

    assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB);

    if (ps->pss_error_scrub_state == DSS_FINISHED) {
        total_secs_left = end - start;
        days_left = total_secs_left / 60 / 60 / 24;
        hours_left = (total_secs_left / 60 / 60) % 24;
        mins_left = (total_secs_left / 60) % 60;
        secs_left = (total_secs_left % 60);

        (void) printf(gettext("scrubbed %llu error blocks in %llu days "
            "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined,
            (u_longlong_t)days_left, (u_longlong_t)hours_left,
            (u_longlong_t)mins_left, (u_longlong_t)secs_left,
            ctime(&end));

        return;
    } else if (ps->pss_error_scrub_state == DSS_CANCELED) {
        (void) printf(gettext("error scrub canceled on %s"),
            ctime(&end));
        return;
    }
    assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING);

    /* Error scrub is in progress. */
    if (pause == 0) {
        (void) printf(gettext("error scrub in progress since %s"),
            ctime(&start));
    } else {
        (void) printf(gettext("error scrub paused since %s"),
            ctime(&pause));
        (void) printf(gettext("\terror scrub started on %s"),
            ctime(&start));
    }

    double fraction_done = (double)examined / (to_be_examined + examined);
    (void) printf(gettext("\t%.2f%% done, issued I/O for %llu error"
        " blocks"), 100 * fraction_done, (u_longlong_t)examined);

    (void) printf("\n");
}

/*
 * Print out detailed scrub status.
 */
static void
print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
{
    time_t start, end, pause;
    uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i;
    uint64_t elapsed, scan_rate, issue_rate;
    double fraction_done;
    char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7];
    char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];

    printf(" ");
    (void) printf_color(ANSI_BOLD, gettext("scan:"));
    printf(" ");

    /* If there's never been a scan, there's not much to say. */
    if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
        ps->pss_func >= POOL_SCAN_FUNCS) {
        (void) printf(gettext("none requested\n"));
        return;
    }

    start = ps->pss_start_time;
    end = ps->pss_end_time;
    pause = ps->pss_pass_scrub_pause;

    zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));

    int is_resilver = ps->pss_func == POOL_SCAN_RESILVER;
    int is_scrub = ps->pss_func == POOL_SCAN_SCRUB;
    assert(is_resilver || is_scrub);

    /* Scan is finished or canceled. */
    if (ps->pss_state == DSS_FINISHED) {
        secs_to_dhms(end - start, time_buf);

        if (is_scrub) {
            (void) printf(gettext("scrub repaired %s "
                "in %s with %llu errors on %s"), processed_buf,
                time_buf, (u_longlong_t)ps->pss_errors,
                ctime(&end));
        } else if (is_resilver) {
            (void) printf(gettext("resilvered %s "
                "in %s with %llu errors on %s"), processed_buf,
                time_buf, (u_longlong_t)ps->pss_errors,
                ctime(&end));
        }
        return;
    } else if (ps->pss_state == DSS_CANCELED) {
        if (is_scrub) {
            (void) printf(gettext("scrub canceled on %s"),
                ctime(&end));
        } else if (is_resilver) {
            (void) printf(gettext("resilver canceled on %s"),
                ctime(&end));
        }
        return;
    }

    assert(ps->pss_state == DSS_SCANNING);

    /* Scan is in progress. Resilvers can't be paused. */
    if (is_scrub) {
        if (pause == 0) {
            (void) printf(gettext("scrub in progress since %s"),
                ctime(&start));
        } else {
            (void) printf(gettext("scrub paused since %s"),
                ctime(&pause));
            (void) printf(gettext("\tscrub started on %s"),
                ctime(&start));
        }
    } else if (is_resilver) {
        (void) printf(gettext("resilver in progress since %s"),
            ctime(&start));
    }

    scanned = ps->pss_examined;
    pass_scanned = ps->pss_pass_exam;
    issued = ps->pss_issued;
    pass_issued = ps->pss_pass_issued;
    total_s = ps->pss_to_examine;
    total_i = ps->pss_to_examine - ps->pss_skipped;

    /* we are only done with a block once we have issued the IO for it */
    fraction_done = (double)issued / total_i;

    /* elapsed time for this pass, rounding up to 1 if it's 0 */
    elapsed = time(NULL) - ps->pss_pass_start;
    elapsed -= ps->pss_pass_scrub_spent_paused;
    elapsed = (elapsed != 0) ? elapsed : 1;

    scan_rate = pass_scanned / elapsed;
    issue_rate = pass_issued / elapsed;

    /* format all of the numbers we will be reporting */
    zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
    zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
    zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf));
    zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf));

    /* do not print estimated time if we have a paused scrub */
    (void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf);
    if (pause == 0 && scan_rate > 0) {
        zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
        (void) printf(gettext(" at %s/s"), srate_buf);
    }
    (void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf);
    if (pause == 0 && issue_rate > 0) {
        zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
        (void) printf(gettext(" at %s/s"), irate_buf);
    }
    (void) printf(gettext("\n"));

    if (is_resilver) {
        (void) printf(gettext("\t%s resilvered, %.2f%% done"),
            processed_buf, 100 * fraction_done);
    } else if (is_scrub) {
        (void) printf(gettext("\t%s repaired, %.2f%% done"),
            processed_buf, 100 * fraction_done);
    }

    if (pause == 0) {
        /*
         * Only provide an estimate iff:
         * 1) we haven't yet issued all we expected, and
         * 2) the issue rate exceeds 10 MB/s, and
         * 3) it's either:
         *    a) a resilver which has started repairs, or
         *    b) a scrub which has entered the issue phase.
         */
        if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 &&
            ((is_resilver && ps->pss_processed > 0) ||
            (is_scrub && issued > 0))) {
            secs_to_dhms((total_i - issued) / issue_rate, time_buf);
            (void) printf(gettext(", %s to go\n"), time_buf);
        } else {
            (void) printf(gettext(", no estimated "
                "completion time\n"));
        }
    } else {
        (void) printf(gettext("\n"));
    }
}

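/*
 * Print out detailed rebuild (sequential resilver) status for a single
 * top-level vdev.
 */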
static void
print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
{
    if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
        return;

    printf(" ");
    (void) printf_color(ANSI_BOLD, gettext("scan:"));
    printf(" ");

    uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
    uint64_t bytes_issued = vrs->vrs_bytes_issued;
    uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
    uint64_t bytes_est_s = vrs->vrs_bytes_est;
    uint64_t bytes_est_i = vrs->vrs_bytes_est;
    if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8)
        bytes_est_i -= vrs->vrs_pass_bytes_skipped;
    uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
        (vrs->vrs_pass_time_ms + 1)) * 1000;
    uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
        (vrs->vrs_pass_time_ms + 1)) * 1000;
    double scan_pct = MIN((double)bytes_scanned * 100 /
        (bytes_est_s + 1), 100);

    /* Format all of the numbers we will be reporting */
    char bytes_scanned_buf[7], bytes_issued_buf[7];
    char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7];
    char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
    zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
        sizeof (bytes_scanned_buf));
    zfs_nicebytes(bytes_issued, bytes_issued_buf,
        sizeof (bytes_issued_buf));
    zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
        sizeof (bytes_rebuilt_buf));
    zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf));
    zfs_nicebytes(bytes_est_i, bytes_est_i_buf, sizeof (bytes_est_i_buf));

    time_t start = vrs->vrs_start_time;
    time_t end = vrs->vrs_end_time;

    /* Rebuild is finished or canceled. */
    if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
        secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
        (void) printf(gettext("resilvered (%s) %s in %s "
            "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
            time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
        return;
    } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
        (void) printf(gettext("resilver (%s) canceled on %s"),
            vdev_name, ctime(&end));
        return;
    } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
        (void) printf(gettext("resilver (%s) in progress since %s"),
            vdev_name, ctime(&start));
    }

    assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);

    (void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf,
        bytes_est_s_buf);
    if (scan_rate > 0) {
        zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
        (void) printf(gettext(" at %s/s"), scan_rate_buf);
    }
    (void) printf(gettext(", %s / %s issued"), bytes_issued_buf,
        bytes_est_i_buf);
    if (issue_rate > 0) {
        zfs_nicebytes(issue_rate, issue_rate_buf,
            sizeof (issue_rate_buf));
        (void) printf(gettext(" at %s/s"), issue_rate_buf);
    }
    (void) printf(gettext("\n"));

    (void) printf(gettext("\t%s resilvered, %.2f%% done"),
        bytes_rebuilt_buf, scan_pct);

    if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
        if (bytes_est_s >= bytes_scanned &&
            scan_rate >= 10 * 1024 * 1024) {
            secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate,
                time_buf);
            (void) printf(gettext(", %s to go\n"), time_buf);
        } else {
            (void) printf(gettext(", no estimated "
                "completion time\n"));
        }
    } else {
        (void) printf(gettext("\n"));
    }
}

/*
 * Print rebuild status for top-level vdevs.
 */
static void
print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
    nvlist_t **child;
    uint_t children;

    if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)
        children = 0;

    for (uint_t c = 0; c < children; c++) {
        vdev_rebuild_stat_t *vrs;
        uint_t i;

        if (nvlist_lookup_uint64_array(child[c],
            ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
            char *name = zpool_vdev_name(g_zfs, zhp,
                child[c], VDEV_NAME_TYPE_ID);
            print_rebuild_status_impl(vrs, i, name);
            free(name);
        }
    }
}

/*
 * As we don't scrub checkpointed blocks, we want to warn the user that we
 * skipped scanning some blocks if a checkpoint exists or existed at any
 * time during the scan. If a sequential instead of healing reconstruction
 * was performed then the blocks were reconstructed. However, their checksums
 * have not been verified so we still print the warning.
 */
static void
print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
{
    if (ps == NULL || pcs == NULL)
        return;

    if (pcs->pcs_state == CS_NONE ||
        pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
        return;

    assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);

    if (ps->pss_state == DSS_NONE)
        return;

    if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
        ps->pss_end_time < pcs->pcs_start_time)
        return;

    if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
        (void) printf(gettext(" scan warning: skipped blocks "
            "that are only referenced by the checkpoint.\n"));
    } else {
        assert(ps->pss_state == DSS_SCANNING);
        (void) printf(gettext(" scan warning: skipping blocks "
            "that are only referenced by the checkpoint.\n"));
    }
}

/*
 * Returns B_TRUE if there is an active rebuild in progress. Otherwise,
 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for
 * the last completed (or cancelled) rebuild.
 */
static boolean_t
check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
{
    nvlist_t **child;
    uint_t children;
    boolean_t rebuilding = B_FALSE;
    uint64_t end_time = 0;

    if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)
        children = 0;

    for (uint_t c = 0; c < children; c++) {
        vdev_rebuild_stat_t *vrs;
        uint_t i;

        if (nvlist_lookup_uint64_array(child[c],
            ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {

            if (vrs->vrs_end_time > end_time)
                end_time = vrs->vrs_end_time;

            if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
                rebuilding = B_TRUE;
                end_time = 0;
                break;
            }
        }
    }

    if (rebuild_end_time != NULL)
        *rebuild_end_time = end_time;

    return (rebuilding);
}

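/*
 * Collect the stats of one vdev (and, recursively, its children) into
 * an nvlist for the JSON status output.
 */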
static void
vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    int depth, boolean_t isspare, char *parent, nvlist_t *item)
{
    nvlist_t *vds, **child, *ch = NULL;
    uint_t vsc, children;
    vdev_stat_t *vs;
    char *vname;
    uint64_t notpresent;
    const char *type, *path;

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)
        children = 0;
    verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
        (uint64_t **)&vs, &vsc) == 0);
    verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
    if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
        return;

    if (cb->cb_print_unhealthy && depth > 0 &&
        for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) {
        return;
    }
    vname = zpool_vdev_name(g_zfs, zhp, nv,
        cb->cb_name_flags | VDEV_NAME_TYPE_ID);
    vds = fnvlist_alloc();
    fill_vdev_info(vds, zhp, vname, B_FALSE, cb->cb_json_as_int);
    if (cb->cb_flat_vdevs && parent != NULL) {
        fnvlist_add_string(vds, "parent", parent);
    }

    if (isspare) {
        if (vs->vs_aux == VDEV_AUX_SPARED) {
            fnvlist_add_string(vds, "state", "INUSE");
            used_by_other(zhp, nv, vds);
        } else if (vs->vs_state == VDEV_STATE_HEALTHY)
            fnvlist_add_string(vds, "state", "AVAIL");
    } else {
        if (vs->vs_alloc) {
            nice_num_str_nvlist(vds, "alloc_space", vs->vs_alloc,
                cb->cb_literal, cb->cb_json_as_int,
                ZFS_NICENUM_BYTES);
        }
        if (vs->vs_space) {
            nice_num_str_nvlist(vds, "total_space", vs->vs_space,
                cb->cb_literal, cb->cb_json_as_int,
                ZFS_NICENUM_BYTES);
        }
        if (vs->vs_dspace) {
            nice_num_str_nvlist(vds, "def_space", vs->vs_dspace,
                cb->cb_literal, cb->cb_json_as_int,
                ZFS_NICENUM_BYTES);
        }
        if (vs->vs_rsize) {
            nice_num_str_nvlist(vds, "rep_dev_size", vs->vs_rsize,
                cb->cb_literal, cb->cb_json_as_int,
                ZFS_NICENUM_BYTES);
        }
        if (vs->vs_esize) {
            nice_num_str_nvlist(vds, "ex_dev_size", vs->vs_esize,
                cb->cb_literal, cb->cb_json_as_int,
                ZFS_NICENUM_BYTES);
        }
        if (vs->vs_self_healed) {
            nice_num_str_nvlist(vds, "self_healed",
                vs->vs_self_healed, cb->cb_literal,
                cb->cb_json_as_int, ZFS_NICENUM_BYTES);
        }
        if (vs->vs_pspace) {
            nice_num_str_nvlist(vds, "phys_space", vs->vs_pspace,
                cb->cb_literal, cb->cb_json_as_int,
                ZFS_NICENUM_BYTES);
        }
        nice_num_str_nvlist(vds, "read_errors", vs->vs_read_errors,
            cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
        nice_num_str_nvlist(vds, "write_errors", vs->vs_write_errors,
            cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
        nice_num_str_nvlist(vds, "checksum_errors",
            vs->vs_checksum_errors, cb->cb_literal,
            cb->cb_json_as_int, ZFS_NICENUM_1024);
        if (vs->vs_scan_processed) {
            nice_num_str_nvlist(vds, "scan_processed",
                vs->vs_scan_processed, cb->cb_literal,
                cb->cb_json_as_int, ZFS_NICENUM_BYTES);
        }
        if (vs->vs_checkpoint_space) {
            nice_num_str_nvlist(vds, "checkpoint_space",
                vs->vs_checkpoint_space, cb->cb_literal,
                cb->cb_json_as_int, ZFS_NICENUM_BYTES);
        }
        if (vs->vs_resilver_deferred) {
            nice_num_str_nvlist(vds, "resilver_deferred",
                vs->vs_resilver_deferred, B_TRUE,
                cb->cb_json_as_int, ZFS_NICENUM_1024);
        }
        if (children == 0) {
            nice_num_str_nvlist(vds, "slow_ios", vs->vs_slow_ios,
                cb->cb_literal, cb->cb_json_as_int,
                ZFS_NICENUM_1024);
        }
        if (cb->cb_print_power) {
            if (children == 0) {
                /* Only leaf vdevs have physical slots */
                switch (zpool_power_current_state(zhp, (char *)
                    fnvlist_lookup_string(nv,
                    ZPOOL_CONFIG_PATH))) {
                case 0:
                    fnvlist_add_string(vds, "power_state",
                        "off");
                    break;
                case 1:
                    fnvlist_add_string(vds, "power_state",
                        "on");
                    break;
                default:
                    fnvlist_add_string(vds, "power_state",
                        "-");
                }
            } else {
                fnvlist_add_string(vds, "power_state", "-");
            }
        }
    }

    if (cb->cb_print_dio_verify) {
        nice_num_str_nvlist(vds, "dio_verify_errors",
            vs->vs_dio_verify_errors, cb->cb_literal,
            cb->cb_json_as_int, ZFS_NICENUM_1024);
    }

    if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
        &notpresent) == 0) {
        nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,
            1, B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
        fnvlist_add_string(vds, "was",
            fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH));
    } else if (vs->vs_aux != VDEV_AUX_NONE) {
        fnvlist_add_string(vds, "aux", vdev_aux_str[vs->vs_aux]);
    } else if (children == 0 && !isspare &&
        getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
        VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
        vs->vs_configured_ashift < vs->vs_physical_ashift) {
        nice_num_str_nvlist(vds, "configured_ashift",
            vs->vs_configured_ashift, B_TRUE, cb->cb_json_as_int,
            ZFS_NICENUM_1024);
        nice_num_str_nvlist(vds, "physical_ashift",
            vs->vs_physical_ashift, B_TRUE, cb->cb_json_as_int,
            ZFS_NICENUM_1024);
    }
    if (vs->vs_scan_removing != 0) {
        nice_num_str_nvlist(vds, "removing", vs->vs_scan_removing,
            B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
    } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
        nice_num_str_nvlist(vds, "noalloc", vs->vs_noalloc,
            B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
    }

    if (cb->vcdl != NULL) {
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
            zpool_nvlist_cmd(cb->vcdl, zpool_get_name(zhp),
                path, vds);
        }
    }

    if (children == 0) {
        if (cb->cb_print_vdev_init) {
            if (vs->vs_initialize_state != 0) {
                uint64_t st = vs->vs_initialize_state;
                fnvlist_add_string(vds, "init_state",
                    vdev_init_state_str[st]);
                nice_num_str_nvlist(vds, "initialized",
                    vs->vs_initialize_bytes_done,
                    cb->cb_literal, cb->cb_json_as_int,
                    ZFS_NICENUM_BYTES);
                nice_num_str_nvlist(vds, "to_initialize",
                    vs->vs_initialize_bytes_est,
                    cb->cb_literal, cb->cb_json_as_int,
                    ZFS_NICENUM_BYTES);
                nice_num_str_nvlist(vds, "init_time",
                    vs->vs_initialize_action_time,
                    cb->cb_literal, cb->cb_json_as_int,
                    ZFS_NICE_TIMESTAMP);
                nice_num_str_nvlist(vds, "init_errors",
                    vs->vs_initialize_errors,
                    cb->cb_literal, cb->cb_json_as_int,
                    ZFS_NICENUM_1024);
            } else {
                fnvlist_add_string(vds, "init_state",
                    "UNINITIALIZED");
            }
        }
        if (cb->cb_print_vdev_trim) {
            if (vs->vs_trim_notsup == 0) {
                if (vs->vs_trim_state != 0) {
                    uint64_t st = vs->vs_trim_state;
                    fnvlist_add_string(vds, "trim_state",
                        vdev_trim_state_str[st]);
                    nice_num_str_nvlist(vds, "trimmed",
                        vs->vs_trim_bytes_done,
                        cb->cb_literal, cb->cb_json_as_int,
                        ZFS_NICENUM_BYTES);
                    nice_num_str_nvlist(vds, "to_trim",
                        vs->vs_trim_bytes_est,
                        cb->cb_literal, cb->cb_json_as_int,
                        ZFS_NICENUM_BYTES);
                    nice_num_str_nvlist(vds, "trim_time",
                        vs->vs_trim_action_time,
                        cb->cb_literal, cb->cb_json_as_int,
                        ZFS_NICE_TIMESTAMP);
                    nice_num_str_nvlist(vds, "trim_errors",
                        vs->vs_trim_errors,
                        cb->cb_literal, cb->cb_json_as_int,
                        ZFS_NICENUM_1024);
                } else
                    fnvlist_add_string(vds, "trim_state",
                        "UNTRIMMED");
            }
            nice_num_str_nvlist(vds, "trim_notsup",
                vs->vs_trim_notsup, B_TRUE,
                cb->cb_json_as_int, ZFS_NICENUM_1024);
        }
    } else {
        ch = fnvlist_alloc();
    }

    if (cb->cb_flat_vdevs && children == 0) {
        fnvlist_add_nvlist(item, vname, vds);
    }

    for (int c = 0; c < children; c++) {
        uint64_t islog = B_FALSE, ishole = B_FALSE;
        (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
            &islog);
        (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
            &ishole);
        if (islog || ishole)
            continue;
        if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
            continue;
        if (cb->cb_flat_vdevs) {
            vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
                vname, item);
        }
        vdev_stats_nvlist(zhp, cb, child[c], depth + 2, isspare,
            vname, ch);
    }

    if (ch != NULL) {
        if (!nvlist_empty(ch))
            fnvlist_add_nvlist(vds, "vdevs", ch);
        fnvlist_free(ch);
    }
    fnvlist_add_nvlist(item, vname, vds);
    fnvlist_free(vds);
    free(vname);
}

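/*
 * Collect the stats of every vdev belonging to the given allocation
 * class into an nvlist for the JSON status output.
 */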
static void
class_vdevs_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    const char *class, nvlist_t *item)
{
    uint_t c, children;
    nvlist_t **child;
    nvlist_t *class_obj = NULL;

    if (!cb->cb_flat_vdevs)
        class_obj = fnvlist_alloc();

    assert(zhp != NULL || !cb->cb_verbose);

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
        &children) != 0)
        return;

    for (c = 0; c < children; c++) {
        uint64_t is_log = B_FALSE;
        const char *bias = NULL;
        const char *type = NULL;
        char *name = zpool_vdev_name(g_zfs, zhp, child[c],
            cb->cb_name_flags | VDEV_NAME_TYPE_ID);

        (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
            &is_log);

        if (is_log) {
            bias = (char *)VDEV_ALLOC_CLASS_LOGS;
        } else {
            (void) nvlist_lookup_string(child[c],
                ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
            (void) nvlist_lookup_string(child[c],
                ZPOOL_CONFIG_TYPE, &type);
        }

        if (bias == NULL || strcmp(bias, class) != 0)
            continue;
        if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
            continue;

        if (cb->cb_flat_vdevs) {
            vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
                NULL, item);
        } else {
            vdev_stats_nvlist(zhp, cb, child[c], 2, B_FALSE,
                NULL, class_obj);
        }
        free(name);
    }
    if (!cb->cb_flat_vdevs) {
        if (!nvlist_empty(class_obj))
            fnvlist_add_nvlist(item, class, class_obj);
        fnvlist_free(class_obj);
    }
}

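/*
 * Collect the stats of the pool's cache (L2ARC) devices into an nvlist
 * for the JSON status output.
 */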
static void
l2cache_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    nvlist_t *item)
{
    nvlist_t *l2c = NULL, **l2cache;
    uint_t nl2cache;
    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
        &l2cache, &nl2cache) == 0) {
        if (nl2cache == 0)
            return;
        if (!cb->cb_flat_vdevs)
            l2c = fnvlist_alloc();
        for (int i = 0; i < nl2cache; i++) {
            if (cb->cb_flat_vdevs) {
                vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
                    B_FALSE, NULL, item);
            } else {
                vdev_stats_nvlist(zhp, cb, l2cache[i], 2,
                    B_FALSE, NULL, l2c);
            }
        }
    }
    if (!cb->cb_flat_vdevs) {
        if (!nvlist_empty(l2c))
            fnvlist_add_nvlist(item, "l2cache", l2c);
        fnvlist_free(l2c);
    }
}

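/*
 * Collect the stats of the pool's hot spares into an nvlist for the
 * JSON status output.
 */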
static void
spares_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
    nvlist_t *item)
{
    nvlist_t *sp = NULL, **spares;
    uint_t nspares;
    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
        &spares, &nspares) == 0) {
        if (nspares == 0)
            return;
        if (!cb->cb_flat_vdevs)
            sp = fnvlist_alloc();
        for (int i = 0; i < nspares; i++) {
            if (cb->cb_flat_vdevs) {
                vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
                    NULL, item);
            } else {
                vdev_stats_nvlist(zhp, cb, spares[i], 2, B_TRUE,
                    NULL, sp);
            }
        }
    }
    if (!cb->cb_flat_vdevs) {
        if (!nvlist_empty(sp))
            fnvlist_add_nvlist(item, "spares", sp);
        fnvlist_free(sp);
    }
}

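/*
 * Add the pool's persistent error count to 'item'; in verbose mode also
 * attach the list of paths affected by those errors.
 */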
static void
errors_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
{
    uint64_t nerr;
    nvlist_t *config = zpool_get_config(zhp, NULL);
    if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
        &nerr) == 0) {
        nice_num_str_nvlist(item, ZPOOL_CONFIG_ERRCOUNT, nerr,
            cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
        if (nerr != 0 && cb->cb_verbose) {
            nvlist_t *nverrlist = NULL;
            if (zpool_get_errlog(zhp, &nverrlist) == 0) {
                int i = 0;
                int count = 0;
                size_t len = MAXPATHLEN * 2;
                nvpair_t *elem = NULL;

                for (nvpair_t *pair =
                    nvlist_next_nvpair(nverrlist, NULL);
                    pair != NULL;
                    pair = nvlist_next_nvpair(nverrlist, pair))
                    count++;
                char **errl = (char **)malloc(
                    count * sizeof (char *));

                while ((elem = nvlist_next_nvpair(nverrlist,
                    elem)) != NULL) {
                    nvlist_t *nv;
                    uint64_t dsobj, obj;

                    verify(nvpair_value_nvlist(elem,
                        &nv) == 0);
                    verify(nvlist_lookup_uint64(nv,
                        ZPOOL_ERR_DATASET, &dsobj) == 0);
                    verify(nvlist_lookup_uint64(nv,
                        ZPOOL_ERR_OBJECT, &obj) == 0);
                    errl[i] = safe_malloc(len);
                    zpool_obj_to_path(zhp, dsobj, obj,
                        errl[i++], len);
                }
                nvlist_free(nverrlist);
                fnvlist_add_string_array(item, "errlist",
                    (const char **)errl, count);
                for (int i = 0; i < count; ++i)
                    free(errl[i]);
                free(errl);
            } else
                fnvlist_add_string(item, "errlist",
                    strerror(errno));
        }
    }
}

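/*
 * Convert a single ddt_stat_t (dedup table statistics) into nvlist
 * entries on 'item'.
 */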
static void
ddt_stats_nvlist(ddt_stat_t *dds, status_cbdata_t *cb, nvlist_t *item)
{
    nice_num_str_nvlist(item, "blocks", dds->dds_blocks,
        cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
    nice_num_str_nvlist(item, "logical_size", dds->dds_lsize,
        cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
    nice_num_str_nvlist(item, "physical_size", dds->dds_psize,
        cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
    nice_num_str_nvlist(item, "deflated_size", dds->dds_dsize,
        cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
    nice_num_str_nvlist(item, "ref_blocks", dds->dds_ref_blocks,
        cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
    nice_num_str_nvlist(item, "ref_lsize", dds->dds_ref_lsize,
        cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
    nice_num_str_nvlist(item, "ref_psize", dds->dds_ref_psize,
        cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
    nice_num_str_nvlist(item, "ref_dsize", dds->dds_ref_dsize,
        cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
}

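/*
 * Collect the pool's dedup (DDT) object, table, and histogram
 * statistics into an nvlist for the JSON status output.
 */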
static void
dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
{
    nvlist_t *config;
    if (cb->cb_dedup_stats) {
        ddt_histogram_t *ddh;
        ddt_stat_t *dds;
        ddt_object_t *ddo;
        nvlist_t *ddt_stat, *ddt_obj, *dedup;
        uint_t c;
        uint64_t cspace_prop;

        config = zpool_get_config(zhp, NULL);
        if (nvlist_lookup_uint64_array(config,
            ZPOOL_CONFIG_DDT_OBJ_STATS, (uint64_t **)&ddo, &c) != 0)
            return;

        dedup = fnvlist_alloc();
        ddt_obj = fnvlist_alloc();
        nice_num_str_nvlist(dedup, "obj_count", ddo->ddo_count,
            cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
        if (ddo->ddo_count == 0) {
            fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
                ddt_obj);
            fnvlist_add_nvlist(item, "dedup_stats", dedup);
            fnvlist_free(ddt_obj);
            fnvlist_free(dedup);
            return;
        } else {
            nice_num_str_nvlist(dedup, "dspace", ddo->ddo_dspace,
                cb->cb_literal, cb->cb_json_as_int,
                ZFS_NICENUM_1024);
            nice_num_str_nvlist(dedup, "mspace", ddo->ddo_mspace,
                cb->cb_literal, cb->cb_json_as_int,
                ZFS_NICENUM_1024);
            /*
             * Squash cached size into in-core size to handle race.
             * Only include cached size if it is available.
             */
            cspace_prop = zpool_get_prop_int(zhp,
                ZPOOL_PROP_DEDUPCACHED, NULL);
            cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
            nice_num_str_nvlist(dedup, "cspace", cspace_prop,
                cb->cb_literal, cb->cb_json_as_int,
                ZFS_NICENUM_1024);
        }

        ddt_stat = fnvlist_alloc();
        if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
            (uint64_t **)&dds, &c) == 0) {
            nvlist_t *total = fnvlist_alloc();
            if (dds->dds_blocks == 0)
                fnvlist_add_string(total, "blocks", "0");
            else
                ddt_stats_nvlist(dds, cb, total);
            fnvlist_add_nvlist(ddt_stat, "total", total);
            fnvlist_free(total);
        }
        if (nvlist_lookup_uint64_array(config,
            ZPOOL_CONFIG_DDT_HISTOGRAM, (uint64_t **)&ddh, &c) == 0) {
            nvlist_t *hist = fnvlist_alloc();
            nvlist_t *entry = NULL;
            char buf[16];
            for (int h = 0; h < 64; h++) {
                if (ddh->ddh_stat[h].dds_blocks != 0) {
                    entry = fnvlist_alloc();
                    ddt_stats_nvlist(&ddh->ddh_stat[h], cb,
                        entry);
                    (void) snprintf(buf, 16, "%d", h);
                    fnvlist_add_nvlist(hist, buf, entry);
                    fnvlist_free(entry);
                }
            }
            if (!nvlist_empty(hist))
                fnvlist_add_nvlist(ddt_stat, "histogram", hist);
            fnvlist_free(hist);
        }

        if (!nvlist_empty(ddt_obj)) {
            fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_OBJ_STATS,
                ddt_obj);
        }
        fnvlist_free(ddt_obj);
        if (!nvlist_empty(ddt_stat)) {
            fnvlist_add_nvlist(dedup, ZPOOL_CONFIG_DDT_STATS,
                ddt_stat);
        }
        fnvlist_free(ddt_stat);
        if (!nvlist_empty(dedup))
            fnvlist_add_nvlist(item, "dedup_stats", dedup);
        fnvlist_free(dedup);
    }
}

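/*
 * Collect the progress of a RAID-Z expansion, if any, into an nvlist
 * for the JSON status output.
 */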
static void
raidz_expand_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
    nvlist_t *nvroot, nvlist_t *item)
{
    uint_t c;
    pool_raidz_expand_stat_t *pres = NULL;
    if (nvlist_lookup_uint64_array(nvroot,
        ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c) == 0) {
        nvlist_t **child;
        uint_t children;
        nvlist_t *nv = fnvlist_alloc();
        verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0);
        assert(pres->pres_expanding_vdev < children);
        char *name =
            zpool_vdev_name(g_zfs, zhp,
            child[pres->pres_expanding_vdev], 0);
        fill_vdev_info(nv, zhp, name, B_FALSE, cb->cb_json_as_int);
        fnvlist_add_string(nv, "state",
            pool_scan_state_str[pres->pres_state]);
        nice_num_str_nvlist(nv, "expanding_vdev",
            pres->pres_expanding_vdev, B_TRUE, cb->cb_json_as_int,
            ZFS_NICENUM_1024);
        nice_num_str_nvlist(nv, "start_time", pres->pres_start_time,
            cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
        nice_num_str_nvlist(nv, "end_time", pres->pres_end_time,
            cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
        nice_num_str_nvlist(nv, "to_reflow", pres->pres_to_reflow,
            cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
        nice_num_str_nvlist(nv, "reflowed", pres->pres_reflowed,
            cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
        nice_num_str_nvlist(nv, "waiting_for_resilver",
            pres->pres_waiting_for_resilver, B_TRUE,
            cb->cb_json_as_int, ZFS_NICENUM_1024);
        fnvlist_add_nvlist(item, ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, nv);
        fnvlist_free(nv);
        free(name);
    }
}

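/*
 * Collect the pool checkpoint state, start time, and space usage into
 * an nvlist for the JSON status output.
 */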
static void
checkpoint_status_nvlist(nvlist_t *nvroot, status_cbdata_t *cb,
    nvlist_t *item)
{
	uint_t c;
	pool_checkpoint_stat_t *pcs = NULL;
	if (nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c) == 0) {
		nvlist_t *nv = fnvlist_alloc();
		fnvlist_add_string(nv, "state",
		    checkpoint_state_str[pcs->pcs_state]);
		nice_num_str_nvlist(nv, "start_time",
		    pcs->pcs_start_time, cb->cb_literal, cb->cb_json_as_int,
		    ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(nv, "space",
		    pcs->pcs_space, cb->cb_literal, cb->cb_json_as_int,
		    ZFS_NICENUM_BYTES);
		fnvlist_add_nvlist(item, ZPOOL_CONFIG_CHECKPOINT_STATS, nv);
		fnvlist_free(nv);
	}
}

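/*
 * Collect device removal progress, if a removal is or was in progress,
 * for the JSON status output.
 */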
static void
removal_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
    nvlist_t *nvroot, nvlist_t *item)
{
	uint_t c;
	pool_removal_stat_t *prs = NULL;
	if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_REMOVAL_STATS,
	    (uint64_t **)&prs, &c) == 0) {
		if (prs->prs_state != DSS_NONE) {
			nvlist_t **child;
			uint_t children;
			verify(nvlist_lookup_nvlist_array(nvroot,
			    ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
			assert(prs->prs_removing_vdev < children);
			char *vdev_name = zpool_vdev_name(g_zfs, zhp,
			    child[prs->prs_removing_vdev], B_TRUE);
			nvlist_t *nv = fnvlist_alloc();
			fill_vdev_info(nv, zhp, vdev_name, B_FALSE,
			    cb->cb_json_as_int);
			fnvlist_add_string(nv, "state",
			    pool_scan_state_str[prs->prs_state]);
			nice_num_str_nvlist(nv, "removing_vdev",
			    prs->prs_removing_vdev, B_TRUE, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			nice_num_str_nvlist(nv, "start_time",
			    prs->prs_start_time, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(nv, "end_time", prs->prs_end_time,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(nv, "to_copy", prs->prs_to_copy,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
			nice_num_str_nvlist(nv, "copied", prs->prs_copied,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_BYTES);
			nice_num_str_nvlist(nv, "mapping_memory",
			    prs->prs_mapping_memory, cb->cb_literal,
			    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
			fnvlist_add_nvlist(item,
			    ZPOOL_CONFIG_REMOVAL_STATS, nv);
			fnvlist_free(nv);
			free(vdev_name);
		}
	}
}

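/*
 * Collect scrub/resilver/error-scrub progress and any per-child rebuild
 * statistics for the JSON status output.
 */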
static void
scan_status_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb,
    nvlist_t *nvroot, nvlist_t *item)
{
	pool_scan_stat_t *ps = NULL;
	uint_t c;
	nvlist_t *scan = fnvlist_alloc();
	nvlist_t **child;
	uint_t children;

	if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &c) == 0) {
		fnvlist_add_string(scan, "function",
		    pool_scan_func_str[ps->pss_func]);
		fnvlist_add_string(scan, "state",
		    pool_scan_state_str[ps->pss_state]);
		nice_num_str_nvlist(scan, "start_time", ps->pss_start_time,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(scan, "end_time", ps->pss_end_time,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(scan, "to_examine", ps->pss_to_examine,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "examined", ps->pss_examined,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "skipped", ps->pss_skipped,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "processed", ps->pss_processed,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "errors", ps->pss_errors,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(scan, "bytes_per_scan", ps->pss_pass_exam,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "pass_start", ps->pss_pass_start,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(scan, "scrub_pause",
		    ps->pss_pass_scrub_pause, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICE_TIMESTAMP);
		nice_num_str_nvlist(scan, "scrub_spent_paused",
		    ps->pss_pass_scrub_spent_paused,
		    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
		nice_num_str_nvlist(scan, "issued_bytes_per_scan",
		    ps->pss_pass_issued, cb->cb_literal,
		    cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		nice_num_str_nvlist(scan, "issued", ps->pss_issued,
		    cb->cb_literal, cb->cb_json_as_int, ZFS_NICENUM_BYTES);
		if (ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
		    ps->pss_error_scrub_start > ps->pss_start_time) {
			fnvlist_add_string(scan, "err_scrub_func",
			    pool_scan_func_str[ps->pss_error_scrub_func]);
			fnvlist_add_string(scan, "err_scrub_state",
			    pool_scan_state_str[ps->pss_error_scrub_state]);
			nice_num_str_nvlist(scan, "err_scrub_start_time",
			    ps->pss_error_scrub_start,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(scan, "err_scrub_end_time",
			    ps->pss_error_scrub_end,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICE_TIMESTAMP);
			nice_num_str_nvlist(scan, "err_scrub_examined",
			    ps->pss_error_scrub_examined,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			nice_num_str_nvlist(scan, "err_scrub_to_examine",
			    ps->pss_error_scrub_to_be_examined,
			    cb->cb_literal, cb->cb_json_as_int,
			    ZFS_NICENUM_1024);
			nice_num_str_nvlist(scan, "err_scrub_pause",
			    ps->pss_pass_error_scrub_pause,
			    B_TRUE, cb->cb_json_as_int, ZFS_NICENUM_1024);
		}
	}

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		vdev_rebuild_stat_t *vrs;
		uint_t i;
		char *name;
		nvlist_t *nv;
		nvlist_t *rebuild = fnvlist_alloc();
		uint64_t st;
		for (uint_t c = 0; c < children; c++) {
			if (nvlist_lookup_uint64_array(child[c],
			    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs,
			    &i) == 0) {
				if (vrs->vrs_state != VDEV_REBUILD_NONE) {
					nv = fnvlist_alloc();
					name = zpool_vdev_name(g_zfs, zhp,
					    child[c], VDEV_NAME_TYPE_ID);
					fill_vdev_info(nv, zhp, name, B_FALSE,
					    cb->cb_json_as_int);
					st = vrs->vrs_state;
					fnvlist_add_string(nv, "state",
					    vdev_rebuild_state_str[st]);
					nice_num_str_nvlist(nv, "start_time",
					    vrs->vrs_start_time, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICE_TIMESTAMP);
					nice_num_str_nvlist(nv, "end_time",
					    vrs->vrs_end_time, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICE_TIMESTAMP);
					nice_num_str_nvlist(nv, "scan_time",
					    vrs->vrs_scan_time_ms * 1000000,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_TIME);
					nice_num_str_nvlist(nv, "scanned",
					    vrs->vrs_bytes_scanned,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "issued",
					    vrs->vrs_bytes_issued,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "rebuilt",
					    vrs->vrs_bytes_rebuilt,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "to_scan",
					    vrs->vrs_bytes_est, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "errors",
					    vrs->vrs_errors, cb->cb_literal,
					    cb->cb_json_as_int,
					    ZFS_NICENUM_1024);
					nice_num_str_nvlist(nv, "pass_time",
					    vrs->vrs_pass_time_ms * 1000000,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_TIME);
					nice_num_str_nvlist(nv, "pass_scanned",
					    vrs->vrs_pass_bytes_scanned,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "pass_issued",
					    vrs->vrs_pass_bytes_issued,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					nice_num_str_nvlist(nv, "pass_skipped",
					    vrs->vrs_pass_bytes_skipped,
					    cb->cb_literal, cb->cb_json_as_int,
					    ZFS_NICENUM_BYTES);
					fnvlist_add_nvlist(rebuild, name, nv);
					free(name);
				}
			}
		}
		if (!nvlist_empty(rebuild))
			fnvlist_add_nvlist(scan, "rebuild_stats", rebuild);
		fnvlist_free(rebuild);
	}

	if (!nvlist_empty(scan))
		fnvlist_add_nvlist(item, ZPOOL_CONFIG_SCAN_STATS, scan);
	fnvlist_free(scan);
}

/*
 * Print the scan status.
 */
static void
print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	uint64_t rebuild_end_time = 0, resilver_end_time = 0;
	boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
	boolean_t have_errorscrub = B_FALSE;
	boolean_t active_resilver = B_FALSE;
	pool_checkpoint_stat_t *pcs = NULL;
	pool_scan_stat_t *ps = NULL;
	uint_t c;
	time_t scrub_start = 0, errorscrub_start = 0;

	if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &c) == 0) {
		if (ps->pss_func == POOL_SCAN_RESILVER) {
			resilver_end_time = ps->pss_end_time;
			active_resilver = (ps->pss_state == DSS_SCANNING);
		}

		have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
		have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
		scrub_start = ps->pss_start_time;
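		/*
		 * "c" is the number of uint64_t words the kernel returned
		 * for the scan stats; the error-scrub fields are only
		 * present when the newer, larger pool_scan_stat_t was
		 * returned, so compare against the offset of the last
		 * error-scrub member.
		 */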
		if (c > offsetof(pool_scan_stat_t,
		    pss_pass_error_scrub_pause) / 8) {
			have_errorscrub = (ps->pss_error_scrub_func ==
			    POOL_SCAN_ERRORSCRUB);
			errorscrub_start = ps->pss_error_scrub_start;
		}
	}

	boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
	boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));

	/* Always print the scrub status when available. */
	if (have_scrub && scrub_start > errorscrub_start)
		print_scan_scrub_resilver_status(ps);
	else if (have_errorscrub && errorscrub_start >= scrub_start)
		print_err_scrub_status(ps);

	/*
	 * When there is an active resilver or rebuild print its status.
	 * Otherwise print the status of the last resilver or rebuild.
	 */
	if (active_resilver || (!active_rebuild && have_resilver &&
	    resilver_end_time && resilver_end_time > rebuild_end_time)) {
		print_scan_scrub_resilver_status(ps);
	} else if (active_rebuild || (!active_resilver && have_rebuild &&
	    rebuild_end_time && rebuild_end_time > resilver_end_time)) {
		print_rebuild_status(zhp, nvroot);
	}

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
	print_checkpoint_scan_warning(ps, pcs);
}

/*
 * Print out detailed removal status.
 */
static void
print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
{
	char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
	time_t start, end;
	nvlist_t *config, *nvroot;
	nvlist_t **child;
	uint_t children;
	char *vdev_name;

	if (prs == NULL || prs->prs_state == DSS_NONE)
		return;

	/*
	 * Determine name of vdev.
	 */
	config = zpool_get_config(zhp, NULL);
	nvroot = fnvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE);
	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);
	assert(prs->prs_removing_vdev < children);
	vdev_name = zpool_vdev_name(g_zfs, zhp,
	    child[prs->prs_removing_vdev], B_TRUE);

	(void) printf_color(ANSI_BOLD, gettext("remove: "));

	start = prs->prs_start_time;
	end = prs->prs_end_time;
	zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));

	/*
	 * Removal is finished or canceled.
	 */
	if (prs->prs_state == DSS_FINISHED) {
		uint64_t minutes_taken = (end - start) / 60;

		(void) printf(gettext("Removal of vdev %llu copied %s "
		    "in %lluh%um, completed on %s"),
		    (longlong_t)prs->prs_removing_vdev,
		    copied_buf,
		    (u_longlong_t)(minutes_taken / 60),
		    (uint_t)(minutes_taken % 60),
		    ctime((time_t *)&end));
	} else if (prs->prs_state == DSS_CANCELED) {
		(void) printf(gettext("Removal of %s canceled on %s"),
		    vdev_name, ctime(&end));
	} else {
		uint64_t copied, total, elapsed, rate, mins_left, hours_left;
		double fraction_done;

		assert(prs->prs_state == DSS_SCANNING);

		/*
		 * Removal is in progress.
		 */
		(void) printf(gettext(
		    "Evacuation of %s in progress since %s"),
		    vdev_name, ctime(&start));

		copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
		total = prs->prs_to_copy;
		fraction_done = (double)copied / total;

		/* elapsed time for this pass */
		elapsed = time(NULL) - prs->prs_start_time;
		elapsed = elapsed > 0 ? elapsed : 1;
		rate = copied / elapsed;
		rate = rate > 0 ? rate : 1;
		mins_left = ((total - copied) / rate) / 60;
		hours_left = mins_left / 60;

		zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
		zfs_nicenum(total, total_buf, sizeof (total_buf));
		zfs_nicenum(rate, rate_buf, sizeof (rate_buf));

		/*
		 * do not print estimated time if hours_left is more than
		 * 30 days
		 */
		(void) printf(gettext(
		    "\t%s copied out of %s at %s/s, %.2f%% done"),
		    examined_buf, total_buf, rate_buf, 100 * fraction_done);
		if (hours_left < (30 * 24)) {
			(void) printf(gettext(", %lluh%um to go\n"),
			    (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
		} else {
			(void) printf(gettext(
			    ", (copy is slow, no estimated time)\n"));
		}
	}
	free(vdev_name);

	if (prs->prs_mapping_memory > 0) {
		char mem_buf[7];
		zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
		(void) printf(gettext(
		    "\t%s memory used for removed device mappings\n"),
		    mem_buf);
	}
}

/*
 * Print out detailed raidz expansion status.
 */
static void
print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
{
	char copied_buf[7];

	if (pres == NULL || pres->pres_state == DSS_NONE)
		return;

	/*
	 * Determine name of vdev.
	 */
	nvlist_t *config = zpool_get_config(zhp, NULL);
	nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE);
	nvlist_t **child;
	uint_t children;
	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0);
	assert(pres->pres_expanding_vdev < children);

	(void) printf_color(ANSI_BOLD, gettext("expand: "));

	time_t start = pres->pres_start_time;
	time_t end = pres->pres_end_time;
	char *vname =
	    zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);
	zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));

	/*
	 * Expansion is finished or canceled.
	 */
	if (pres->pres_state == DSS_FINISHED) {
		char time_buf[32];
		secs_to_dhms(end - start, time_buf);

		(void) printf(gettext("expanded %s-%u copied %s in %s, "
		    "on %s"), vname, (int)pres->pres_expanding_vdev,
		    copied_buf, time_buf, ctime((time_t *)&end));
	} else {
		char examined_buf[7], total_buf[7], rate_buf[7];
		uint64_t copied, total, elapsed, rate, secs_left;
		double fraction_done;

		assert(pres->pres_state == DSS_SCANNING);

		/*
		 * Expansion is in progress.
		 */
		(void) printf(gettext(
		    "expansion of %s-%u in progress since %s"),
		    vname, (int)pres->pres_expanding_vdev, ctime(&start));

		copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;
		total = pres->pres_to_reflow;
		fraction_done = (double)copied / total;

		/* elapsed time for this pass */
		elapsed = time(NULL) - pres->pres_start_time;
		elapsed = elapsed > 0 ? elapsed : 1;
		rate = copied / elapsed;
		rate = rate > 0 ? rate : 1;
		secs_left = (total - copied) / rate;

		zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
		zfs_nicenum(total, total_buf, sizeof (total_buf));
		zfs_nicenum(rate, rate_buf, sizeof (rate_buf));

		/*
		 * do not print estimated time if secs_left is more than
		 * 30 days
		 */
		(void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),
		    examined_buf, total_buf, rate_buf, 100 * fraction_done);
		if (pres->pres_waiting_for_resilver) {
			(void) printf(gettext(", paused for resilver or "
			    "clear\n"));
		} else if (secs_left < (30 * 24 * 3600)) {
			char time_buf[32];
			secs_to_dhms(secs_left, time_buf);
			(void) printf(gettext(", %s to go\n"), time_buf);
		} else {
			(void) printf(gettext(
			    ", (copy is slow, no estimated time)\n"));
		}
	}
	free(vname);
}

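/*
 * Print the pool checkpoint status, if a checkpoint exists or is being
 * discarded.
 */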
static void
print_checkpoint_status(pool_checkpoint_stat_t *pcs)
{
	time_t start;
	char space_buf[7];

	if (pcs == NULL || pcs->pcs_state == CS_NONE)
		return;

	(void) printf(gettext("checkpoint: "));

	start = pcs->pcs_start_time;
	zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));

	if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
		char *date = ctime(&start);

		/*
		 * ctime() adds a newline at the end of the generated
		 * string, thus the weird format specifier and the
		 * strlen() call used to chop it off from the output.
		 */
		(void) printf(gettext("created %.*s, consumes %s\n"),
		    (int)(strlen(date) - 1), date, space_buf);
		return;
	}

	assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);

	(void) printf(gettext("discarding, %s remaining.\n"),
	    space_buf);
}

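/*
 * Print the list of files with permanent errors, resolving each dataset
 * and object number in the error log to a pathname where possible.
 */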
static void
print_error_log(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem;
	char *pathname;
	size_t len = MAXPATHLEN * 2;

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return;

	(void) printf("errors: Permanent errors have been "
	    "detected in the following files:\n\n");

	pathname = safe_malloc(len);
	elem = NULL;
	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
		(void) printf("%7s %s\n", "", pathname);
	}
	free(pathname);
	nvlist_free(nverrlist);
}

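/*
 * Print the configured hot spares and their status.
 */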
static void
print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
    uint_t nspares)
{
	uint_t i;
	char *name;

	if (nspares == 0)
		return;

	(void) printf(gettext("\tspares\n"));

	for (i = 0; i < nspares; i++) {
		name = zpool_vdev_name(g_zfs, zhp, spares[i],
		    cb->cb_name_flags);
		print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
		free(name);
	}
}

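/*
 * Print the configured L2ARC cache devices and their status.
 */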
static void
print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
    uint_t nl2cache)
{
	uint_t i;
	char *name;

	if (nl2cache == 0)
		return;

	(void) printf(gettext("\tcache\n"));

	for (i = 0; i < nl2cache; i++) {
		name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
		    cb->cb_name_flags);
		print_status_config(zhp, cb, name, l2cache[i], 2,
		    B_FALSE, NULL);
		free(name);
	}
}

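/*
 * Print a summary of the DDT: entry count, on-disk and in-core sizes,
 * how much of the table is cached, and the full dedup table histogram.
 */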
static void
print_dedup_stats(zpool_handle_t *zhp, nvlist_t *config, boolean_t literal)
{
	ddt_histogram_t *ddh;
	ddt_stat_t *dds;
	ddt_object_t *ddo;
	uint_t c;
	/* Extra space provided for literal display */
	char dspace[32], mspace[32], cspace[32];
	uint64_t cspace_prop;
	enum zfs_nicenum_format format;
	zprop_source_t src;

	/*
	 * If the pool was faulted then we may not have been able to
	 * obtain the config. Otherwise, if we have anything in the dedup
	 * table continue processing the stats.
	 */
	if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
	    (uint64_t **)&ddo, &c) != 0)
		return;

	(void) printf("\n");
	(void) printf(gettext(" dedup: "));
	if (ddo->ddo_count == 0) {
		(void) printf(gettext("no DDT entries\n"));
		return;
	}

	/*
	 * Squash cached size into in-core size to handle race.
	 * Only include cached size if it is available.
	 */
	cspace_prop = zpool_get_prop_int(zhp, ZPOOL_PROP_DEDUPCACHED, &src);
	cspace_prop = MIN(cspace_prop, ddo->ddo_mspace);
	format = literal ? ZFS_NICENUM_RAW : ZFS_NICENUM_1024;
	zfs_nicenum_format(cspace_prop, cspace, sizeof (cspace), format);
	zfs_nicenum_format(ddo->ddo_dspace, dspace, sizeof (dspace), format);
	zfs_nicenum_format(ddo->ddo_mspace, mspace, sizeof (mspace), format);
	(void) printf("DDT entries %llu, size %s on disk, %s in core",
	    (u_longlong_t)ddo->ddo_count,
	    dspace,
	    mspace);
	if (src != ZPROP_SRC_DEFAULT) {
		(void) printf(", %s cached (%.02f%%)",
		    cspace,
		    (double)cspace_prop / (double)ddo->ddo_mspace * 100.0);
	}
	(void) printf("\n");

	verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
	    (uint64_t **)&dds, &c) == 0);
	verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
	    (uint64_t **)&ddh, &c) == 0);
	zpool_dump_ddt(dds, ddh);
}

#define	ST_SIZE	4096
#define	AC_SIZE	2048

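/*
 * Translate a zpool_status_t (and any errata) into human-readable "status:"
 * and "action:" text, then either print it or, for JSON output, attach it
 * to the given nvlist item.
 */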
static void
print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
    zpool_status_t reason, zpool_errata_t errata, nvlist_t *item)
{
	char status[ST_SIZE];
	char action[AC_SIZE];
	memset(status, 0, ST_SIZE);
	memset(action, 0, AC_SIZE);

	switch (reason) {
	case ZPOOL_STATUS_MISSING_DEV_R:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices could "
		    "not be opened. Sufficient replicas exist for\n\tthe pool "
		    "to continue functioning in a degraded state.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Attach the missing device "
		    "and online it using 'zpool online'.\n"));
		break;

	case ZPOOL_STATUS_MISSING_DEV_NR:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices could "
		    "not be opened. There are insufficient\n\treplicas for the"
		    " pool to continue functioning.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Attach the missing device "
		    "and online it using 'zpool online'.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_LABEL_R:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices could "
		    "not be used because the label is missing or\n\tinvalid. "
		    "Sufficient replicas exist for the pool to continue\n\t"
		    "functioning in a degraded state.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Replace the device using 'zpool replace'.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_LABEL_NR:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices could "
		    "not be used because the label is missing \n\tor invalid. "
		    "There are insufficient replicas for the pool to "
		    "continue\n\tfunctioning.\n"));
		zpool_explain_recover(zpool_get_handle(zhp),
		    zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
		    action, AC_SIZE);
		break;

	case ZPOOL_STATUS_FAILING_DEV:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices has "
		    "experienced an unrecoverable error. An\n\tattempt was "
		    "made to correct the error. Applications are "
		    "unaffected.\n"));
		(void) snprintf(action, AC_SIZE, gettext("Determine if the "
		    "device needs to be replaced, and clear the errors\n\tusing"
		    " 'zpool clear' or replace the device with 'zpool "
		    "replace'.\n"));
		break;

	case ZPOOL_STATUS_OFFLINE_DEV:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices has "
		    "been taken offline by the administrator.\n\tSufficient "
		    "replicas exist for the pool to continue functioning in "
		    "a\n\tdegraded state.\n"));
		(void) snprintf(action, AC_SIZE, gettext("Online the device "
		    "using 'zpool online' or replace the device with\n\t'zpool "
		    "replace'.\n"));
		break;

	case ZPOOL_STATUS_REMOVED_DEV:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices have "
		    "been removed.\n\tSufficient replicas exist for the pool "
		    "to continue functioning in a\n\tdegraded state.\n"));
		(void) snprintf(action, AC_SIZE, gettext("Online the device "
		    "using 'zpool online' or replace the device with\n\t'zpool "
		    "replace'.\n"));
		break;

	case ZPOOL_STATUS_RESILVERING:
	case ZPOOL_STATUS_REBUILDING:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices is "
		    "currently being resilvered. The pool will\n\tcontinue "
		    "to function, possibly in a degraded state.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Wait for the resilver to complete.\n"));
		break;

	case ZPOOL_STATUS_REBUILD_SCRUB:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices have "
		    "been sequentially resilvered, scrubbing\n\tthe pool "
		    "is recommended.\n"));
		(void) snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to "
		    "verify all data checksums.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_DATA:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices has "
		    "experienced an error resulting in data\n\tcorruption. "
		    "Applications may be affected.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Restore the file in question"
		    " if possible. Otherwise restore the\n\tentire pool from "
		    "backup.\n"));
		break;

	case ZPOOL_STATUS_CORRUPT_POOL:
		(void) snprintf(status, ST_SIZE, gettext("The pool metadata is "
		    "corrupted and the pool cannot be opened.\n"));
		zpool_explain_recover(zpool_get_handle(zhp),
		    zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
		    action, AC_SIZE);
		break;

	case ZPOOL_STATUS_VERSION_OLDER:
		(void) snprintf(status, ST_SIZE,
		    gettext("The pool is formatted using "
		    "a legacy on-disk format. The pool can\n\tstill be used, "
		    "but some features are unavailable.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Upgrade the pool using "
		    "'zpool upgrade'. Once this is done, the\n\tpool will no "
		    "longer be accessible on software that does not support\n\t"
		    "feature flags.\n"));
		break;

	case ZPOOL_STATUS_VERSION_NEWER:
		(void) snprintf(status, ST_SIZE,
		    gettext("The pool has been upgraded "
		    "to a newer, incompatible on-disk version.\n\tThe pool "
		    "cannot be accessed on this system.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Access the pool from a "
		    "system running more recent software, or\n\trestore the "
		    "pool from backup.\n"));
		break;

	case ZPOOL_STATUS_FEAT_DISABLED:
		(void) snprintf(status, ST_SIZE, gettext("Some supported and "
		    "requested features are not enabled on the pool.\n\t"
		    "The pool can still be used, but some features are "
		    "unavailable.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Enable all features using "
		    "'zpool upgrade'. Once this is done,\n\tthe pool may no "
		    "longer be accessible by software that does not support\n\t"
		    "the features. See zpool-features(7) for details.\n"));
		break;

	case ZPOOL_STATUS_COMPATIBILITY_ERR:
		(void) snprintf(status, ST_SIZE, gettext("This pool has a "
		    "compatibility list specified, but it could not be\n\t"
		    "read/parsed at this time. The pool can still be used, "
		    "but this\n\tshould be investigated.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Check the value of the "
		    "'compatibility' property against the\n\t"
		    "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
		    ZPOOL_DATA_COMPAT_D ".\n"));
		break;

	case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
		(void) snprintf(status, ST_SIZE, gettext("One or more features "
		    "are enabled on the pool despite not being\n\t"
		    "requested by the 'compatibility' property.\n"));
		(void) snprintf(action, AC_SIZE, gettext("Consider setting "
		    "'compatibility' to an appropriate value, or\n\t"
		    "adding needed features to the relevant file in\n\t"
		    ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
		break;

	case ZPOOL_STATUS_UNSUP_FEAT_READ:
		(void) snprintf(status, ST_SIZE,
		    gettext("The pool cannot be accessed "
		    "on this system because it uses the\n\tfollowing feature(s)"
		    " not supported on this system:\n"));
		zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
		    1024);
		(void) snprintf(action, AC_SIZE,
		    gettext("Access the pool from a "
		    "system that supports the required feature(s),\n\tor "
		    "restore the pool from backup.\n"));
		break;

	case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
		(void) snprintf(status, ST_SIZE, gettext("The pool can only be "
		    "accessed in read-only mode on this system. It\n\tcannot be"
		    " accessed in read-write mode because it uses the "
		    "following\n\tfeature(s) not supported on this system:\n"));
		zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
		    1024);
		(void) snprintf(action, AC_SIZE,
		    gettext("The pool cannot be accessed "
		    "in read-write mode. Import the pool with\n"
		    "\t\"-o readonly=on\", access the pool from a system that "
		    "supports the\n\trequired feature(s), or restore the "
		    "pool from backup.\n"));
		break;

	case ZPOOL_STATUS_FAULTED_DEV_R:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices are "
		    "faulted in response to persistent errors.\n\tSufficient "
		    "replicas exist for the pool to continue functioning "
		    "in a\n\tdegraded state.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Replace the faulted device, "
		    "or use 'zpool clear' to mark the device\n\trepaired.\n"));
		break;

	case ZPOOL_STATUS_FAULTED_DEV_NR:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices are "
		    "faulted in response to persistent errors. There are "
		    "insufficient replicas for the pool to\n\tcontinue "
		    "functioning.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Destroy and re-create the "
		    "pool from a backup source. Manually marking the device\n"
		    "\trepaired using 'zpool clear' may allow some data "
		    "to be recovered.\n"));
		break;

	case ZPOOL_STATUS_IO_FAILURE_MMP:
		(void) snprintf(status, ST_SIZE,
		    gettext("The pool is suspended "
		    "because multihost writes failed or were delayed;\n\t"
		    "another system could import the pool undetected.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Make sure the pool's devices"
		    " are connected, then reboot your system and\n\timport the "
		    "pool or run 'zpool clear' to resume the pool.\n"));
		break;

	case ZPOOL_STATUS_IO_FAILURE_WAIT:
	case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices are "
		    "faulted in response to IO failures.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Make sure the affected "
		    "devices are connected, then run 'zpool clear'.\n"));
		break;

	case ZPOOL_STATUS_BAD_LOG:
		(void) snprintf(status, ST_SIZE, gettext("An intent log record "
		    "could not be read.\n"
		    "\tWaiting for administrator intervention to fix the "
		    "faulted pool.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Either restore the affected "
		    "device(s) and run 'zpool online',\n"
		    "\tor ignore the intent log records by running "
		    "'zpool clear'.\n"));
		break;

	case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
		(void) snprintf(status, ST_SIZE,
		    gettext("One or more devices are "
		    "configured to use a non-native block size.\n"
		    "\tExpect reduced performance.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Replace affected devices "
		    "with devices that support the\n\tconfigured block size, "
		    "or migrate data to a properly configured\n\tpool.\n"));
		break;

	case ZPOOL_STATUS_HOSTID_MISMATCH:
		(void) snprintf(status, ST_SIZE,
		    gettext("Mismatch between pool hostid"
		    " and system hostid on imported pool.\n\tThis pool was "
		    "previously imported into a system with a different "
		    "hostid,\n\tand then was verbatim imported into this "
		    "system.\n"));
		(void) snprintf(action, AC_SIZE,
		    gettext("Export this pool on all "
		    "systems on which it is imported.\n"
		    "\tThen import it to correct the mismatch.\n"));
		break;

	case ZPOOL_STATUS_ERRATA:
		(void) snprintf(status, ST_SIZE,
		    gettext("Errata #%d detected.\n"), errata);
		switch (errata) {
		case ZPOOL_ERRATA_NONE:
			break;

		case ZPOOL_ERRATA_ZOL_2094_SCRUB:
			(void) snprintf(action, AC_SIZE,
			    gettext("To correct the issue run "
			    "'zpool scrub'.\n"));
			break;

		case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
			(void) strlcat(status, gettext("\tExisting encrypted "
			    "datasets contain an on-disk incompatibility\n\t "
			    "which needs to be corrected.\n"), ST_SIZE);
			(void) snprintf(action, AC_SIZE,
			    gettext("To correct the issue"
			    " backup existing encrypted datasets to new\n\t"
			    "encrypted datasets and destroy the old ones. "
			    "'zfs mount -o ro' can\n\tbe used to temporarily "
			    "mount existing encrypted datasets readonly.\n"));
			break;

		case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
			(void) strlcat(status, gettext("\tExisting encrypted "
			    "snapshots and bookmarks contain an on-disk\n\t"
			    "incompatibility. This may cause on-disk "
			    "corruption if they are used\n\twith "
			    "'zfs recv'.\n"), ST_SIZE);
			(void) snprintf(action, AC_SIZE,
			    gettext("To correct the "
			    "issue, enable the bookmark_v2 feature. No "
			    "additional\n\taction is needed if there are no "
			    "encrypted snapshots or bookmarks.\n\tIf preserving "
			    "the encrypted snapshots and bookmarks is required,"
			    " use\n\ta non-raw send to backup and restore them."
			    " Alternately, they may be\n\tremoved to resolve "
			    "the incompatibility.\n"));
			break;

		default:
			/*
			 * All errata which allow the pool to be imported
			 * must contain an action message.
			 */
			assert(0);
		}
		break;

	default:
		/*
		 * The remaining errors can't actually be generated, yet.
		 */
		assert(reason == ZPOOL_STATUS_OK);
	}

	if (status[0] != 0) {
		if (cbp->cb_json)
			fnvlist_add_string(item, "status", status);
		else {
			(void) printf_color(ANSI_BOLD, gettext("status: "));
			(void) printf_color(ANSI_YELLOW, status);
		}
	}

	if (action[0] != 0) {
		if (cbp->cb_json)
			fnvlist_add_string(item, "action", action);
		else {
			(void) printf_color(ANSI_BOLD, gettext("action: "));
			(void) printf_color(ANSI_YELLOW, action);
		}
	}
}

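/*
 * Per-pool callback for 'zpool status -j': build the JSON object for one
 * pool, including status/action text, scan, removal, checkpoint, raidz
 * expansion, vdev, dedup, and error information.
 */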
static int
status_callback_json(zpool_handle_t *zhp, void *data)
{
	status_cbdata_t *cbp = data;
	nvlist_t *config, *nvroot;
	const char *msgid;
	char pool_guid[256];
	char msgbuf[256];
	uint64_t guid;
	zpool_status_t reason;
	zpool_errata_t errata;
	uint_t c;
	vdev_stat_t *vs;
	nvlist_t *item, *d, *load_info, *vds;

	/* If dedup stats were requested, also fetch dedupcached. */
	if (cbp->cb_dedup_stats > 1)
		zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
	reason = zpool_get_status(zhp, &msgid, &errata);
	/*
	 * If we were given 'zpool status -x', only report those pools with
	 * problems.
	 */
	if (cbp->cb_explain &&
	    (reason == ZPOOL_STATUS_OK ||
	    reason == ZPOOL_STATUS_VERSION_OLDER ||
	    reason == ZPOOL_STATUS_FEAT_DISABLED ||
	    reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
	    reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
		return (0);
	}

	d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
	item = fnvlist_alloc();
	vds = fnvlist_alloc();
	fill_pool_info(item, zhp, B_FALSE, cbp->cb_json_as_int);
	config = zpool_get_config(zhp, NULL);

	if (config != NULL) {
		nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
		verify(nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0);
		if (cbp->cb_json_pool_key_guid) {
			guid = fnvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_POOL_GUID);
			(void) snprintf(pool_guid, 256, "%llu",
			    (u_longlong_t)guid);
		}
		cbp->cb_count++;

		print_status_reason(zhp, cbp, reason, errata, item);
		if (msgid != NULL) {
			(void) snprintf(msgbuf, 256,
			    "https://openzfs.github.io/openzfs-docs/msg/%s",
			    msgid);
			fnvlist_add_string(item, "msgid", msgid);
			fnvlist_add_string(item, "moreinfo", msgbuf);
		}

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
		    &load_info) == 0) {
			fnvlist_add_nvlist(item, ZPOOL_CONFIG_LOAD_INFO,
			    load_info);
		}

		scan_status_nvlist(zhp, cbp, nvroot, item);
		removal_status_nvlist(zhp, cbp, nvroot, item);
		checkpoint_status_nvlist(nvroot, cbp, item);
		raidz_expand_status_nvlist(zhp, cbp, nvroot, item);
		vdev_stats_nvlist(zhp, cbp, nvroot, 0, B_FALSE, NULL, vds);
		if (cbp->cb_flat_vdevs) {
			class_vdevs_nvlist(zhp, cbp, nvroot,
			    VDEV_ALLOC_BIAS_DEDUP, vds);
			class_vdevs_nvlist(zhp, cbp, nvroot,
			    VDEV_ALLOC_BIAS_SPECIAL, vds);
			class_vdevs_nvlist(zhp, cbp, nvroot,
			    VDEV_ALLOC_CLASS_LOGS, vds);
			l2cache_nvlist(zhp, cbp, nvroot, vds);
			spares_nvlist(zhp, cbp, nvroot, vds);

			fnvlist_add_nvlist(item, "vdevs", vds);
			fnvlist_free(vds);
		} else {
			fnvlist_add_nvlist(item, "vdevs", vds);
			fnvlist_free(vds);

			class_vdevs_nvlist(zhp, cbp, nvroot,
			    VDEV_ALLOC_BIAS_DEDUP, item);
			class_vdevs_nvlist(zhp, cbp, nvroot,
			    VDEV_ALLOC_BIAS_SPECIAL, item);
			class_vdevs_nvlist(zhp, cbp, nvroot,
			    VDEV_ALLOC_CLASS_LOGS, item);
			l2cache_nvlist(zhp, cbp, nvroot, item);
			spares_nvlist(zhp, cbp, nvroot, item);
		}
		dedup_stats_nvlist(zhp, cbp, item);
		errors_nvlist(zhp, cbp, item);
	}
	if (cbp->cb_json_pool_key_guid) {
		fnvlist_add_nvlist(d, pool_guid, item);
	} else {
		fnvlist_add_nvlist(d, zpool_get_name(zhp),
		    item);
	}
	fnvlist_free(item);
	return (0);
}

/*
 * Display a summary of pool status. Displays a summary such as:
 *
 *        pool: tank
 *      status: DEGRADED
 *      reason: One or more devices ...
 *         see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
 *      config:
 *              mirror          DEGRADED
 *                c1t0d0        OK
 *                c2t0d0        UNAVAIL
 *
 * When given the '-v' option, we print out the complete config. If the '-e'
 * option is specified, then we print out error rate information as well.
 */
static int
status_callback(zpool_handle_t *zhp, void *data)
{
	status_cbdata_t *cbp = data;
	nvlist_t *config, *nvroot;
	const char *msgid;
	zpool_status_t reason;
	zpool_errata_t errata;
	const char *health;
	uint_t c;
	vdev_stat_t *vs;

	/* If dedup stats were requested, also fetch dedupcached. */
	if (cbp->cb_dedup_stats > 1)
		zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);

	config = zpool_get_config(zhp, NULL);
	reason = zpool_get_status(zhp, &msgid, &errata);

	cbp->cb_count++;

	/*
	 * If we were given 'zpool status -x', only report those pools with
	 * problems.
	 */
	if (cbp->cb_explain &&
	    (reason == ZPOOL_STATUS_OK ||
	    reason == ZPOOL_STATUS_VERSION_OLDER ||
	    reason == ZPOOL_STATUS_FEAT_DISABLED ||
	    reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
	    reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
		if (!cbp->cb_allpools) {
			(void) printf(gettext("pool '%s' is healthy\n"),
			    zpool_get_name(zhp));
			if (cbp->cb_first)
				cbp->cb_first = B_FALSE;
		}
		return (0);
	}

	if (cbp->cb_first)
		cbp->cb_first = B_FALSE;
	else
		(void) printf("\n");

	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);

	health = zpool_get_state_str(zhp);

	printf("  ");
	(void) printf_color(ANSI_BOLD, gettext("pool:"));
	printf(" %s\n", zpool_get_name(zhp));
	(void) fputc(' ', stdout);
	(void) printf_color(ANSI_BOLD, gettext("state: "));

	(void) printf_color(health_str_to_color(health), "%s", health);

	(void) fputc('\n', stdout);
	print_status_reason(zhp, cbp, reason, errata, NULL);

	if (msgid != NULL) {
		printf("   ");
		(void) printf_color(ANSI_BOLD, gettext("see:"));
		printf(gettext(
		    " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
		    msgid);
	}

	if (config != NULL) {
		uint64_t nerr;
		nvlist_t **spares, **l2cache;
		uint_t nspares, nl2cache;

		print_scan_status(zhp, nvroot);

		pool_removal_stat_t *prs = NULL;
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
		print_removal_status(zhp, prs);

		pool_checkpoint_stat_t *pcs = NULL;
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
		print_checkpoint_status(pcs);

		pool_raidz_expand_stat_t *pres = NULL;
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
		print_raidz_expand_status(zhp, pres);

		cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
		    cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
		if (cbp->cb_namewidth < 10)
			cbp->cb_namewidth = 10;

		color_start(ANSI_BOLD);
		(void) printf(gettext("config:\n\n"));
		(void) printf(gettext("\t%-*s  %-8s %5s %5s %5s"),
		    cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
		    "CKSUM");
		color_end();

		if (cbp->cb_print_slow_ios) {
			(void) printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
		}

		if (cbp->cb_print_power) {
			(void) printf_color(ANSI_BOLD, " %5s",
			    gettext("POWER"));
		}

		if (cbp->cb_print_dio_verify) {
			(void) printf_color(ANSI_BOLD, " %5s", gettext("DIO"));
		}

		if (cbp->vcdl != NULL)
			print_cmd_columns(cbp->vcdl, 0);

		printf("\n");

		print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
		    B_FALSE, NULL);

		print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
		print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
		print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);

		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0)
			print_l2cache(zhp, cbp, l2cache, nl2cache);

		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0)
			print_spares(zhp, cbp, spares, nspares);

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0) {
			(void) printf("\n");
			if (nerr == 0) {
				(void) printf(gettext(
				    "errors: No known data errors\n"));
			} else if (!cbp->cb_verbose) {
				color_start(ANSI_RED);
				(void) printf(gettext("errors: %llu data "
				    "errors, use '-v' for a list\n"),
				    (u_longlong_t)nerr);
				color_end();
			} else {
				print_error_log(zhp);
			}
		}

		if (cbp->cb_dedup_stats)
			print_dedup_stats(zhp, config, cbp->cb_literal);
	} else {
		(void) printf(gettext("config: The configuration cannot be "
		    "determined.\n"));
	}

	return (0);
}

/*
 * zpool status [-dDegiLpPstvx] [-c [script1,script2,...]] ...
 *	[-j|--json [--json-flat-vdevs] [--json-int] ...
 *	[--json-pool-key-guid]] [--power] [-T d|u] ...
 *	[pool] [interval [count]]
 *
 *	-c CMD	For each vdev, run command CMD
 *	-D	Display dedup status (undocumented)
 *	-d	Display Direct I/O write verify errors
 *	-e	Display only unhealthy vdevs
 *	-g	Display guid for individual vdev name.
 *	-i	Display vdev initialization status.
 *	-j [...] Display output in JSON format
 *	--json-flat-vdevs Display vdevs in flat hierarchy
 *	--json-int Display numbers in integer format instead of string
 *	--json-pool-key-guid Use pool GUID as key for pool objects
 *	-L	Follow links when resolving vdev path name.
 *	-P	Display full path for vdev name.
 *	-p	Display values in parsable (exact) format.
 *	--power	Display vdev enclosure slot power status
 *	-s	Display slow IOs column.
 *	-T	Display a timestamp in date(1) or Unix format
 *	-t	Display vdev TRIM status.
 *	-v	Display complete error logs
 *	-x	Display only pools with potential problems
 *
 * Describes the health status of all pools or some subset.
 */
int
zpool_do_status(int argc, char **argv)
{
	int c;
	int ret;
	float interval = 0;
	unsigned long count = 0;
	status_cbdata_t cb = { 0 };
	nvlist_t *data;
	char *cmd = NULL;

	struct option long_options[] = {
		{"power", no_argument, NULL, ZPOOL_OPTION_POWER},
		{"json", no_argument, NULL, 'j'},
		{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
		{"json-flat-vdevs", no_argument, NULL,
		    ZPOOL_OPTION_JSON_FLAT_VDEVS},
		{"json-pool-key-guid", no_argument, NULL,
		    ZPOOL_OPTION_POOL_KEY_GUID},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "c:jdDegiLpPstT:vx", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'c':
			if (cmd != NULL) {
				fprintf(stderr,
				    gettext("Can't set -c flag twice\n"));
				exit(1);
			}

			if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
			    !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
				fprintf(stderr, gettext(
				    "Can't run -c, disabled by "
				    "ZPOOL_SCRIPTS_ENABLED.\n"));
				exit(1);
			}

			if ((getuid() <= 0 || geteuid() <= 0) &&
			    !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
				fprintf(stderr, gettext(
				    "Can't run -c with root privileges "
				    "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
				exit(1);
			}
			cmd = optarg;
			break;
		case 'd':
			cb.cb_print_dio_verify = B_TRUE;
			break;
		case 'D':
			if (++cb.cb_dedup_stats > 2)
				cb.cb_dedup_stats = 2;
			break;
		case 'e':
			cb.cb_print_unhealthy = B_TRUE;
			break;
		case 'g':
			cb.cb_name_flags |= VDEV_NAME_GUID;
			break;
		case 'i':
			cb.cb_print_vdev_init = B_TRUE;
			break;
		case 'L':
			cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'p':
			cb.cb_literal = B_TRUE;
			break;
		case 'P':
			cb.cb_name_flags |= VDEV_NAME_PATH;
			break;
		case 's':
			cb.cb_print_slow_ios = B_TRUE;
			break;
		case 't':
			cb.cb_print_vdev_trim = B_TRUE;
			break;
		case 'T':
			get_timestamp_arg(*optarg);
			break;
		case 'v':
			cb.cb_verbose = B_TRUE;
			break;
		case 'j':
			cb.cb_json = B_TRUE;
			break;
		case 'x':
			cb.cb_explain = B_TRUE;
			break;
		case ZPOOL_OPTION_POWER:
			cb.cb_print_power = B_TRUE;
			break;
		case ZPOOL_OPTION_JSON_FLAT_VDEVS:
			cb.cb_flat_vdevs = B_TRUE;
			break;
		case ZPOOL_OPTION_JSON_NUMS_AS_INT:
			cb.cb_json_as_int = B_TRUE;
			cb.cb_literal = B_TRUE;
			break;
		case ZPOOL_OPTION_POOL_KEY_GUID:
			cb.cb_json_pool_key_guid = B_TRUE;
			break;
		case '?':
			if (optopt == 'c') {
				print_zpool_script_list("status");
				exit(0);
			} else {
				fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	get_interval_count(&argc, argv, &interval, &count);

	if (argc == 0)
		cb.cb_allpools = B_TRUE;

	cb.cb_first = B_TRUE;
	cb.cb_print_status = B_TRUE;

	if (cb.cb_flat_vdevs && !cb.cb_json) {
		fprintf(stderr, gettext("'--json-flat-vdevs' only works with"
		    " '-j' option\n"));
		usage(B_FALSE);
	}

	if (cb.cb_json_as_int && !cb.cb_json) {
		(void) fprintf(stderr, gettext("'--json-int' only works with"
		    " '-j' option\n"));
		usage(B_FALSE);
	}

	if (!cb.cb_json && cb.cb_json_pool_key_guid) {
		(void) fprintf(stderr, gettext("'json-pool-key-guid' only"
		    " works with '-j' option\n"));
		usage(B_FALSE);
	}

	for (;;) {
		if (cb.cb_json) {
			cb.cb_jsobj = zpool_json_schema(0, 1);
			data = fnvlist_alloc();
			fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
			fnvlist_free(data);
		}

		if (timestamp_fmt != NODATE) {
			if (cb.cb_json) {
				if (cb.cb_json_as_int) {
					fnvlist_add_uint64(cb.cb_jsobj, "time",
					    time(NULL));
				} else {
					char ts[128];
					get_timestamp(timestamp_fmt, ts, 128);
					fnvlist_add_string(cb.cb_jsobj, "time",
					    ts);
				}
			} else
				print_timestamp(timestamp_fmt);
		}

		if (cmd != NULL)
			cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
			    NULL, NULL, 0, 0);

		if (cb.cb_json) {
			ret = for_each_pool(argc, argv, B_TRUE, NULL,
			    ZFS_TYPE_POOL, cb.cb_literal,
			    status_callback_json, &cb);
		} else {
			ret = for_each_pool(argc, argv, B_TRUE, NULL,
			    ZFS_TYPE_POOL, cb.cb_literal,
			    status_callback, &cb);
		}

		if (cb.vcdl != NULL)
			free_vdev_cmd_data_list(cb.vcdl);

		if (cb.cb_json) {
			if (ret == 0)
				zcmd_print_json(cb.cb_jsobj);
			else
				nvlist_free(cb.cb_jsobj);
		} else {
			if (argc == 0 && cb.cb_count == 0) {
				(void) fprintf(stderr, "%s",
				    gettext("no pools available\n"));
			} else if (cb.cb_explain && cb.cb_first &&
			    cb.cb_allpools) {
				(void) printf("%s",
				    gettext("all pools are healthy\n"));
			}
		}

		if (ret != 0)
			return (ret);

		if (interval == 0)
			break;

		if (count != 0 && --count == 0)
			break;

		(void) fflush(stdout);
		(void) fsleep(interval);
	}

	return (0);
}

typedef struct upgrade_cbdata {
	int	cb_first;
	int	cb_argc;
	uint64_t cb_version;
	char	**cb_argv;
} upgrade_cbdata_t;

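/*
 * Recursively check a filesystem tree for ZPL versions newer than this
 * implementation supports, counting any unsupported filesystems found.
 */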
static int
check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
{
	int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
	int *count = (int *)unsupp_fs;

	if (zfs_version > ZPL_VERSION) {
		(void) printf(gettext("%s (v%d) is not supported by this "
		    "implementation of ZFS.\n"),
		    zfs_get_name(zhp), zfs_version);
		(*count)++;
	}

	(void) zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);

	zfs_close(zhp);

	return (0);
}

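/*
 * Upgrade a pool's on-disk version, refusing to proceed if any filesystem
 * uses an unsupported ZPL version or if 'compatibility' is set to legacy.
 */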
static int
upgrade_version(zpool_handle_t *zhp, uint64_t version)
{
	int ret;
	nvlist_t *config;
	uint64_t oldversion;
	int unsupp_fs = 0;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &oldversion) == 0);

	char compat[ZFS_MAXPROPLEN];
	if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
	    ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
		compat[0] = '\0';

	assert(SPA_VERSION_IS_SUPPORTED(oldversion));
	assert(oldversion < version);

	ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
	if (ret != 0)
		return (ret);

	if (unsupp_fs) {
		(void) fprintf(stderr, gettext("Upgrade not performed due "
		    "to %d unsupported filesystems (max v%d).\n"),
		    unsupp_fs, (int)ZPL_VERSION);
		return (1);
	}

	if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
		(void) fprintf(stderr, gettext("Upgrade not performed because "
		    "'compatibility' property set to '"
		    ZPOOL_COMPAT_LEGACY "'.\n"));
		return (1);
	}

	ret = zpool_upgrade(zhp, version);
	if (ret != 0)
		return (ret);

	if (version >= SPA_VERSION_FEATURES) {
		(void) printf(gettext("Successfully upgraded "
		    "'%s' from version %llu to feature flags.\n"),
		    zpool_get_name(zhp), (u_longlong_t)oldversion);
	} else {
		(void) printf(gettext("Successfully upgraded "
		    "'%s' from version %llu to version %llu.\n"),
		    zpool_get_name(zhp), (u_longlong_t)oldversion,
		    (u_longlong_t)version);
	}

	return (0);
}

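/*
 * Enable every supported feature allowed by the pool's 'compatibility'
 * property that is not already enabled, reporting each feature enabled.
 */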
static int
upgrade_enable_all(zpool_handle_t *zhp, int *countp)
{
	int i, ret, count;
	boolean_t firstff = B_TRUE;
	nvlist_t *enabled = zpool_get_features(zhp);

	char compat[ZFS_MAXPROPLEN];
	if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
	    ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
		compat[0] = '\0';

	boolean_t requested_features[SPA_FEATURES];
	if (zpool_do_load_compat(compat, requested_features) !=
	    ZPOOL_COMPATIBILITY_OK)
		return (-1);

	count = 0;
	for (i = 0; i < SPA_FEATURES; i++) {
		const char *fname = spa_feature_table[i].fi_uname;
		const char *fguid = spa_feature_table[i].fi_guid;

		if (!spa_feature_table[i].fi_zfs_mod_supported ||
		    (spa_feature_table[i].fi_flags & ZFEATURE_FLAG_NO_UPGRADE))
			continue;

		if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
			char *propname;
			verify(-1 != asprintf(&propname, "feature@%s", fname));
			ret = zpool_set_prop(zhp, propname,
			    ZFS_FEATURE_ENABLED);
			if (ret != 0) {
				free(propname);
				return (ret);
			}
			count++;

			if (firstff) {
				(void) printf(gettext("Enabled the "
				    "following features on '%s':\n"),
				    zpool_get_name(zhp));
				firstff = B_FALSE;
			}
			(void) printf(gettext("  %s\n"), fname);
			free(propname);
		}
	}

	if (countp != NULL)
		*countp = count;
	return (0);
}

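/*
 * Per-pool callback for 'zpool upgrade': bump the on-disk version if it
 * is below the target and enable any newly supported features.
 */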
static int
upgrade_cb(zpool_handle_t *zhp, void *arg)
{
	upgrade_cbdata_t *cbp = arg;
	nvlist_t *config;
	uint64_t version;
	boolean_t modified_pool = B_FALSE;
	int ret;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);

	assert(SPA_VERSION_IS_SUPPORTED(version));

	if (version < cbp->cb_version) {
		cbp->cb_first = B_FALSE;
		ret = upgrade_version(zhp, cbp->cb_version);
		if (ret != 0)
			return (ret);
		modified_pool = B_TRUE;

		/*
		 * If they did "zpool upgrade -a", then we could
		 * be doing ioctls to different pools.  We need
		 * to log this history once to each pool, and bypass
		 * the normal history logging that happens in main().
		 */
		(void) zpool_log_history(g_zfs, history_str);
		log_history = B_FALSE;
	}

	if (cbp->cb_version >= SPA_VERSION_FEATURES) {
		int count;
		ret = upgrade_enable_all(zhp, &count);
		if (ret != 0)
			return (ret);

		if (count > 0) {
			cbp->cb_first = B_FALSE;
			modified_pool = B_TRUE;
		}
	}

	if (modified_pool) {
		(void) printf("\n");
		(void) after_zpool_upgrade(zhp);
	}

	return (0);
}

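/*
 * Callback for 'zpool upgrade' with no arguments: list pools still
 * formatted with a legacy (pre-feature-flags) version number.
 */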
static int
upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
{
	upgrade_cbdata_t *cbp = arg;
	nvlist_t *config;
	uint64_t version;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);

	assert(SPA_VERSION_IS_SUPPORTED(version));

	if (version < SPA_VERSION_FEATURES) {
		if (cbp->cb_first) {
			(void) printf(gettext("The following pools are "
			    "formatted with legacy version numbers and can\n"
			    "be upgraded to use feature flags.  After "
			    "being upgraded, these pools\nwill no "
			    "longer be accessible by software that does not "
			    "support feature\nflags.\n\n"
			    "Note that setting a pool's 'compatibility' "
			    "feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
			    "inhibit upgrades.\n\n"));
			(void) printf(gettext("VER  POOL\n"));
			(void) printf(gettext("---  ------------\n"));
			cbp->cb_first = B_FALSE;
		}

		(void) printf("%2llu   %s\n", (u_longlong_t)version,
		    zpool_get_name(zhp));
	}

	return (0);
}

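/*
 * Callback for 'zpool upgrade' with no arguments: for pools already using
 * feature flags, list any supported features that are not yet enabled.
 */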
static int
upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
{
	upgrade_cbdata_t *cbp = arg;
	nvlist_t *config;
	uint64_t version;

	config = zpool_get_config(zhp, NULL);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);

	if (version >= SPA_VERSION_FEATURES) {
		int i;
		boolean_t poolfirst = B_TRUE;
		nvlist_t *enabled = zpool_get_features(zhp);

		for (i = 0; i < SPA_FEATURES; i++) {
			const char *fguid = spa_feature_table[i].fi_guid;
			const char *fname = spa_feature_table[i].fi_uname;

			if (!spa_feature_table[i].fi_zfs_mod_supported)
				continue;

			if (!nvlist_exists(enabled, fguid)) {
				if (cbp->cb_first) {
					(void) printf(gettext("\nSome "
					    "supported features are not "
					    "enabled on the following pools. "
					    "Once a\nfeature is enabled the "
					    "pool may become incompatible with "
					    "software\nthat does not support "
					    "the feature. See "
					    "zpool-features(7) for "
					    "details.\n\n"
					    "Note that the pool "
					    "'compatibility' feature can be "
					    "used to inhibit\nfeature "
					    "upgrades.\n\n"
					    "Features marked with (*) are not "
					    "applied automatically on upgrade, "
					    "and\nmust be applied explicitly "
					    "with zpool-set(7).\n\n"));
					(void) printf(gettext("POOL  "
					    "FEATURE\n"));
					(void) printf(gettext("------"
					    "---------\n"));
					cbp->cb_first = B_FALSE;
				}

				if (poolfirst) {
					(void) printf(gettext("%s\n"),
					    zpool_get_name(zhp));
					poolfirst = B_FALSE;
				}

				(void) printf(gettext("  %s%s\n"), fname,
				    spa_feature_table[i].fi_flags &
				    ZFEATURE_FLAG_NO_UPGRADE ? "(*)" : "");
			}
			/*
			 * If they did "zpool upgrade -a", then we could
			 * be doing ioctls to different pools.  We need
			 * to log this history once to each pool, and bypass
			 * the normal history logging that happens in main().
			 */
			(void) zpool_log_history(g_zfs, history_str);
			log_history = B_FALSE;
		}
	}

	return (0);
}

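/*
 * Upgrade a single named pool: bump the on-disk version if needed and,
 * when targeting a feature-flags version, enable all supported and
 * requested features.
 */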
static int
upgrade_one(zpool_handle_t *zhp, void *data)
{
	boolean_t modified_pool = B_FALSE;
	upgrade_cbdata_t *cbp = data;
	uint64_t cur_version;
	int ret;

	if (strcmp("log", zpool_get_name(zhp)) == 0) {
		(void) fprintf(stderr, gettext("'log' is now a reserved word\n"
		    "Pool 'log' must be renamed using export and import"
		    " to upgrade.\n"));
		return (1);
	}

	cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (cur_version > cbp->cb_version) {
		(void) printf(gettext("Pool '%s' is already formatted "
		    "using more current version '%llu'.\n\n"),
		    zpool_get_name(zhp), (u_longlong_t)cur_version);
		return (0);
	}

	if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
		(void) printf(gettext("Pool '%s' is already formatted "
		    "using version %llu.\n\n"), zpool_get_name(zhp),
		    (u_longlong_t)cbp->cb_version);
		return (0);
	}

	if (cur_version != cbp->cb_version) {
		modified_pool = B_TRUE;
		ret = upgrade_version(zhp, cbp->cb_version);
		if (ret != 0)
			return (ret);
	}

	if (cbp->cb_version >= SPA_VERSION_FEATURES) {
		int count = 0;
		ret = upgrade_enable_all(zhp, &count);
		if (ret != 0)
			return (ret);

		if (count != 0) {
			modified_pool = B_TRUE;
		} else if (cur_version == SPA_VERSION) {
			(void) printf(gettext("Pool '%s' already has all "
			    "supported and requested features enabled.\n"),
			    zpool_get_name(zhp));
		}
	}

	if (modified_pool) {
		(void) printf("\n");
		(void) after_zpool_upgrade(zhp);
	}

	return (0);
}

/*
 * zpool upgrade
 * zpool upgrade -v
 * zpool upgrade [-V version] <-a | pool ...>
 *
 * With no arguments, display downrev'd ZFS pools available for upgrade.
 * Individual pools can be upgraded by specifying the pool, and '-a' will
 * upgrade all pools.
 */
int
zpool_do_upgrade(int argc, char **argv)
{
	int c;
	upgrade_cbdata_t cb = { 0 };
	int ret = 0;
	boolean_t showversions = B_FALSE;
	boolean_t upgradeall = B_FALSE;
	char *end;


	/* check options */
	while ((c = getopt(argc, argv, ":avV:")) != -1) {
		switch (c) {
		case 'a':
			upgradeall = B_TRUE;
			break;
		case 'v':
			showversions = B_TRUE;
			break;
		case 'V':
			cb.cb_version = strtoll(optarg, &end, 10);
			if (*end != '\0' ||
			    !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
				(void) fprintf(stderr,
				    gettext("invalid version '%s'\n"), optarg);
				usage(B_FALSE);
			}
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	cb.cb_argc = argc;
	cb.cb_argv = argv;
	argc -= optind;
	argv += optind;

	if (cb.cb_version == 0) {
		cb.cb_version = SPA_VERSION;
	} else if (!upgradeall && argc == 0) {
		(void) fprintf(stderr, gettext("-V option is "
		    "incompatible with other arguments\n"));
		usage(B_FALSE);
	}

	if (showversions) {
		if (upgradeall || argc != 0) {
			(void) fprintf(stderr, gettext("-v option is "
			    "incompatible with other arguments\n"));
			usage(B_FALSE);
		}
	} else if (upgradeall) {
		if (argc != 0) {
			(void) fprintf(stderr, gettext("-a option should not "
			    "be used along with a pool name\n"));
			usage(B_FALSE);
		}
	}

	(void) printf("%s", gettext("This system supports ZFS pool feature "
	    "flags.\n\n"));
	if (showversions) {
		int i;

		(void) printf(gettext("The following features are "
		    "supported:\n\n"));
		(void) printf(gettext("FEAT DESCRIPTION\n"));
		(void) printf("----------------------------------------------"
		    "---------------\n");
		for (i = 0; i < SPA_FEATURES; i++) {
			zfeature_info_t *fi = &spa_feature_table[i];
			if (!fi->fi_zfs_mod_supported)
				continue;
			const char *ro =
			    (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
			    " (read-only compatible)" : "";

			(void) printf("%-37s%s\n", fi->fi_uname, ro);
			(void) printf("  %s\n", fi->fi_desc);
		}
		(void) printf("\n");

		(void) printf(gettext("The following legacy versions are also "
		    "supported:\n\n"));
		(void) printf(gettext("VER  DESCRIPTION\n"));
		(void) printf("---  -----------------------------------------"
		    "---------------\n");
		(void) printf(gettext(" 1   Initial ZFS version\n"));
		(void) printf(gettext(" 2   Ditto blocks "
		    "(replicated metadata)\n"));
		(void) printf(gettext(" 3   Hot spares and double parity "
		    "RAID-Z\n"));
		(void) printf(gettext(" 4   zpool history\n"));
		(void) printf(gettext(" 5   Compression using the gzip "
		    "algorithm\n"));
		(void) printf(gettext(" 6   bootfs pool property\n"));
		(void) printf(gettext(" 7   Separate intent log devices\n"));
		(void) printf(gettext(" 8   Delegated administration\n"));
		(void) printf(gettext(" 9   refquota and refreservation "
		    "properties\n"));
		(void) printf(gettext(" 10  Cache devices\n"));
		(void) printf(gettext(" 11  Improved scrub performance\n"));
		(void) printf(gettext(" 12  Snapshot properties\n"));
		(void) printf(gettext(" 13  snapused property\n"));
		(void) printf(gettext(" 14  passthrough-x aclinherit\n"));
		(void) printf(gettext(" 15  user/group space accounting\n"));
		(void) printf(gettext(" 16  stmf property support\n"));
		(void) printf(gettext(" 17  Triple-parity RAID-Z\n"));
		(void) printf(gettext(" 18  Snapshot user holds\n"));
		(void) printf(gettext(" 19  Log device removal\n"));
		(void) printf(gettext(" 20  Compression using zle "
		    "(zero-length encoding)\n"));
		(void) printf(gettext(" 21  Deduplication\n"));
		(void) printf(gettext(" 22  Received properties\n"));
		(void) printf(gettext(" 23  Slim ZIL\n"));
		(void) printf(gettext(" 24  System attributes\n"));
		(void) printf(gettext(" 25  Improved scrub stats\n"));
		(void) printf(gettext(" 26  Improved snapshot deletion "
		    "performance\n"));
		(void) printf(gettext(" 27  Improved snapshot creation "
		    "performance\n"));
		(void) printf(gettext(" 28  Multiple vdev replacements\n"));
		(void) printf(gettext("\nFor more information on a particular "
		    "version, including supported releases,\n"));
		(void) printf(gettext("see the ZFS Administration Guide.\n\n"));
	} else if (argc == 0 && upgradeall) {
		cb.cb_first = B_TRUE;
		ret = zpool_iter(g_zfs, upgrade_cb, &cb);
		if (ret == 0 && cb.cb_first) {
			if (cb.cb_version == SPA_VERSION) {
				(void) printf(gettext("All pools are already "
				    "formatted using feature flags.\n\n"));
				(void) printf(gettext("Every feature flags "
				    "pool already has all supported and "
				    "requested features enabled.\n"));
			} else {
				(void) printf(gettext("All pools are already "
				    "formatted with version %llu or higher.\n"),
				    (u_longlong_t)cb.cb_version);
			}
		}
	} else if (argc == 0) {
		cb.cb_first = B_TRUE;
		ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
		assert(ret == 0);

		if (cb.cb_first) {
			(void) printf(gettext("All pools are formatted "
			    "using feature flags.\n\n"));
		} else {
			(void) printf(gettext("\nUse 'zpool upgrade -v' "
			    "for a list of available legacy versions.\n"));
		}

		cb.cb_first = B_TRUE;
		ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
		assert(ret == 0);

		if (cb.cb_first) {
			(void) printf(gettext("Every feature flags pool has "
			    "all supported and requested features enabled.\n"));
		} else {
			(void) printf(gettext("\n"));
		}
	} else {
		ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
		    B_FALSE, upgrade_one, &cb);
	}

	return (ret);
}

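/* Option state for 'zpool history' */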
typedef struct hist_cbdata {
	boolean_t first;
	boolean_t longfmt;
	boolean_t internal;
} hist_cbdata_t;

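/*
 * Print one batch of history records, honoring the -l (long format) and
 * -i (internal events) options.
 */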
static void
print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
{
	nvlist_t **records;
	uint_t numrecords;
	int i;

	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);
	for (i = 0; i < numrecords; i++) {
		nvlist_t *rec = records[i];
		char tbuf[64] = "";

		if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
			time_t tsec;
			struct tm t;

			tsec = fnvlist_lookup_uint64(records[i],
			    ZPOOL_HIST_TIME);
			(void) localtime_r(&tsec, &t);
			(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
		}

		if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
			uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
			    ZPOOL_HIST_ELAPSED_NS);
			(void) snprintf(tbuf + strlen(tbuf),
			    sizeof (tbuf) - strlen(tbuf),
			    " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
		}

		if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
			(void) printf("%s %s", tbuf,
			    fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
		} else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
			int ievent =
			    fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
			if (!cb->internal)
				continue;
			if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
				(void) printf("%s unrecognized record:\n",
				    tbuf);
				dump_nvlist(rec, 4);
				continue;
			}
			(void) printf("%s [internal %s txg:%lld] %s", tbuf,
			    zfs_history_event_names[ievent],
			    (longlong_t)fnvlist_lookup_uint64(
			    rec, ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
		} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
			if (!cb->internal)
				continue;
			(void) printf("%s [txg:%lld] %s", tbuf,
			    (longlong_t)fnvlist_lookup_uint64(
			    rec, ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
			if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
				(void) printf(" %s (%llu)",
				    fnvlist_lookup_string(rec,
				    ZPOOL_HIST_DSNAME),
				    (u_longlong_t)fnvlist_lookup_uint64(rec,
				    ZPOOL_HIST_DSID));
			}
			(void) printf(" %s", fnvlist_lookup_string(rec,
			    ZPOOL_HIST_INT_STR));
		} else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
			if (!cb->internal)
				continue;
			(void) printf("%s ioctl %s\n", tbuf,
			    fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
			if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
				(void) printf("    input:\n");
				dump_nvlist(fnvlist_lookup_nvlist(rec,
				    ZPOOL_HIST_INPUT_NVL), 8);
			}
			if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
				(void) printf("    output:\n");
				dump_nvlist(fnvlist_lookup_nvlist(rec,
				    ZPOOL_HIST_OUTPUT_NVL), 8);
			}
			if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
				(void) printf("    output nvlist omitted; "
				    "original size: %lldKB\n",
				    (longlong_t)fnvlist_lookup_int64(rec,
				    ZPOOL_HIST_OUTPUT_SIZE) / 1024);
			}
			if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
				(void) printf("    errno: %lld\n",
				    (longlong_t)fnvlist_lookup_int64(rec,
				    ZPOOL_HIST_ERRNO));
			}
		} else {
			if (!cb->internal)
				continue;
			(void) printf("%s unrecognized record:\n", tbuf);
			dump_nvlist(rec, 4);
		}

		if (!cb->longfmt) {
			(void) printf("\n");
			continue;
		}
		(void) printf(" [");
		if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
			uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
			struct passwd *pwd = getpwuid(who);
			(void) printf("user %d ", (int)who);
			if (pwd != NULL)
				(void) printf("(%s) ", pwd->pw_name);
		}
		if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
			(void) printf("on %s",
			    fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
		}
		if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
			(void) printf(":%s",
			    fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
		}

		(void) printf("]");
		(void) printf("\n");
	}
}

/*
 * Print out the command history for a specific pool.
 */
static int
get_history_one(zpool_handle_t *zhp, void *data)
{
	nvlist_t *nvhis;
	int ret;
	hist_cbdata_t *cb = (hist_cbdata_t *)data;
	uint64_t off = 0;
	boolean_t eof = B_FALSE;

	cb->first = B_FALSE;

	(void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));

	while (!eof) {
		if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
			return (ret);

		print_history_records(nvhis, cb);
		nvlist_free(nvhis);
	}
	(void) printf("\n");

	return (ret);
}

/*
 * zpool history <pool>
 *
 * Displays the history of commands that modified pools.
 */
int
zpool_do_history(int argc, char **argv)
{
	hist_cbdata_t cbdata = { 0 };
	int ret;
	int c;

	cbdata.first = B_TRUE;
	/* check options */
	while ((c = getopt(argc, argv, "li")) != -1) {
		switch (c) {
		case 'l':
			cbdata.longfmt = B_TRUE;
			break;
		case 'i':
			cbdata.internal = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}
	argc -= optind;
	argv += optind;

	ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, get_history_one, &cbdata);

	if (argc == 0 && cbdata.first == B_TRUE) {
		(void) fprintf(stderr, gettext("no pools available\n"));
		return (0);
	}

	return (ret);
}

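/* Option state for 'zpool events' */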
typedef struct ev_opts {
	int verbose;
	int scripted;
	int follow;
	int clear;
	char poolname[ZFS_MAX_DATASET_NAME_LEN];
} ev_opts_t;

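/*
 * Print the one-line summary of an event: a fixed-width timestamp with
 * nanosecond resolution followed by the event class.
 */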
static void
zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
{
	char ctime_str[26], str[32];
	const char *ptr;
	int64_t *tv;
	uint_t n;

	verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
	memset(str, ' ', 32);
	(void) ctime_r((const time_t *)&tv[0], ctime_str);
	(void) memcpy(str, ctime_str+4, 6);		/* 'Jun 30' */
	(void) memcpy(str+7, ctime_str+20, 4);		/* '1993' */
	(void) memcpy(str+12, ctime_str+11, 8);		/* '21:49:08' */
	(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
	if (opts->scripted)
		(void) printf(gettext("%s\t"), str);
	else
		(void) printf(gettext("%s "), str);

	verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
	(void) printf(gettext("%s\n"), ptr);
}

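/*
 * Recursively print the nvlist payload of an event, decoding well-known
 * numeric fields (zio stage, type, priority, flags, and vdev state) into
 * readable strings.
 */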
static void
zpool_do_events_nvprint(nvlist_t *nvl, int depth)
{
	nvpair_t *nvp;
	static char flagstr[256];

	for (nvp = nvlist_next_nvpair(nvl, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {

		data_type_t type = nvpair_type(nvp);
		const char *name = nvpair_name(nvp);

		boolean_t b;
		uint8_t i8;
		uint16_t i16;
		uint32_t i32;
		uint64_t i64;
		const char *str;
		nvlist_t *cnv;

		printf(gettext("%*s%s = "), depth, "", name);

		switch (type) {
		case DATA_TYPE_BOOLEAN:
			printf(gettext("%s"), "1");
			break;

		case DATA_TYPE_BOOLEAN_VALUE:
			(void) nvpair_value_boolean_value(nvp, &b);
			printf(gettext("%s"), b ? "1" : "0");
			break;

		case DATA_TYPE_BYTE:
			(void) nvpair_value_byte(nvp, &i8);
			printf(gettext("0x%x"), i8);
			break;

		case DATA_TYPE_INT8:
			(void) nvpair_value_int8(nvp, (void *)&i8);
			printf(gettext("0x%x"), i8);
			break;

		case DATA_TYPE_UINT8:
			(void) nvpair_value_uint8(nvp, &i8);
			printf(gettext("0x%x"), i8);
			break;

		case DATA_TYPE_INT16:
			(void) nvpair_value_int16(nvp, (void *)&i16);
			printf(gettext("0x%x"), i16);
			break;

		case DATA_TYPE_UINT16:
			(void) nvpair_value_uint16(nvp, &i16);
			printf(gettext("0x%x"), i16);
			break;

		case DATA_TYPE_INT32:
			(void) nvpair_value_int32(nvp, (void *)&i32);
			printf(gettext("0x%x"), i32);
			break;

		case DATA_TYPE_UINT32:
			(void) nvpair_value_uint32(nvp, &i32);
			if (strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE) == 0 ||
			    strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE) == 0) {
				(void) zfs_valstr_zio_stage(i32, flagstr,
				    sizeof (flagstr));
				printf(gettext("0x%x [%s]"), i32, flagstr);
			} else if (strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_TYPE) == 0) {
				(void) zfs_valstr_zio_type(i32, flagstr,
				    sizeof (flagstr));
				printf(gettext("0x%x [%s]"), i32, flagstr);
			} else if (strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) {
				(void) zfs_valstr_zio_priority(i32, flagstr,
				    sizeof (flagstr));
				printf(gettext("0x%x [%s]"), i32, flagstr);
			} else {
				printf(gettext("0x%x"), i32);
			}
			break;

		case DATA_TYPE_INT64:
			(void) nvpair_value_int64(nvp, (void *)&i64);
			printf(gettext("0x%llx"), (u_longlong_t)i64);
			break;

		case DATA_TYPE_UINT64:
			(void) nvpair_value_uint64(nvp, &i64);
			/*
			 * translate vdev state values to readable
			 * strings to aid zpool events consumers
			 */
			if (strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
			    strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
				printf(gettext("\"%s\" (0x%llx)"),
				    zpool_state_to_name(i64, VDEV_AUX_NONE),
				    (u_longlong_t)i64);
			} else if (strcmp(name,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS) == 0) {
				(void) zfs_valstr_zio_flag(i64, flagstr,
				    sizeof (flagstr));
				printf(gettext("0x%llx [%s]"),
				    (u_longlong_t)i64, flagstr);
			} else {
				printf(gettext("0x%llx"), (u_longlong_t)i64);
			}
			break;

		case DATA_TYPE_HRTIME:
			(void) nvpair_value_hrtime(nvp, (void *)&i64);
			printf(gettext("0x%llx"), (u_longlong_t)i64);
			break;

		case DATA_TYPE_STRING:
			(void) nvpair_value_string(nvp, &str);
			printf(gettext("\"%s\""), str ? str : "<NULL>");
			break;

		case DATA_TYPE_NVLIST:
			printf(gettext("(embedded nvlist)\n"));
			(void) nvpair_value_nvlist(nvp, &cnv);
			zpool_do_events_nvprint(cnv, depth + 8);
			printf(gettext("%*s(end %s)"), depth, "", name);
			break;

		case DATA_TYPE_NVLIST_ARRAY: {
			nvlist_t **val;
			uint_t i, nelem;

			(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
			printf(gettext("(%d embedded nvlists)\n"), nelem);
			for (i = 0; i < nelem; i++) {
				printf(gettext("%*s%s[%d] = %s\n"),
				    depth, "", name, i, "(embedded nvlist)");
				zpool_do_events_nvprint(val[i], depth + 8);
				printf(gettext("%*s(end %s[%i])\n"),
				    depth, "", name, i);
			}
			printf(gettext("%*s(end %s)\n"), depth, "", name);
		}
			break;

		case DATA_TYPE_INT8_ARRAY: {
			int8_t *val;
			uint_t i, nelem;

			(void) nvpair_value_int8_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%x "), val[i]);

			break;
		}

		case DATA_TYPE_UINT8_ARRAY: {
			uint8_t *val;
			uint_t i, nelem;

			(void) nvpair_value_uint8_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%x "), val[i]);

			break;
		}

		case DATA_TYPE_INT16_ARRAY: {
			int16_t *val;
			uint_t i, nelem;

			(void) nvpair_value_int16_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%x "), val[i]);

			break;
		}

		case DATA_TYPE_UINT16_ARRAY: {
			uint16_t *val;
			uint_t i, nelem;

			(void) nvpair_value_uint16_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%x "), val[i]);

			break;
		}

		case DATA_TYPE_INT32_ARRAY: {
			int32_t *val;
			uint_t i, nelem;

			(void) nvpair_value_int32_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%x "), val[i]);

			break;
		}

		case DATA_TYPE_UINT32_ARRAY: {
			uint32_t *val;
			uint_t i, nelem;

			(void) nvpair_value_uint32_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%x "), val[i]);

			break;
		}

		case DATA_TYPE_INT64_ARRAY: {
			int64_t *val;
			uint_t i, nelem;

			(void) nvpair_value_int64_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%llx "),
				    (u_longlong_t)val[i]);

			break;
		}

		case DATA_TYPE_UINT64_ARRAY: {
			uint64_t *val;
			uint_t i, nelem;

			(void) nvpair_value_uint64_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("0x%llx "),
				    (u_longlong_t)val[i]);

			break;
		}

		case DATA_TYPE_STRING_ARRAY: {
			const char **str;
			uint_t i, nelem;

			(void) nvpair_value_string_array(nvp, &str, &nelem);
			for (i = 0; i < nelem; i++)
				printf(gettext("\"%s\" "),
				    str[i] ? str[i] : "<NULL>");

			break;
		}

		case DATA_TYPE_BOOLEAN_ARRAY:
		case DATA_TYPE_BYTE_ARRAY:
		case DATA_TYPE_DOUBLE:
		case DATA_TYPE_DONTCARE:
		case DATA_TYPE_UNKNOWN:
			printf(gettext("<unknown>"));
			break;
		}

		printf(gettext("\n"));
	}
}

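/*
 * Read events from the kernel's event stream and print them, optionally
 * blocking for new events (-f) and filtering by pool name.
 */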
static int
zpool_do_events_next(ev_opts_t *opts)
{
	nvlist_t *nvl;
	int zevent_fd, ret, dropped;
	const char *pool;

	zevent_fd = open(ZFS_DEV, O_RDWR);
	VERIFY(zevent_fd >= 0);

	if (!opts->scripted)
		(void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");

	while (1) {
		ret = zpool_events_next(g_zfs, &nvl, &dropped,
		    (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
		if (ret || nvl == NULL)
			break;

		if (dropped > 0)
			(void) printf(gettext("dropped %d events\n"), dropped);

		if (strlen(opts->poolname) > 0 &&
		    nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
		    strcmp(opts->poolname, pool) != 0)
			continue;

		zpool_do_events_short(nvl, opts);

		if (opts->verbose) {
			zpool_do_events_nvprint(nvl, 8);
			printf(gettext("\n"));
		}
		(void) fflush(stdout);

		nvlist_free(nvl);
	}

	VERIFY0(close(zevent_fd));

	return (ret);
}

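/*
 * Discard all events currently queued in the kernel and report how many
 * were cleared.
 */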
static int
zpool_do_events_clear(void)
{
	int count, ret;

	ret = zpool_events_clear(g_zfs, &count);
	if (!ret)
		(void) printf(gettext("cleared %d events\n"), count);

	return (ret);
}

/*
 * zpool events [-vHf [pool] | -c]
 *
 * Displays event logs generated by ZFS.
 */
int
zpool_do_events(int argc, char **argv)
{
	ev_opts_t opts = { 0 };
	int ret;
	int c;

	/* check options */
	while ((c = getopt(argc, argv, "vHfc")) != -1) {
		switch (c) {
		case 'v':
			opts.verbose = 1;
			break;
		case 'H':
			opts.scripted = 1;
			break;
		case 'f':
			opts.follow = 1;
			break;
		case 'c':
			opts.clear = 1;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}
	argc -= optind;
	argv += optind;

	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	} else if (argc == 1) {
		(void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
		if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
			(void) fprintf(stderr,
			    gettext("invalid pool name '%s'\n"), opts.poolname);
			usage(B_FALSE);
		}
	}

	if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
	    opts.clear) {
		(void) fprintf(stderr,
		    gettext("invalid options combined with -c\n"));
		usage(B_FALSE);
	}

	if (opts.clear)
		ret = zpool_do_events_clear();
	else
		ret = zpool_do_events_next(&opts);

	return (ret);
}

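/*
 * Collect the requested properties for a single vdev, accumulating them
 * into the shared JSON object when JSON output was requested.
 */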
static int
get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
{
	zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
	char value[ZFS_MAXPROPLEN];
	zprop_source_t srctype;
	nvlist_t *props, *item, *d;
	props = item = d = NULL;

	if (cbp->cb_json) {
		d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "vdevs");
		if (d == NULL) {
			fprintf(stderr, "vdevs obj not found.\n");
			exit(1);
		}
		props = fnvlist_alloc();
	}

	for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
	    pl = pl->pl_next) {
		char *prop_name;
		/*
		 * If the first property is pool name, it is a special
		 * placeholder that we can skip. This will also skip
		 * over the name property when 'all' is specified.
		 */
		if (pl->pl_prop == ZPOOL_PROP_NAME &&
		    pl == cbp->cb_proplist)
			continue;

		if (pl->pl_prop == ZPROP_INVAL) {
			prop_name = pl->pl_user_prop;
		} else {
			prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
		}
		if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
		    prop_name, value, sizeof (value), &srctype,
		    cbp->cb_literal) == 0) {
			(void) zprop_collect_property(vdevname, cbp, prop_name,
			    value, srctype, NULL, NULL, props);
		}
	}

	if (cbp->cb_json) {
		if (!nvlist_empty(props)) {
			item = fnvlist_alloc();
			fill_vdev_info(item, zhp, vdevname, B_TRUE,
			    cbp->cb_json_as_int);
			fnvlist_add_nvlist(item, "properties", props);
			fnvlist_add_nvlist(d, vdevname, item);
			fnvlist_add_nvlist(cbp->cb_jsobj, "vdevs", d);
			fnvlist_free(item);
		}
		fnvlist_free(props);
	}

	return (0);
}

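/*
 * for_each_vdev() wrapper: derive a display name for the vdev and hand
 * off to get_callback_vdev().
 */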
static int
get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
{
	zpool_handle_t *zhp = zhp_data;
	zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
	char *vdevname;
	const char *type;
	int ret;

	/*
	 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to
	 * the pool name for display purposes, which is not desired.  Fall
	 * back to zpool_vdev_name() when not dealing with the root vdev.
	 */
	type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
	if (zhp != NULL && strcmp(type, "root") == 0)
		vdevname = strdup("root-0");
	else
		vdevname = zpool_vdev_name(g_zfs, zhp, nv,
		    cbp->cb_vdevs.cb_name_flags);

	(void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);

	ret = get_callback_vdev(zhp, vdevname, data);

	free(vdevname);

	return (ret);
}

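/*
 * Collect the requested properties for a pool, or for its vdevs when vdev
 * names were supplied on the command line.
 */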
static int
get_callback(zpool_handle_t *zhp, void *data)
{
	zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
	char value[ZFS_MAXPROPLEN];
	zprop_source_t srctype;
	zprop_list_t *pl;
	int vid;
	int err = 0;
	nvlist_t *props, *item, *d;
	props = item = d = NULL;

	if (cbp->cb_type == ZFS_TYPE_VDEV) {
		if (cbp->cb_json) {
			nvlist_t *pool = fnvlist_alloc();
			fill_pool_info(pool, zhp, B_FALSE, cbp->cb_json_as_int);
			fnvlist_add_nvlist(cbp->cb_jsobj, "pool", pool);
			fnvlist_free(pool);
		}

		if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
			(void) for_each_vdev(zhp, get_callback_vdev_cb, data);
		} else {
			/* Adjust column widths for vdev properties */
			for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
			    vid++) {
				(void) vdev_expand_proplist(zhp,
				    cbp->cb_vdevs.cb_names[vid],
				    &cbp->cb_proplist);
			}
			/* Display the properties */
			for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
			    vid++) {
				(void) get_callback_vdev(zhp,
				    cbp->cb_vdevs.cb_names[vid], data);
			}
		}
	} else {
		assert(cbp->cb_type == ZFS_TYPE_POOL);
		if (cbp->cb_json) {
			d = fnvlist_lookup_nvlist(cbp->cb_jsobj, "pools");
			if (d == NULL) {
				fprintf(stderr, "pools obj not found.\n");
				exit(1);
			}
			props = fnvlist_alloc();
		}
		for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
			/*
			 * Skip the special fake placeholder. This will also
			 * skip over the name property when 'all' is specified.
			 */
			if (pl->pl_prop == ZPOOL_PROP_NAME &&
			    pl == cbp->cb_proplist)
				continue;

			if (pl->pl_prop == ZPROP_INVAL &&
			    zfs_prop_user(pl->pl_user_prop)) {
				srctype = ZPROP_SRC_LOCAL;

				if (zpool_get_userprop(zhp, pl->pl_user_prop,
				    value, sizeof (value), &srctype) != 0)
					continue;

				err = zprop_collect_property(
				    zpool_get_name(zhp), cbp, pl->pl_user_prop,
				    value, srctype, NULL, NULL, props);
			} else if (pl->pl_prop == ZPROP_INVAL &&
			    (zpool_prop_feature(pl->pl_user_prop) ||
			    zpool_prop_unsupported(pl->pl_user_prop))) {
				srctype = ZPROP_SRC_LOCAL;

				if (zpool_prop_get_feature(zhp,
				    pl->pl_user_prop, value,
				    sizeof (value)) == 0) {
					err = zprop_collect_property(
					    zpool_get_name(zhp), cbp,
					    pl->pl_user_prop, value, srctype,
					    NULL, NULL, props);
				}
			} else {
				if (zpool_get_prop(zhp, pl->pl_prop, value,
				    sizeof (value), &srctype,
				    cbp->cb_literal) != 0)
					continue;

				err = zprop_collect_property(
				    zpool_get_name(zhp), cbp,
				    zpool_prop_to_name(pl->pl_prop),
				    value, srctype, NULL, NULL, props);
			}
			if (err != 0)
				return (err);
		}

		if (cbp->cb_json) {
			if (!nvlist_empty(props)) {
				item = fnvlist_alloc();
				fill_pool_info(item, zhp, B_TRUE,
				    cbp->cb_json_as_int);
				fnvlist_add_nvlist(item, "properties", props);
				if (cbp->cb_json_pool_key_guid) {
					char buf[256];
					uint64_t guid = fnvlist_lookup_uint64(
					    zpool_get_config(zhp, NULL),
					    ZPOOL_CONFIG_POOL_GUID);
					(void) snprintf(buf, 256, "%llu",
					    (u_longlong_t)guid);
					fnvlist_add_nvlist(d, buf, item);
				} else {
					const char *name = zpool_get_name(zhp);
					fnvlist_add_nvlist(d, name, item);
				}
				fnvlist_add_nvlist(cbp->cb_jsobj, "pools", d);
				fnvlist_free(item);
			}
			fnvlist_free(props);
		}
	}

	return (0);
}

/*
 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
 *
 *	-H	Scripted mode.  Don't display headers, and separate properties
 *		by a single tab.
 *	-o	List of columns to display.  Defaults to
 *		"name,property,value,source".
 *	-p	Display values in parsable (exact) format.
 *	-j	Display output in JSON format.
 *	--json-int	Display numbers as integers instead of strings.
 *	--json-pool-key-guid	Set pool GUID as key for pool objects.
 *
 * Get properties of pools in the system.  Output space statistics
 * for each one as well as other attributes.
 */
int
zpool_do_get(int argc, char **argv)
{
	zprop_get_cbdata_t cb = { 0 };
	zprop_list_t fake_name = { 0 };
	int ret;
	int c, i;
	char *propstr = NULL;
	char *vdev = NULL;
	nvlist_t *data = NULL;

	cb.cb_first = B_TRUE;

	/*
	 * Set up default columns and sources.
	 */
	cb.cb_sources = ZPROP_SRC_ALL;
	cb.cb_columns[0] = GET_COL_NAME;
	cb.cb_columns[1] = GET_COL_PROPERTY;
	cb.cb_columns[2] = GET_COL_VALUE;
	cb.cb_columns[3] = GET_COL_SOURCE;
	cb.cb_type = ZFS_TYPE_POOL;
	cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
	current_prop_type = cb.cb_type;

	struct option long_options[] = {
		{"json", no_argument, NULL, 'j'},
		{"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
		{"json-pool-key-guid", no_argument, NULL,
		    ZPOOL_OPTION_POOL_KEY_GUID},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, ":jHpo:", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'p':
			cb.cb_literal = B_TRUE;
			break;
		case 'H':
			cb.cb_scripted = B_TRUE;
			break;
		case 'j':
			cb.cb_json = B_TRUE;
			cb.cb_jsobj = zpool_json_schema(0, 1);
			data = fnvlist_alloc();
			break;
		case ZPOOL_OPTION_POOL_KEY_GUID:
			cb.cb_json_pool_key_guid = B_TRUE;
			break;
		case ZPOOL_OPTION_JSON_NUMS_AS_INT:
			cb.cb_json_as_int = B_TRUE;
			cb.cb_literal = B_TRUE;
			break;
		case 'o':
			memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
			i = 0;

			for (char *tok; (tok = strsep(&optarg, ",")); ) {
				static const char *const col_opts[] =
				    { "name", "property", "value", "source",
				    "all" };
				static const zfs_get_column_t col_cols[] =
				    { GET_COL_NAME, GET_COL_PROPERTY,
				    GET_COL_VALUE, GET_COL_SOURCE };

				if (i == ZFS_GET_NCOLS - 1) {
					(void) fprintf(stderr, gettext("too "
					    "many fields given to -o "
					    "option\n"));
					usage(B_FALSE);
				}

				for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
					if (strcmp(tok, col_opts[c]) == 0)
						goto found;

				(void) fprintf(stderr,
				    gettext("invalid column name '%s'\n"), tok);
				usage(B_FALSE);

found:
				if (c >= 4) {
					if (i > 0) {
						(void) fprintf(stderr,
						    gettext("\"all\" conflicts "
						    "with specific fields "
						    "given to -o option\n"));
						usage(B_FALSE);
					}

					memcpy(cb.cb_columns, col_cols,
					    sizeof (col_cols));
					i = ZFS_GET_NCOLS - 1;
				} else
					cb.cb_columns[i++] = col_cols[c];
			}
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	if (!cb.cb_json && cb.cb_json_as_int) {
		(void) fprintf(stderr, gettext("'--json-int' only works with"
		    " '-j' option\n"));
		usage(B_FALSE);
	}

	if (!cb.cb_json && cb.cb_json_pool_key_guid) {
		(void) fprintf(stderr, gettext("'json-pool-key-guid' only"
		    " works with '-j' option\n"));
		usage(B_FALSE);
	}

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing property "
		    "argument\n"));
		usage(B_FALSE);
	}

	/* Properties list is needed later by zprop_get_list() */
	propstr = argv[0];

	argc--;
	argv++;

	if (argc == 0) {
		/* No args, so just print the defaults. */
	} else if (are_all_pools(argc, argv)) {
		/* All the args are pool names */
	} else if (are_all_pools(1, argv)) {
		/* The first arg is a pool name */
		if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
		    (argc == 2 && strcmp(argv[1], "root") == 0) ||
		    are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
		    &cb.cb_vdevs)) {

			if (strcmp(argv[1], "root") == 0)
				vdev = strdup("root-0");

			/* ... and the rest are vdev names */
			if (vdev == NULL)
				cb.cb_vdevs.cb_names = argv + 1;
			else
				cb.cb_vdevs.cb_names = &vdev;

			cb.cb_vdevs.cb_names_count = argc - 1;
			cb.cb_type = ZFS_TYPE_VDEV;
			argc = 1; /* One pool to process */
		} else {
			if (cb.cb_json) {
				nvlist_free(cb.cb_jsobj);
				nvlist_free(data);
			}
			fprintf(stderr, gettext("Expected a list of vdevs in"
			    " \"%s\", but got:\n"), argv[0]);
			error_list_unresolved_vdevs(argc - 1, argv + 1,
			    argv[0], &cb.cb_vdevs);
			fprintf(stderr, "\n");
			usage(B_FALSE);
		}
	} else {
		if (cb.cb_json) {
			nvlist_free(cb.cb_jsobj);
			nvlist_free(data);
		}
		/*
		 * The first arg isn't the name of a valid pool.
		 */
		fprintf(stderr, gettext("Cannot get properties of %s: "
		    "no such pool available.\n"), argv[0]);
		return (1);
	}

	if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
	    cb.cb_type) != 0) {
		/* Use correct list of valid properties (pool or vdev) */
		current_prop_type = cb.cb_type;
		usage(B_FALSE);
	}

	if (cb.cb_proplist != NULL) {
		fake_name.pl_prop = ZPOOL_PROP_NAME;
		fake_name.pl_width = strlen(gettext("NAME"));
		fake_name.pl_next = cb.cb_proplist;
		cb.cb_proplist = &fake_name;
	}

	if (cb.cb_json) {
		if (cb.cb_type == ZFS_TYPE_VDEV)
			fnvlist_add_nvlist(cb.cb_jsobj, "vdevs", data);
		else
			fnvlist_add_nvlist(cb.cb_jsobj, "pools", data);
		fnvlist_free(data);
	}

	ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
	    cb.cb_literal, get_callback, &cb);

	if (ret == 0 && cb.cb_json)
		zcmd_print_json(cb.cb_jsobj);
	else if (ret != 0 && cb.cb_json)
		nvlist_free(cb.cb_jsobj);

	if (cb.cb_proplist == &fake_name)
		zprop_free_list(fake_name.pl_next);
	else
		zprop_free_list(cb.cb_proplist);

	if (vdev != NULL)
		free(vdev);

	return (ret);
}

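/* Callback state for 'zpool set' */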
typedef struct set_cbdata {
	char *cb_propname;
	char *cb_value;
	zfs_type_t cb_type;
	vdev_cbdata_t cb_vdevs;
	boolean_t cb_any_successful;
} set_cbdata_t;

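/*
 * Set a single pool property.  Warn when a new 'compatibility' set would
 * exclude features that are already enabled, and refuse to enable a
 * feature that the pool's current compatibility set does not permit.
 */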
static int
set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
{
	int error;

	/* Check if we have out-of-bounds features */
	if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
		boolean_t features[SPA_FEATURES];
		if (zpool_do_load_compat(cb->cb_value, features) !=
		    ZPOOL_COMPATIBILITY_OK)
			return (-1);

		nvlist_t *enabled = zpool_get_features(zhp);
		spa_feature_t i;
		for (i = 0; i < SPA_FEATURES; i++) {
			const char *fguid = spa_feature_table[i].fi_guid;
			if (nvlist_exists(enabled, fguid) && !features[i])
				break;
		}
		if (i < SPA_FEATURES)
			(void) fprintf(stderr, gettext("Warning: one or "
			    "more features already enabled on pool '%s'\n"
			    "are not present in this compatibility set.\n"),
			    zpool_get_name(zhp));
	}

	/* if we're setting a feature, check it's in compatibility set */
	if (zpool_prop_feature(cb->cb_propname) &&
	    strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
		char *fname = strchr(cb->cb_propname, '@') + 1;
		spa_feature_t f;

		if (zfeature_lookup_name(fname, &f) == 0) {
			char compat[ZFS_MAXPROPLEN];
			if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
			    compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
				compat[0] = '\0';

			boolean_t features[SPA_FEATURES];
			if (zpool_do_load_compat(compat, features) !=
			    ZPOOL_COMPATIBILITY_OK) {
				(void) fprintf(stderr, gettext("Error: "
				    "cannot enable feature '%s' on pool '%s'\n"
				    "because the pool's 'compatibility' "
				    "property cannot be parsed.\n"),
				    fname, zpool_get_name(zhp));
				return (-1);
			}

			if (!features[f]) {
				(void) fprintf(stderr, gettext("Error: "
				    "cannot enable feature '%s' on pool '%s'\n"
				    "as it is not specified in this pool's "
				    "current compatibility set.\n"
				    "Consider setting 'compatibility' to a "
				    "less restrictive set, or to 'off'.\n"),
				    fname, zpool_get_name(zhp));
				return (-1);
			}
		}
	}

	error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);

	return (error);
}

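/*
 * for_each_pool() callback for 'zpool set': dispatch to the vdev or pool
 * property setter depending on the target type.
 */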
static int
set_callback(zpool_handle_t *zhp, void *data)
{
	int error;
	set_cbdata_t *cb = (set_cbdata_t *)data;

	if (cb->cb_type == ZFS_TYPE_VDEV) {
		error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
		    cb->cb_propname, cb->cb_value);
	} else {
		assert(cb->cb_type == ZFS_TYPE_POOL);
		error = set_pool_callback(zhp, cb);
	}

	cb->cb_any_successful = !error;
	return (error);
}

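/*
 * zpool set property=value <pool>
 * zpool set property=value <pool> <vdev>
 *
 * Set the given property on a pool, or on a single vdev of that pool when
 * a vdev name is also supplied.
 */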
int
zpool_do_set(int argc, char **argv)
{
	set_cbdata_t cb = { 0 };
	int error;
	char *vdev = NULL;

	current_prop_type = ZFS_TYPE_POOL;
	if (argc > 1 && argv[1][0] == '-') {
		(void) fprintf(stderr, gettext("invalid option '%c'\n"),
		    argv[1][1]);
		usage(B_FALSE);
	}

	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing property=value "
		    "argument\n"));
		usage(B_FALSE);
	}

	if (argc < 3) {
		(void) fprintf(stderr, gettext("missing pool name\n"));
		usage(B_FALSE);
	}

	if (argc > 4) {
		(void) fprintf(stderr, gettext("too many pool names\n"));
		usage(B_FALSE);
	}

	cb.cb_propname = argv[1];
	cb.cb_type = ZFS_TYPE_POOL;
	cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
	cb.cb_value = strchr(cb.cb_propname, '=');
	if (cb.cb_value == NULL) {
		(void) fprintf(stderr, gettext("missing value in "
		    "property=value argument\n"));
		usage(B_FALSE);
	}

	*(cb.cb_value) = '\0';
	cb.cb_value++;
	argc -= 2;
	argv += 2;

	/* argv[0] is pool name */
	if (!is_pool(argv[0])) {
		(void) fprintf(stderr,
		    gettext("cannot open '%s': is not a pool\n"), argv[0]);
		return (EINVAL);
	}

	/* argv[1], when supplied, is vdev name */
	if (argc == 2) {

		if (strcmp(argv[1], "root") == 0)
			vdev = strdup("root-0");
		else
			vdev = strdup(argv[1]);

		if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
			(void) fprintf(stderr, gettext(
			    "cannot find '%s' in '%s': device not in pool\n"),
			    vdev, argv[0]);
			free(vdev);
			return (EINVAL);
		}
		cb.cb_vdevs.cb_names = &vdev;
		cb.cb_vdevs.cb_names_count = 1;
		cb.cb_type = ZFS_TYPE_VDEV;
	}

	error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
	    B_FALSE, set_callback, &cb);

	if (vdev != NULL)
		free(vdev);

	return (error);
}

/* Add up the total number of bytes left to initialize/trim across all vdevs */
static uint64_t
vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
{
	uint64_t bytes_remaining;
	nvlist_t **child;
	uint_t c, children;
	vdev_stat_t *vs;

	assert(activity == ZPOOL_WAIT_INITIALIZE ||
	    activity == ZPOOL_WAIT_TRIM);

	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);

	if (activity == ZPOOL_WAIT_INITIALIZE &&
	    vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
		bytes_remaining = vs->vs_initialize_bytes_est -
		    vs->vs_initialize_bytes_done;
	else if (activity == ZPOOL_WAIT_TRIM &&
	    vs->vs_trim_state == VDEV_TRIM_ACTIVE)
		bytes_remaining = vs->vs_trim_bytes_est -
		    vs->vs_trim_bytes_done;
	else
		bytes_remaining = 0;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	for (c = 0; c < children; c++)
		bytes_remaining += vdev_activity_remaining(child[c], activity);

	return (bytes_remaining);
}

/* Add up the total number of bytes left to rebuild across top-level vdevs */
static uint64_t
vdev_activity_top_remaining(nvlist_t *nv)
{
	uint64_t bytes_remaining = 0;
	nvlist_t **child;
	uint_t children;
	int error;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	for (uint_t c = 0; c < children; c++) {
		vdev_rebuild_stat_t *vrs;
		uint_t i;

		error = nvlist_lookup_uint64_array(child[c],
		    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
		if (error == 0) {
			if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
				bytes_remaining += (vrs->vrs_bytes_est -
				    vrs->vrs_bytes_rebuilt);
			}
		}
	}

	return (bytes_remaining);
}

/* Whether any vdevs are 'spare' or 'replacing' vdevs */
static boolean_t
vdev_any_spare_replacing(nvlist_t *nv)
{
	nvlist_t **child;
	uint_t c, children;
	const char *vdev_type;

	(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);

	if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
	    strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
	    strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
		return (B_TRUE);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	for (c = 0; c < children; c++) {
		if (vdev_any_spare_replacing(child[c]))
			return (B_TRUE);
	}

	return (B_FALSE);
}

typedef struct wait_data {
	char *wd_poolname;
	boolean_t wd_scripted;
	boolean_t wd_exact;
	boolean_t wd_headers_once;
	boolean_t wd_should_exit;
	/* Which activities to wait for */
	boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
	float wd_interval;
	pthread_cond_t wd_cv;
	pthread_mutex_t wd_mutex;
} wait_data_t;

/*
 * Print to stdout a single line, containing one column for each activity
 * that we are waiting for, specifying how many bytes of work are left for
 * that activity.
 */
static void
print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
{
	nvlist_t *config, *nvroot;
	uint_t c;
	int i;
	pool_checkpoint_stat_t *pcs = NULL;
	pool_scan_stat_t *pss = NULL;
	pool_removal_stat_t *prs = NULL;
	pool_raidz_expand_stat_t *pres = NULL;
	const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
	    "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};
	int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];

	/* Calculate the width of each column */
	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
		/*
		 * Make sure we have enough space in the col for pretty-printed
		 * numbers and for the column header, and then leave a couple
		 * spaces between cols for readability.
		 */
		col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
	}

	if (timestamp_fmt != NODATE)
		print_timestamp(timestamp_fmt);

	/* Print header if appropriate */
	int term_height = terminal_height();
	boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
	    row % (term_height-1) == 0);
	if (!wd->wd_scripted && (row == 0 || reprint_header)) {
		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
			if (wd->wd_enabled[i])
				(void) printf("%*s", col_widths[i], headers[i]);
		}
		(void) fputc('\n', stdout);
	}

	/* Bytes of work remaining in each activity */
	int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};

	bytes_rem[ZPOOL_WAIT_FREE] =
	    zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);

	config = zpool_get_config(zhp, NULL);
	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
	if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
		bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
	if (prs != NULL && prs->prs_state == DSS_SCANNING)
		bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
		    prs->prs_copied;

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
	if (pss != NULL && pss->pss_state == DSS_SCANNING &&
	    pss->pss_pass_scrub_pause == 0) {
		int64_t rem = pss->pss_to_examine - pss->pss_issued;
		if (pss->pss_func == POOL_SCAN_SCRUB)
			bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
		else
			bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
	} else if (check_rebuilding(nvroot, NULL)) {
		bytes_rem[ZPOOL_WAIT_RESILVER] =
		    vdev_activity_top_remaining(nvroot);
	}

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
	if (pres != NULL && pres->pres_state == DSS_SCANNING) {
		int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;
		bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;
	}

	bytes_rem[ZPOOL_WAIT_INITIALIZE] =
	    vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
	bytes_rem[ZPOOL_WAIT_TRIM] =
	    vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);

	/*
	 * A replace finishes after resilvering finishes, so the amount of work
	 * left for a replace is the same as for resilvering.
	 *
	 * It isn't quite correct to say that if we have any 'spare' or
	 * 'replacing' vdevs and a resilver is happening, then a replace is in
	 * progress, like we do here. When a hot spare is used, the faulted vdev
	 * is not removed after the hot spare is resilvered, so parent 'spare'
	 * vdev is not removed either. So we could have a 'spare' vdev, but be
	 * resilvering for a different reason. However, we use it as a heuristic
	 * because we don't have access to the DTLs, which could tell us whether
	 * or not we have really finished resilvering a hot spare.
	 */
	if (vdev_any_spare_replacing(nvroot))
		bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];

	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
		char buf[64];
		if (!wd->wd_enabled[i])
			continue;

		if (wd->wd_exact) {
			(void) snprintf(buf, sizeof (buf), "%" PRIi64,
			    bytes_rem[i]);
		} else {
			zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
		}

		if (wd->wd_scripted)
			(void) printf(i == 0 ? "%s" : "\t%s", buf);
		else
			(void) printf(" %*s", col_widths[i] - 1, buf);
	}
	(void) printf("\n");
	(void) fflush(stdout);
}

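/*
 * Background thread for 'zpool wait' when an interval was given: refresh
 * the pool's stats and print a status row each interval until signaled to
 * exit by the main thread.
 */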
static void *
wait_status_thread(void *arg)
{
	wait_data_t *wd = (wait_data_t *)arg;
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
		return (void *)(1);

	for (int row = 0; ; row++) {
		boolean_t missing;
		struct timespec timeout;
		int ret = 0;
		(void) clock_gettime(CLOCK_REALTIME, &timeout);

		if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
		    zpool_props_refresh(zhp) != 0) {
			zpool_close(zhp);
			return (void *)(uintptr_t)(missing ? 0 : 1);
		}

		print_wait_status_row(wd, zhp, row);

		timeout.tv_sec += floor(wd->wd_interval);
		long nanos = timeout.tv_nsec +
		    (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
		if (nanos >= NANOSEC) {
			timeout.tv_sec++;
			timeout.tv_nsec = nanos - NANOSEC;
		} else {
			timeout.tv_nsec = nanos;
		}
		(void) pthread_mutex_lock(&wd->wd_mutex);
		if (!wd->wd_should_exit)
			ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
			    &timeout);
		(void) pthread_mutex_unlock(&wd->wd_mutex);
		if (ret == 0) {
			break; /* signaled by main thread */
		} else if (ret != ETIMEDOUT) {
			(void) fprintf(stderr, gettext("pthread_cond_timedwait "
			    "failed: %s\n"), strerror(ret));
			zpool_close(zhp);
			return (void *)(uintptr_t)(1);
		}
	}

	zpool_close(zhp);
	return (void *)(0);
}

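/*
 * zpool wait [-Hp] [-T d|u] [-t <activity>[,...]] <pool> [interval]
 *
 * Block until all of the selected background activities on the pool have
 * completed, optionally printing remaining-work status at each interval.
 */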
int
zpool_do_wait(int argc, char **argv)
{
	boolean_t verbose = B_FALSE;
	int c, i;
	unsigned long count;
	pthread_t status_thr;
	int error = 0;
	zpool_handle_t *zhp;

	wait_data_t wd;
	wd.wd_scripted = B_FALSE;
	wd.wd_exact = B_FALSE;
	wd.wd_headers_once = B_FALSE;
	wd.wd_should_exit = B_FALSE;

	(void) pthread_mutex_init(&wd.wd_mutex, NULL);
	(void) pthread_cond_init(&wd.wd_cv, NULL);

	/* By default, wait for all types of activity. */
	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
		wd.wd_enabled[i] = B_TRUE;

	while ((c = getopt(argc, argv, "HpT:t:")) != -1) {
		switch (c) {
		case 'H':
			wd.wd_scripted = B_TRUE;
			break;
		case 'n':
			wd.wd_headers_once = B_TRUE;
			break;
		case 'p':
			wd.wd_exact = B_TRUE;
			break;
		case 'T':
			get_timestamp_arg(*optarg);
			break;
		case 't':
			/* Reset activities array */
			memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));

			for (char *tok; (tok = strsep(&optarg, ",")); ) {
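				/*
				 * Activity names, in the same order as the
				 * corresponding zpool_wait_activity_t values,
				 * so the match index is used directly as the
				 * activity index.
				 */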
				static const char *const col_opts[] = {
				    "discard", "free", "initialize", "replace",
				    "remove", "resilver", "scrub", "trim",
				    "raidz_expand" };

				for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
					if (strcmp(tok, col_opts[i]) == 0) {
						wd.wd_enabled[i] = B_TRUE;
						goto found;
					}

				(void) fprintf(stderr,
				    gettext("invalid activity '%s'\n"), tok);
				usage(B_FALSE);
found:;
			}
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	get_interval_count(&argc, argv, &wd.wd_interval, &count);
	if (count != 0) {
		/* This subcmd only accepts an interval, not a count */
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	if (wd.wd_interval != 0)
		verbose = B_TRUE;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	wd.wd_poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
		return (1);

	if (verbose) {
		/*
		 * We use a separate thread for printing status updates because
		 * the main thread will call lzc_wait(), which blocks as long
		 * as an activity is in progress, which can be a long time.
		 */
		if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
		    != 0) {
			(void) fprintf(stderr, gettext("failed to create "
			    "status thread: %s\n"), strerror(errno));
			zpool_close(zhp);
			return (1);
		}
	}

	/*
	 * Loop over all activities that we are supposed to wait for until none
	 * of them are in progress. Note that this means we can end up waiting
	 * for more activities to complete than just those that were in
	 * progress when we began waiting; if an activity we are interested in
	 * begins while we are waiting for another activity, we will wait for
	 * both to complete before exiting.
	 */
	for (;;) {
		boolean_t missing = B_FALSE;
		boolean_t any_waited = B_FALSE;

		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
			boolean_t waited;

			if (!wd.wd_enabled[i])
				continue;

			error = zpool_wait_status(zhp, i, &missing, &waited);
			if (error != 0 || missing)
				break;

			any_waited = (any_waited || waited);
		}

		if (error != 0 || missing || !any_waited)
			break;
	}

	zpool_close(zhp);

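	/*
	 * Tell the status thread to exit and wake it so it does not sleep
	 * out the rest of its interval, then collect its exit status.
	 */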
	if (verbose) {
		uintptr_t status;
		(void) pthread_mutex_lock(&wd.wd_mutex);
		wd.wd_should_exit = B_TRUE;
		(void) pthread_cond_signal(&wd.wd_cv);
		(void) pthread_mutex_unlock(&wd.wd_mutex);
		(void) pthread_join(status_thr, (void *)&status);
		if (status != 0)
			error = status;
	}

	(void) pthread_mutex_destroy(&wd.wd_mutex);
	(void) pthread_cond_destroy(&wd.wd_cv);
	return (error);
}

/*
 * zpool ddtprune -d|-p <amount> <pool>
 *
 *	-d <days>	Prune entries <days> old and older
 *	-p <percent>	Prune <percent> amount of entries
 *
 * Prune single reference entries from DDT to satisfy the amount specified.
 */
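/*
 * For example, "zpool ddtprune -d 30 tank" targets single-reference entries
 * at least 30 days old, while "zpool ddtprune -p 25 tank" prunes 25% of them.
 */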
int
zpool_do_ddt_prune(int argc, char **argv)
{
	zpool_ddt_prune_unit_t unit = ZPOOL_DDT_PRUNE_NONE;
	uint64_t amount = 0;
	zpool_handle_t *zhp;
	char *endptr;
	int c;

	while ((c = getopt(argc, argv, "d:p:")) != -1) {
		switch (c) {
		case 'd':
			if (unit == ZPOOL_DDT_PRUNE_PERCENTAGE) {
				(void) fprintf(stderr, gettext("-d cannot be "
				    "combined with -p option\n"));
				usage(B_FALSE);
			}
			errno = 0;
			amount = strtoull(optarg, &endptr, 0);
			if (errno != 0 || *endptr != '\0' || amount == 0) {
				(void) fprintf(stderr,
				    gettext("invalid days value\n"));
				usage(B_FALSE);
			}
			amount *= 86400;	/* convert days to seconds */
			unit = ZPOOL_DDT_PRUNE_AGE;
			break;
		case 'p':
			if (unit == ZPOOL_DDT_PRUNE_AGE) {
				(void) fprintf(stderr, gettext("-p cannot be "
				    "combined with -d option\n"));
				usage(B_FALSE);
			}
			errno = 0;
			amount = strtoull(optarg, &endptr, 0);
			if (errno != 0 || *endptr != '\0' ||
			    amount == 0 || amount > 100) {
				(void) fprintf(stderr,
				    gettext("invalid percentage value\n"));
				usage(B_FALSE);
			}
			unit = ZPOOL_DDT_PRUNE_PERCENTAGE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}
	argc -= optind;
	argv += optind;

	if (unit == ZPOOL_DDT_PRUNE_NONE) {
		(void) fprintf(stderr,
		    gettext("missing amount option (-d|-p <value>)\n"));
		usage(B_FALSE);
	} else if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	} else if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}
	zhp = zpool_open(g_zfs, argv[0]);
	if (zhp == NULL)
		return (-1);

	int error = zpool_ddt_prune(zhp, unit, amount);

	zpool_close(zhp);

	return (error);
}

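/*
 * Find 'command' in command_table[]; entries with a NULL name act as
 * separators in the table and are skipped. On success, store the table
 * index in *idx and return 0; otherwise return 1.
 */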
static int
find_command_idx(const char *command, int *idx)
{
	for (int i = 0; i < NCOMMAND; ++i) {
		if (command_table[i].name == NULL)
			continue;

		if (strcmp(command, command_table[i].name) == 0) {
			*idx = i;
			return (0);
		}
	}
	return (1);
}

/*
 * Display version message
 */
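/* With -j/--json, the version information is wrapped in an nvlist and
 * printed as JSON; otherwise it is printed as plain text. */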
static int
zpool_do_version(int argc, char **argv)
{
	int c;
	nvlist_t *jsobj = NULL, *zfs_ver = NULL;
	boolean_t json = B_FALSE;

	struct option long_options[] = {
		{"json", no_argument, NULL, 'j'},
		{0, 0, 0, 0}	/* getopt_long() requires a zeroed sentinel */
	};

	while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {
		switch (c) {
		case 'j':
			json = B_TRUE;
			jsobj = zpool_json_schema(0, 1);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	if (argc != 0) {
		(void) fprintf(stderr, "too many arguments\n");
		usage(B_FALSE);
	}

	if (json) {
		zfs_ver = zfs_version_nvlist();
		if (zfs_ver) {
			fnvlist_add_nvlist(jsobj, "zfs_version", zfs_ver);
			zcmd_print_json(jsobj);
			fnvlist_free(zfs_ver);
			return (0);
		} else
			return (-1);
	} else
		return (zfs_version_print() != 0);
}

/* Display documentation */
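/* e.g. "zpool help iostat" runs "man zpool-iostat". */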
static int
zpool_do_help(int argc, char **argv)
{
	char page[MAXNAMELEN];
	if (argc < 3 || strcmp(argv[2], "zpool") == 0)
		(void) strcpy(page, "zpool");
	else if (strcmp(argv[2], "concepts") == 0 ||
	    strcmp(argv[2], "props") == 0)
		(void) snprintf(page, sizeof (page), "zpool%s", argv[2]);
	else
		(void) snprintf(page, sizeof (page), "zpool-%s", argv[2]);

	(void) execlp("man", "man", page, NULL);

	fprintf(stderr, "couldn't run man program: %s", strerror(errno));
	return (-1);
}

/*
 * Do zpool_load_compat() and print error message on failure
 */
static zpool_compat_status_t
zpool_do_load_compat(const char *compat, boolean_t *list)
{
	char report[1024];

	zpool_compat_status_t ret;

	ret = zpool_load_compat(compat, list, report, 1024);
	switch (ret) {

	case ZPOOL_COMPATIBILITY_OK:
		break;

	case ZPOOL_COMPATIBILITY_NOFILES:
	case ZPOOL_COMPATIBILITY_BADFILE:
	case ZPOOL_COMPATIBILITY_BADTOKEN:
		(void) fprintf(stderr, "Error: %s\n", report);
		break;

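	/*
	 * A warn-level token only draws a warning; treat the compatibility
	 * file as usable.
	 */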
	case ZPOOL_COMPATIBILITY_WARNTOKEN:
		(void) fprintf(stderr, "Warning: %s\n", report);
		ret = ZPOOL_COMPATIBILITY_OK;
		break;
	}
	return (ret);
}

int
main(int argc, char **argv)
{
	int ret = 0;
	int i = 0;
	char *cmdname;
	char **newargv;

	(void) setlocale(LC_ALL, "");
	(void) setlocale(LC_NUMERIC, "C");
	(void) textdomain(TEXT_DOMAIN);
	srand(time(NULL));

	opterr = 0;

	/*
	 * Make sure the user has specified some command.
	 */
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing command\n"));
		usage(B_FALSE);
	}

	cmdname = argv[1];

	/*
	 * Special case '-?'
	 */
	if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
		usage(B_TRUE);

	/*
	 * Special case '-V|--version'
	 */
	if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
		return (zfs_version_print() != 0);

	/*
	 * Special case 'help'
	 */
	if (strcmp(cmdname, "help") == 0)
		return (zpool_do_help(argc, argv));

	if ((g_zfs = libzfs_init()) == NULL) {
		(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
		return (1);
	}

	libzfs_print_on_error(g_zfs, B_TRUE);

	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));

	/*
	 * Many commands modify input strings for string parsing reasons.
	 * We create a copy to protect the original argv.
	 */
	newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
	for (i = 0; i < argc; i++)
		newargv[i] = strdup(argv[i]);
	newargv[argc] = NULL;

	/*
	 * Run the appropriate command.
	 */
	if (find_command_idx(cmdname, &i) == 0) {
		current_command = &command_table[i];
		ret = command_table[i].func(argc - 1, newargv + 1);
	} else if (strchr(cmdname, '=')) {
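		/*
		 * A bare "<property>=<value>" first argument is shorthand
		 * for "zpool set <property>=<value> ...".
		 */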
		verify(find_command_idx("set", &i) == 0);
		current_command = &command_table[i];
		ret = command_table[i].func(argc, newargv);
	} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
		/*
		 * 'freeze' is a vile debugging abomination, so we treat
		 * it as such.
		 */
		zfs_cmd_t zc = {"\0"};

		(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
		ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
		if (ret != 0) {
			(void) fprintf(stderr,
			    gettext("failed to freeze pool: %d\n"), errno);
			ret = 1;
		}

		log_history = 0;
	} else {
		(void) fprintf(stderr, gettext("unrecognized "
		    "command '%s'\n"), cmdname);
		usage(B_FALSE);
	}

	for (i = 0; i < argc; i++)
		free(newargv[i]);
	free(newargv);

	if (ret == 0 && log_history)
		(void) zpool_log_history(g_zfs, history_str);

	libzfs_fini(g_zfs);

	/*
	 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
	 * for the purposes of running ::findleaks.
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	return (ret);
}