GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2016, 2017, Intel Corporation.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2023, Klara Inc.
 */

/*
 * ZFS syseventd module.
 *
 * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 * 1. Search for any vdevs whose devid matches that of the newly added
 *    device.
 *
 * 2. If no vdevs are found, then search for any vdevs whose udev path
 *    matches that of the new device.
 *
 * 3. If no vdevs match by either method, then ignore the event.
 *
 * 4. Attempt to online the device with a flag to indicate that it should
 *    be unspared when resilvering completes. If this succeeds, then the
 *    same device was inserted and we should continue normally.
 *
 * 5. If the pool does not have the 'autoreplace' property set, attempt to
 *    online the device again without the unspare flag, which will
 *    generate a FMA fault.
 *
 * 6. If the pool has the 'autoreplace' property set, and the matching vdev
 *    is a whole disk, then label the new disk and attempt a 'zpool
 *    replace'.
 *
 * The module responds to EC_DEV_ADD events. The special ESC_ZFS_VDEV_CHECK
 * event indicates that a device failed to open during pool load, but the
 * autoreplace property was set. In this case, we deferred the associated
 * FMA fault until our module had a chance to process the autoreplace logic.
 * If the device could not be replaced, then the second online attempt will
 * trigger the FMA fault that we skipped earlier.
 *
 * On Linux udev provides a disk insert for both the disk and the partition.
 */

#include <ctype.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libzfs.h>
#include <libzutil.h>
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include "zfs_agents.h"
#include "../zed_log.h"

#define	DEV_BYID_PATH	"/dev/disk/by-id/"
#define	DEV_BYPATH_PATH	"/dev/disk/by-path/"
#define	DEV_BYVDEV_PATH	"/dev/disk/by-vdev/"

typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);

libzfs_handle_t *g_zfshdl;
list_t g_pool_list;	/* list of unavailable pools at initialization */
list_t g_device_list;	/* list of disks with asynchronous label request */
tpool_t *g_tpool;
boolean_t g_enumeration_done;
pthread_t g_zfs_tid;	/* zfs_enum_pools() thread */

typedef struct unavailpool {
	zpool_handle_t *uap_zhp;
	list_node_t uap_node;
} unavailpool_t;

typedef struct pendingdev {
	char pd_physpath[128];
	list_node_t pd_node;
} pendingdev_t;

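/*
 * Return the state of a pool's top-level vdev tree, as recorded in the
 * ZPOOL_CONFIG_VDEV_STATS array of its configuration nvlist.
 */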
static int
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	unsigned int c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}

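/*
 * zpool_iter() callback used at startup: pools whose top-level state is
 * below VDEV_STATE_DEGRADED are queued on the caller's list for later
 * processing; handles for available pools are closed immediately.
 */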
static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
	zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
	    zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));

	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
		unavailpool_t *uap;
		uap = malloc(sizeof (unavailpool_t));
		if (uap == NULL) {
			perror("malloc");
			exit(EXIT_FAILURE);
		}

		uap->uap_zhp = zhp;
		list_insert_tail((list_t *)data, uap);
	} else {
		zpool_close(zhp);
	}
	return (0);
}

/*
 * Write an array of strings to the zed log
 */
static void lines_to_zed_log_msg(char **lines, int lines_cnt)
{
	int i;
	for (i = 0; i < lines_cnt; i++) {
		zed_log_msg(LOG_INFO, "%s", lines[i]);
	}
}

/*
 * Two-stage replace on Linux: because we get disk notifications for both
 * the disk and its partitions, we can wait for the partitioned disk slice
 * to show up.
 *
 * The first stage tags the disk, initiates async partitioning, and returns.
 * The second stage finds the tag and proceeds to ZFS labeling/replace.
 *
 * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
 *
 * 1. physical match with no fs, no partition
 *	tag it top, partition disk
 *
 * 2. physical match again, see partition and tag
 */

/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system. If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk. This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished). If this succeeds, then we're done. If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected. If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'. If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * Also can arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
	const char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL, faulted = 0ULL;
	uint64_t guid = 0ULL;
	uint64_t is_spare = 0;
	const char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char pathbuf[PATH_MAX];
	int ret;
	int online_flag = ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE;
	boolean_t is_sd = B_FALSE;
	boolean_t is_mpath_wholedisk = B_FALSE;
	uint_t c;
	vdev_stat_t *vs;
	char **lines = NULL;
	int lines_cnt = 0;
	int rc;

	/*
	 * Get the persistent path, typically under the '/dev/disk/by-id' or
	 * '/dev/disk/by-vdev' directories. Note that this path can change
	 * when a vdev is replaced with a new disk.
	 */
	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);
		return;
	}

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);

	update_vdev_config_dev_sysfs_path(vdev, path,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);

	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_FAULTED, &faulted);

	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_IS_SPARE, &is_spare);

	/*
	 * Special case:
	 *
	 * We've seen cases where a disk won't have a ZPOOL_CONFIG_PHYS_PATH
	 * entry in its config. For example, on this force-faulted disk:
	 *
	 *	children[0]:
	 *	   type: 'disk'
	 *	   id: 0
	 *	   guid: 14309659774640089719
	 *	   path: '/dev/disk/by-vdev/L28'
	 *	   whole_disk: 0
	 *	   DTL: 654
	 *	   create_txg: 4
	 *	   com.delphix:vdev_zap_leaf: 1161
	 *	   faulted: 1
	 *	   aux_state: 'external'
	 *	children[1]:
	 *	   type: 'disk'
	 *	   id: 1
	 *	   guid: 16002508084177980912
	 *	   path: '/dev/disk/by-vdev/L29'
	 *	   devid: 'dm-uuid-mpath-35000c500a61d68a3'
	 *	   phys_path: 'L29'
	 *	   vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
	 *	   whole_disk: 0
	 *	   DTL: 1028
	 *	   create_txg: 4
	 *	   com.delphix:vdev_zap_leaf: 131
	 *
	 * If the disk's path is a /dev/disk/by-vdev/ path, then we can infer
	 * the ZPOOL_CONFIG_PHYS_PATH from the by-vdev disk name.
	 */
	if (physpath == NULL && path != NULL) {
		/* If path begins with "/dev/disk/by-vdev/" ... */
		if (strncmp(path, DEV_BYVDEV_PATH,
		    strlen(DEV_BYVDEV_PATH)) == 0) {
			/* Set physpath to the char after "/dev/disk/by-vdev" */
			physpath = &path[strlen(DEV_BYVDEV_PATH)];
		}
	}

	/*
	 * We don't want to autoreplace offlined disks. However, we do want to
	 * replace force-faulted disks (`zpool offline -f`). Force-faulted
	 * disks have both offline=1 and faulted=1 in the nvlist.
	 */
	if (offline && !faulted) {
		zed_log_msg(LOG_INFO, "%s: %s is offline, skip autoreplace",
		    __func__, path);
		return;
	}

	is_mpath_wholedisk = is_mpath_whole_disk(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " %s blank disk, %s mpath blank disk, %s labeled, enc sysfs '%s', "
	    "(guid %llu)",
	    zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL",
	    wholedisk ? "is" : "not",
	    is_mpath_wholedisk ? "is" : "not",
	    labeled ? "is" : "not",
	    enc_sysfs_path,
	    (long long unsigned int)guid);

	/*
	 * The VDEV guid is preferred for identification (gets passed in path)
	 */
	if (guid != 0) {
		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
		    (long long unsigned int)guid);
	} else {
		/*
		 * otherwise use path sans partition suffix for whole disks
		 */
		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			char *spath = zfs_strip_partition(fullpath);
			if (!spath) {
				zed_log_msg(LOG_INFO, "%s: Can't alloc",
				    __func__);
				return;
			}

			(void) strlcpy(fullpath, spath, sizeof (fullpath));
			free(spath);
		}
	}

	if (is_spare)
		online_flag |= ZFS_ONLINE_SPARE;

	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath, online_flag, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO,
		    "  zpool_vdev_online: vdev '%s' ('%s') is "
		    "%s", fullpath, physpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
		return;
	}

	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = B_TRUE;

	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_mpath_wholedisk) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a blank disk for '%s' ('%s')", fullpath,
		    physpath);
		return;
	}

	/*
	 * Convert physical path into its current device node. Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, pathbuf) == NULL && !is_mpath_wholedisk) {
		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		int err = zpool_vdev_online(zhp, fullpath,
		    ZFS_ONLINE_FORCEFAULT, &newstate);

		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s) "
		    "err %d, new state %d",
		    fullpath, libzfs_error_description(g_zfshdl), err,
		    err ? (int)newstate : 0);
		return;
	}

	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_REMOVED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		zed_log_msg(LOG_INFO, "  not autoreplacing since disk isn't in "
		    "a bad state (currently %llu)", vs->vs_state);
		return;
	}

	nvlist_lookup_string(vdev, "new_devid", &new_devid);
	if (is_mpath_wholedisk) {
		/* Don't label device mapper or multipath disks. */
		zed_log_msg(LOG_INFO,
		    "  it's a multipath wholedisk, don't label");
		rc = zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
		    &lines_cnt);
		if (rc != 0) {
			zed_log_msg(LOG_INFO,
			    "  zpool_prepare_disk: could not "
			    "prepare '%s' (%s), path '%s', rc = %d", fullpath,
			    libzfs_error_description(g_zfshdl), path, rc);
			if (lines_cnt > 0) {
				zed_log_msg(LOG_INFO,
				    "  zfs_prepare_disk output:");
				lines_to_zed_log_msg(lines, lines_cnt);
			}
			libzfs_free_str_array(lines, lines_cnt);
			return;
		}
	} else if (!labeled) {
		/*
		 * we're auto-replacing a raw disk, so label it first
		 */
		char *leafname;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label. Before we can label the disk, we need
		 * to map the physical string that was matched on to the under
		 * lying device node.
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		leafname = strrchr(pathbuf, '/') + 1;

		/*
		 * Attempt to write out the label.
		 */
		rc = zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
		    vdev, "autoreplace", &lines, &lines_cnt);
		if (rc != 0) {
			zed_log_msg(LOG_WARNING,
			    "  zpool_prepare_and_label_disk: could not "
			    "label '%s' (%s), rc = %d", leafname,
			    libzfs_error_description(g_zfshdl), rc);
			if (lines_cnt > 0) {
				zed_log_msg(LOG_INFO,
				    "  zfs_prepare_disk output:");
				lines_to_zed_log_msg(lines, lines_cnt);
			}
			libzfs_free_str_array(lines, lines_cnt);

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		/*
		 * The disk labeling is asynchronous on Linux. Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		device = malloc(sizeof (pendingdev_t));
		if (device == NULL) {
			perror("malloc");
			exit(EXIT_FAILURE);
		}

		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_NOTICE, "  zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

	} else /* labeled */ {
		boolean_t found = B_FALSE;
		/*
		 * match up with request above to label the disk
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);
				free(device);
				found = B_TRUE;
				break;
			}
			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);
		}
		if (!found) {
			/* unexpected partition slice encountered */
			zed_log_msg(LOG_WARNING, "labeled disk %s was "
			    "unexpected here", fullpath);
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);

		/*
		 * Paths that begin with '/dev/disk/by-id/' will change and so
		 * they must be updated before calling zpool_vdev_attach().
		 */
		if (strncmp(path, DEV_BYID_PATH, strlen(DEV_BYID_PATH)) == 0) {
			(void) snprintf(pathbuf, sizeof (pathbuf), "%s%s",
			    DEV_BYID_PATH, new_devid);
			zed_log_msg(LOG_INFO, "  zpool_label_disk: path '%s' "
			    "replaced by '%s'", path, pathbuf);
			path = pathbuf;
		}
	}

	libzfs_free_str_array(lines, lines_cnt);

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach(). While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		return;
	}
	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    (const nvlist_t **)&newvd, 1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	/*
	 * Wait for udev to verify the links exist, then auto-replace
	 * the leaf disk at same physical location.
	 */
	if (zpool_label_disk_wait(path, DISK_LABEL_WAIT) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: pool '%s', after labeling "
		    "replacement disk, the expected disk partition link '%s' "
		    "is missing after waiting %u ms",
		    zpool_get_name(zhp), path, DISK_LABEL_WAIT);
		nvlist_free(nvroot);
		return;
	}

	/*
	 * Prefer sequential resilvering when supported (mirrors and dRAID),
	 * otherwise fall back to a traditional healing resilver.
	 */
	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);
	if (ret != 0) {
		ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
		    B_TRUE, B_FALSE);
	}

	zed_log_msg(LOG_WARNING, "  zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));

	nvlist_free(nvroot);
}

/*
 * Utility functions to find a vdev matching given criteria.
 */
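/*
 * Search criteria and results for the iterators below: dd_compare/dd_prop
 * select vdevs by an nvlist property value, dd_pool_guid/dd_vdev_guid
 * select by guid, dd_func is applied to each match, and dd_found and
 * dd_num_spares report the outcome.
 */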
typedef struct dev_data {
	const char		*dd_compare;
	const char		*dd_prop;
	zfs_process_func_t	dd_func;
	boolean_t		dd_found;
	boolean_t		dd_islabeled;
	uint64_t		dd_pool_guid;
	uint64_t		dd_vdev_guid;
	uint64_t		dd_new_vdev_guid;
	const char		*dd_new_devid;
	uint64_t		dd_num_spares;
} dev_data_t;

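/*
 * Recursively walk a pool's vdev tree (including spares and L2ARC
 * devices) looking for a vdev that matches dd_vdev_guid, or whose
 * dd_prop value equals dd_compare, and invoke dd_func on each match.
 */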
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
	dev_data_t *dp = data;
	const char *path = NULL;
	uint_t c, children;
	nvlist_t **child;
	uint64_t guid = 0;
	uint64_t isspare = 0;

	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}

	/*
	 * Iterate over any spares and cache devices
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}

	/* once a vdev was matched and processed there is nothing left to do */
	if (dp->dd_found && dp->dd_num_spares == 0)
		return;
	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &guid);

	/*
	 * Match by GUID if available; otherwise fall back to devid or
	 * physical path.
	 */
	if (dp->dd_vdev_guid != 0) {
		if (guid != dp->dd_vdev_guid)
			return;
		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched on %llu", guid);
		dp->dd_found = B_TRUE;

	} else if (dp->dd_compare != NULL) {
		/*
		 * NOTE: On Linux there is an event for partition, so unlike
		 * illumos, substring matching is not required to accommodate
		 * the partition suffix. An exact match will be present in
		 * the dp->dd_compare value.
		 * If the attached disk already contains a vdev GUID, it means
		 * the disk is not clean. In such a scenario, the physical path
		 * would be a match that makes the disk faulted when trying to
		 * online it. So, we would only want to proceed if either GUID
		 * matches with the last attached disk or the disk is in clean
		 * state.
		 */
		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strcmp(dp->dd_compare, path) != 0) {
			return;
		}
		if (dp->dd_new_vdev_guid != 0 && dp->dd_new_vdev_guid != guid) {
			zed_log_msg(LOG_INFO, "  %s: no match (GUID:%llu"
			    " != vdev GUID:%llu)", __func__,
			    dp->dd_new_vdev_guid, guid);
			return;
		}

		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched %s on %s",
		    dp->dd_prop, path);
		dp->dd_found = B_TRUE;

		/* pass the new devid for use by auto-replacing code */
		if (dp->dd_new_devid != NULL) {
			(void) nvlist_add_string(nvl, "new_devid",
			    dp->dd_new_devid);
		}
	}

	if (dp->dd_found == B_TRUE && nvlist_lookup_uint64(nvl,
	    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
		dp->dd_num_spares++;

	(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}

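/*
 * Thread-pool callback: mount and share the datasets of a pool that has
 * transitioned from unavailable to available, then release the handle.
 */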
static void
zfs_enable_ds(void *arg)
{
	unavailpool_t *pool = (unavailpool_t *)arg;

	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0, 512);
	zpool_close(pool->uap_zhp);
	free(pool);
}

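/*
 * zpool_iter() callback: apply zfs_iter_vdev() to the pool's vdev tree
 * when the pool guid matches (or none was given), and kick off async
 * dataset enablement for pools that were unavailable at startup.
 */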
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;
	unavailpool_t *pool;

	zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
	    zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);

	/*
	 * For each vdev in this pool, look for a match to apply dd_func
	 */
	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	} else {
		zed_log_msg(LOG_INFO, "%s: no config\n", __func__);
	}

	/*
	 * if this pool was originally unavailable,
	 * then enable its datasets asynchronously
	 */
	if (g_enumeration_done) {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {

			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)))
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				list_remove(&g_pool_list, pool);
				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
				    pool);
				break;
			}
		}
	}

	zpool_close(zhp);

	/* cease iteration after a match */
	return (dp->dd_found && dp->dd_num_spares == 0);
}

/*
 * Given a physical device location, iterate over all
 * (pool, vdev) pairs which correspond to that location.
 */
static boolean_t
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
    boolean_t is_slice, uint64_t new_vdev_guid)
{
	dev_data_t data = { 0 };

	data.dd_compare = physical;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;	/* used by auto replace code */
	data.dd_new_vdev_guid = new_vdev_guid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a device identifier, find any vdevs with a matching by-vdev
 * path. Normally we shouldn't need this as the comparison would be
 * made earlier in the devphys_iter(). For example, if we were replacing
 * /dev/disk/by-vdev/L28, normally devphys_iter() would match the
 * ZPOOL_CONFIG_PHYS_PATH of "L28" from the old disk config to "L28"
 * of the new disk config. However, we've seen cases where
 * ZPOOL_CONFIG_PHYS_PATH was not in the config for the old disk. Here's
 * an example of a real 2-disk mirror pool where one disk was force
 * faulted:
 *
 *	com.delphix:vdev_zap_top: 129
 *	children[0]:
 *	   type: 'disk'
 *	   id: 0
 *	   guid: 14309659774640089719
 *	   path: '/dev/disk/by-vdev/L28'
 *	   whole_disk: 0
 *	   DTL: 654
 *	   create_txg: 4
 *	   com.delphix:vdev_zap_leaf: 1161
 *	   faulted: 1
 *	   aux_state: 'external'
 *	children[1]:
 *	   type: 'disk'
 *	   id: 1
 *	   guid: 16002508084177980912
 *	   path: '/dev/disk/by-vdev/L29'
 *	   devid: 'dm-uuid-mpath-35000c500a61d68a3'
 *	   phys_path: 'L29'
 *	   vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
 *	   whole_disk: 0
 *	   DTL: 1028
 *	   create_txg: 4
 *	   com.delphix:vdev_zap_leaf: 131
 *
 * So in the case above, the only thing we could compare is the path.
 *
 * We can do this because we assume by-vdev paths are authoritative as physical
 * paths. We could not assume this for normal paths like /dev/sda since the
 * physical location /dev/sda points to could change over time.
 */
static boolean_t
by_vdev_path_iter(const char *by_vdev_path, const char *devid,
    zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = by_vdev_path;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	if (strncmp(by_vdev_path, DEV_BYVDEV_PATH,
	    strlen(DEV_BYVDEV_PATH)) != 0) {
		/* by_vdev_path doesn't start with "/dev/disk/by-vdev/" */
		return (B_FALSE);
	}

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a device identifier, find any vdevs with a matching devid.
 * On Linux we can match devid directly which is always a whole disk.
 */
static boolean_t
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = devid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a device guid, find any vdevs with a matching guid.
 */
static boolean_t
guid_iter(uint64_t pool_guid, uint64_t vdev_guid, const char *devid,
    zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_func = func;
	data.dd_found = B_FALSE;
	data.dd_pool_guid = pool_guid;
	data.dd_vdev_guid = vdev_guid;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Handle an EC_DEV_ADD.ESC_DISK event.
 *
 * illumos
 *	Expects: DEV_PHYS_PATH string in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/dsk/c0t1d0s0' (persistent)
 *	devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
 *	phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
 *
 * linux
 *	provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/sdc1' (not persistent)
 *	devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
 *	phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
 */
static int
zfs_deliver_add(nvlist_t *nvl)
{
	const char *devpath = NULL, *devid = NULL;
	uint64_t pool_guid = 0, vdev_guid = 0;
	boolean_t is_slice;

	/*
	 * Expecting a devid string and an optional physical location and guid
	 */
	if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0) {
		zed_log_msg(LOG_INFO, "%s: no dev identifier\n", __func__);
		return (-1);
	}

	(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
	(void) nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &pool_guid);
	(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &vdev_guid);

	is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);

	zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
	    devid, devpath ? devpath : "NULL", is_slice);

	/*
	 * Iterate over all vdevs looking for a match in the following order:
	 * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
	 * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
	 * 3. ZPOOL_CONFIG_GUID (identifies unique vdev).
	 * 4. ZPOOL_CONFIG_PATH for /dev/disk/by-vdev devices only (since
	 *    by-vdev paths represent physical paths).
	 */
	if (devid_iter(devid, zfs_process_add, is_slice))
		return (0);
	if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
	    is_slice, vdev_guid))
		return (0);
	if (vdev_guid != 0)
		(void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,
		    is_slice);

	if (devpath != NULL) {
		/* Can we match a /dev/disk/by-vdev/ path? */
		char by_vdev_path[MAXPATHLEN];
		snprintf(by_vdev_path, sizeof (by_vdev_path),
		    "/dev/disk/by-vdev/%s", devpath);
		if (by_vdev_path_iter(by_vdev_path, devid, zfs_process_add,
		    is_slice))
			return (0);
	}

	return (0);
}

/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool. In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0 ||
	    data.dd_vdev_guid == 0)
		return (0);

	zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
	    data.dd_pool_guid, data.dd_vdev_guid);

	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}

/*
 * Given a path to a vdev, lookup the vdev's physical size from its
 * config nvlist.
 *
 * Returns the vdev's physical size in bytes on success, 0 on error.
 */
static uint64_t
vdev_size_from_config(zpool_handle_t *zhp, const char *vdev_path)
{
	nvlist_t *nvl = NULL;
	boolean_t avail_spare, l2cache, log;
	vdev_stat_t *vs = NULL;
	uint_t c;

	nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
	if (!nvl)
		return (0);

	verify(nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (!vs) {
		zed_log_msg(LOG_INFO, "%s: no nvlist for '%s'", __func__,
		    vdev_path);
		return (0);
	}

	return (vs->vs_pspace);
}

/*
 * Given a path to a vdev, lookup whether the vdev is a "whole disk" in its
 * config nvlist. "Whole disk" means that ZFS was passed a whole disk
 * at pool creation time, which it partitioned up and has full control over.
 * Thus a partition with wholedisk=1 set tells us that zfs created the
 * partition at creation time. A partition without wholedisk set would have
 * been created externally (like with fdisk) and passed to ZFS.
 *
 * Returns the whole disk value (either 0 or 1).
 */
static uint64_t
vdev_whole_disk_from_config(zpool_handle_t *zhp, const char *vdev_path)
{
	nvlist_t *nvl = NULL;
	boolean_t avail_spare, l2cache, log;
	uint64_t wholedisk = 0;

	nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
	if (!nvl)
		return (0);

	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);

	return (wholedisk);
}

/*
 * If the device size grew by at least about 1% of its new size, return true.
 */
#define	DEVICE_GREW(oldsize, newsize) \
	((newsize > oldsize) && \
	((newsize / (newsize - oldsize)) <= 100))
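
/*
 * Worked example of the macro above: growing a vdev from 100 GiB to
 * 102 GiB gives 102 / (102 - 100) = 51 <= 100, so DEVICE_GREW is true;
 * growing from 1000 GiB to 1001 GiB gives 1001 / 1 = 1001 > 100, so it
 * is false (the increase is under 1% of the new size).
 */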
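
/*
 * zpool_iter() callback for ESC_DEV_DLE events: locate the vdev named by
 * guid or physical path, reopen the pool so the kernel picks up a grown
 * whole disk, and online the vdev to autoexpand it when the pool's
 * 'autoexpand' property is set and the size actually changed.
 */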
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
	boolean_t avail_spare, l2cache;
	nvlist_t *udev_nvl = data;
	nvlist_t *tgt;
	int error;

	const char *tmp_devname;
	char devname[MAXPATHLEN] = "";
	uint64_t guid;

	if (nvlist_lookup_uint64(udev_nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
		sprintf(devname, "%llu", (u_longlong_t)guid);
	} else if (nvlist_lookup_string(udev_nvl, DEV_PHYS_PATH,
	    &tmp_devname) == 0) {
		strlcpy(devname, tmp_devname, MAXPATHLEN);
		zfs_append_partition(devname, MAXPATHLEN);
	} else {
		zed_log_msg(LOG_INFO, "%s: no guid or physpath", __func__);
	}

	zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
	    devname, zpool_get_name(zhp));

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		const char *path;
		char fullpath[MAXPATHLEN];
		uint64_t wholedisk = 0;

		error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
		if (error) {
			zpool_close(zhp);
			return (0);
		}

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		if (wholedisk) {
			char *tmp;
			path = strrchr(path, '/');
			if (path != NULL) {
				tmp = zfs_strip_partition(path + 1);
				if (tmp == NULL) {
					zpool_close(zhp);
					return (0);
				}
			} else {
				zpool_close(zhp);
				return (0);
			}

			(void) strlcpy(fullpath, tmp, sizeof (fullpath));
			free(tmp);

			/*
			 * We need to reopen the pool associated with this
			 * device so that the kernel can update the size of
			 * the expanded device. When expanding there is no
			 * need to restart the scrub from the beginning.
			 */
			boolean_t scrub_restart = B_FALSE;
			(void) zpool_reopen_one(zhp, &scrub_restart);
		} else {
			(void) strlcpy(fullpath, path, sizeof (fullpath));
		}

		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			vdev_state_t newstate;

			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
				/*
				 * If this disk size has not changed, then
				 * there's no need to do an autoexpand. To
				 * check we look at the disk's size in its
				 * config, and compare it to the disk size
				 * that udev is reporting.
				 */
				uint64_t udev_size = 0, conf_size = 0,
				    wholedisk = 0, udev_parent_size = 0;

				/*
				 * Get the size of our disk that udev is
				 * reporting.
				 */
				if (nvlist_lookup_uint64(udev_nvl, DEV_SIZE,
				    &udev_size) != 0) {
					udev_size = 0;
				}

				/*
				 * Get the size of our disk's parent device
				 * from udev (where sda1's parent is sda).
				 */
				if (nvlist_lookup_uint64(udev_nvl,
				    DEV_PARENT_SIZE, &udev_parent_size) != 0) {
					udev_parent_size = 0;
				}

				conf_size = vdev_size_from_config(zhp,
				    fullpath);

				wholedisk = vdev_whole_disk_from_config(zhp,
				    fullpath);

				/*
				 * Only attempt an autoexpand if the vdev size
				 * changed. There are two different cases
				 * to consider.
				 *
				 * 1. wholedisk=1
				 * If you do a 'zpool create' on a whole disk
				 * (like /dev/sda), then zfs will create
				 * partitions on the disk (like /dev/sda1). In
				 * that case, wholedisk=1 will be set in the
				 * partition's nvlist config. So zed will need
				 * to see if your parent device (/dev/sda)
				 * expanded in size, and if so, then attempt
				 * the autoexpand.
				 *
				 * 2. wholedisk=0
				 * If you do a 'zpool create' on an existing
				 * partition, or a device that doesn't allow
				 * partitions, then wholedisk=0, and you will
				 * simply need to check if the device itself
				 * expanded in size.
				 */
				if (DEVICE_GREW(conf_size, udev_size) ||
				    (wholedisk && DEVICE_GREW(conf_size,
				    udev_parent_size))) {
					error = zpool_vdev_online(zhp, fullpath,
					    0, &newstate);

					zed_log_msg(LOG_INFO,
					    "%s: autoexpanding '%s' from %llu"
					    " to %llu bytes in pool '%s': %d",
					    __func__, fullpath, conf_size,
					    MAX(udev_size, udev_parent_size),
					    zpool_get_name(zhp), error);
				}
			}
		}
		zpool_close(zhp);
		return (1);
	}
	zpool_close(zhp);
	return (0);
}

/*
 * This function handles the ESC_DEV_DLE device change event. Use the
 * provided vdev guid when looking up a disk or partition; when the guid
 * is not present, assume the entire disk is owned by ZFS and append the
 * expected -part1 partition information, then lookup by physical path.
 */
static int
zfs_deliver_dle(nvlist_t *nvl)
{
	const char *devname;
	char name[MAXPATHLEN];
	uint64_t guid;

	if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
		sprintf(name, "%llu", (u_longlong_t)guid);
	} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
		strlcpy(name, devname, MAXPATHLEN);
		zfs_append_partition(name, MAXPATHLEN);
	} else {
		sprintf(name, "unknown");
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
	}

	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, nvl) != 1) {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
		    "found", name);
		return (1);
	}

	return (0);
}

/*
 * syseventd daemon module event handler
 *
 * Handles syseventd daemon zfs device related events:
 *
 *	EC_DEV_ADD.ESC_DISK
 *	EC_DEV_STATUS.ESC_DEV_DLE
 *	EC_ZFS.ESC_ZFS_VDEV_CHECK
 *
 * Note: assumes only one thread active at a time (not thread safe)
 */
static int
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
{
	int ret;
	boolean_t is_check = B_FALSE, is_dle = B_FALSE;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/*
		 * We're mainly interested in disk additions, but we also listen
		 * for new loop devices, to allow for simplified testing.
		 */
		if (strcmp(subclass, ESC_DISK) != 0 &&
		    strcmp(subclass, ESC_LOFI) != 0)
			return (0);

		is_check = B_FALSE;
	} else if (strcmp(class, EC_ZFS) == 0 &&
	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
		/*
		 * This event signifies that a device failed to open
		 * during pool load, but the 'autoreplace' property was
		 * set, so we should pretend it's just been added.
		 */
		is_check = B_TRUE;
	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
	    strcmp(subclass, ESC_DEV_DLE) == 0) {
		is_dle = B_TRUE;
	} else {
		return (0);
	}

	if (is_dle)
		ret = zfs_deliver_dle(nvl);
	else if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl);

	return (ret);
}
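/*
 * Startup thread: enumerate all imported pools and record the
 * unavailable ones on g_pool_list for later dataset enablement.
 */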
static void *
zfs_enum_pools(void *arg)
{
	(void) arg;

	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
	/*
	 * Linux - instead of using a thread pool, each list entry
	 * will spawn a thread when an unavailable pool transitions
	 * to available. zfs_slm_fini will wait for these threads.
	 */
	g_enumeration_done = B_TRUE;
	return (NULL);
}

/*
 * Called from the zed daemon at startup.
 *
 * Messages are sent from zevents or the udev monitor.
 *
 * For now, each agent has its own libzfs instance.
 */
int
zfs_slm_init(void)
{
	if ((g_zfshdl = libzfs_init()) == NULL)
		return (-1);

	/*
	 * collect a list of unavailable pools (asynchronously,
	 * since this can take a while)
	 */
	list_create(&g_pool_list, sizeof (struct unavailpool),
	    offsetof(struct unavailpool, uap_node));

	if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
		list_destroy(&g_pool_list);
		libzfs_fini(g_zfshdl);
		return (-1);
	}

	pthread_setname_np(g_zfs_tid, "enum-pools");
	list_create(&g_device_list, sizeof (struct pendingdev),
	    offsetof(struct pendingdev, pd_node));

	return (0);
}

void
zfs_slm_fini(void)
{
	unavailpool_t *pool;
	pendingdev_t *device;

	/* wait for zfs_enum_pools thread to complete */
	(void) pthread_join(g_zfs_tid, NULL);
	/* destroy the thread pool */
	if (g_tpool != NULL) {
		tpool_wait(g_tpool);
		tpool_destroy(g_tpool);
	}

	while ((pool = list_remove_head(&g_pool_list)) != NULL) {
		zpool_close(pool->uap_zhp);
		free(pool);
	}
	list_destroy(&g_pool_list);

	while ((device = list_remove_head(&g_device_list)) != NULL)
		free(device);
	list_destroy(&g_device_list);

	libzfs_fini(g_zfshdl);
}
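/*
 * Entry point called by zed for each received event: log it and hand it
 * to zfs_slm_deliver_event() for dispatch.
 */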
void
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
{
	zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
	(void) zfs_slm_deliver_event(class, subclass, nvl);
}