Path: sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2016, 2017, Intel Corporation.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2023, Klara Inc.
 */

/*
 * ZFS syseventd module.
 *
 * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 * 1. Search for any vdevs whose devid matches that of the newly added
 *    device.
 *
 * 2. If no vdevs are found, then search for any vdevs whose udev path
 *    matches that of the new device.
 *
 * 3. If no vdevs match by either method, then ignore the event.
 *
 * 4. Attempt to online the device with a flag to indicate that it should
 *    be unspared when resilvering completes.  If this succeeds, then the
 *    same device was inserted and we should continue normally.
 *
 * 5. If the pool does not have the 'autoreplace' property set, attempt to
 *    online the device again without the unspare flag, which will
 *    generate a FMA fault.
 *
 * 6. If the pool has the 'autoreplace' property set, and the matching vdev
 *    is a whole disk, then label the new disk and attempt a 'zpool
 *    replace'.
 *
 * The module responds to EC_DEV_ADD events.  The special ESC_ZFS_VDEV_CHECK
 * event indicates that a device failed to open during pool load, but the
 * autoreplace property was set.  In this case, we deferred the associated
 * FMA fault until our module had a chance to process the autoreplace logic.
 * If the device could not be replaced, then the second online attempt will
 * trigger the FMA fault that we skipped earlier.
 *
 * On Linux udev provides a disk insert for both the disk and the partition.
 */

#include <ctype.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libzfs.h>
#include <libzutil.h>
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include "zfs_agents.h"
#include "../zed_log.h"

#define	DEV_BYID_PATH	"/dev/disk/by-id/"
#define	DEV_BYPATH_PATH	"/dev/disk/by-path/"
#define	DEV_BYVDEV_PATH	"/dev/disk/by-vdev/"

typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);

libzfs_handle_t *g_zfshdl;
list_t g_pool_list;	/* list of unavailable pools at initialization */
list_t g_device_list;	/* list of disks with asynchronous label request */
tpool_t *g_tpool;
boolean_t g_enumeration_done;
pthread_t g_zfs_tid;	/* zfs_enum_pools() thread */

typedef struct unavailpool {
	zpool_handle_t	*uap_zhp;
	list_node_t	uap_node;
} unavailpool_t;

typedef struct pendingdev {
	char		pd_physpath[128];
	list_node_t	pd_node;
} pendingdev_t;

static int
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	unsigned int c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}

static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
	zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
	    zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));

	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
		unavailpool_t *uap;
		uap = malloc(sizeof (unavailpool_t));
		if (uap == NULL) {
			perror("malloc");
			exit(EXIT_FAILURE);
		}

		uap->uap_zhp = zhp;
		list_insert_tail((list_t *)data, uap);
	} else {
		zpool_close(zhp);
	}
	return (0);
}

/*
 * Write an array of strings to the zed log
 */
static void lines_to_zed_log_msg(char **lines, int lines_cnt)
{
	int i;
	for (i = 0; i < lines_cnt; i++) {
		zed_log_msg(LOG_INFO, "%s", lines[i]);
	}
}

/*
 * Two stage replace on Linux
 * since we get disk notifications
 * we can wait for partitioned disk slice to show up!
 *
 * First stage tags the disk, initiates async partitioning, and returns
 * Second stage finds the tag and proceeds to ZFS labeling/replace
 *
 * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
 *
 * 1. physical match with no fs, no partition
 *	tag it top, partition disk
 *
 * 2. physical match again, see partition and tag
 *
 */

/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * Also can arrive here from a ESC_ZFS_VDEV_CHECK event
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
	const char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL, faulted = 0ULL;
	uint64_t guid = 0ULL;
	uint64_t is_spare = 0;
	const char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char pathbuf[PATH_MAX];
	int ret;
	int online_flag = ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE;
	boolean_t is_sd = B_FALSE;
	boolean_t is_mpath_wholedisk = B_FALSE;
	uint_t c;
	vdev_stat_t *vs;
	char **lines = NULL;
	int lines_cnt = 0;
	int rc;

	/*
	 * Get the persistent path, typically under the '/dev/disk/by-id' or
	 * '/dev/disk/by-vdev' directories.  Note that this path can change
	 * when a vdev is replaced with a new disk.
	 */
	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);
		return;
	}

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);

	update_vdev_config_dev_sysfs_path(vdev, path,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);

	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_FAULTED, &faulted);

	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_IS_SPARE, &is_spare);

	/*
	 * Special case:
	 *
	 * We've seen times where a disk won't have a ZPOOL_CONFIG_PHYS_PATH
	 * entry in their config. For example, on this force-faulted disk:
	 *
	 *	children[0]:
	 *	    type: 'disk'
	 *	    id: 0
	 *	    guid: 14309659774640089719
	 *	    path: '/dev/disk/by-vdev/L28'
	 *	    whole_disk: 0
	 *	    DTL: 654
	 *	    create_txg: 4
	 *	    com.delphix:vdev_zap_leaf: 1161
	 *	    faulted: 1
	 *	    aux_state: 'external'
	 *	children[1]:
	 *	    type: 'disk'
	 *	    id: 1
	 *	    guid: 16002508084177980912
	 *	    path: '/dev/disk/by-vdev/L29'
	 *	    devid: 'dm-uuid-mpath-35000c500a61d68a3'
	 *	    phys_path: 'L29'
	 *	    vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
	 *	    whole_disk: 0
	 *	    DTL: 1028
	 *	    create_txg: 4
	 *	    com.delphix:vdev_zap_leaf: 131
	 *
	 * If the disk's path is a /dev/disk/by-vdev/ path, then we can infer
	 * the ZPOOL_CONFIG_PHYS_PATH from the by-vdev disk name.
	 */
	if (physpath == NULL && path != NULL) {
		/* If path begins with "/dev/disk/by-vdev/" ... */
		if (strncmp(path, DEV_BYVDEV_PATH,
		    strlen(DEV_BYVDEV_PATH)) == 0) {
			/* Set physpath to the char after "/dev/disk/by-vdev" */
			physpath = &path[strlen(DEV_BYVDEV_PATH)];
		}
	}

	/*
	 * We don't want to autoreplace offlined disks.  However, we do want to
	 * replace force-faulted disks (`zpool offline -f`).  Force-faulted
	 * disks have both offline=1 and faulted=1 in the nvlist.
	 */
	if (offline && !faulted) {
		zed_log_msg(LOG_INFO, "%s: %s is offline, skip autoreplace",
		    __func__, path);
		return;
	}

	is_mpath_wholedisk = is_mpath_whole_disk(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " %s blank disk, %s mpath blank disk, %s labeled, enc sysfs '%s', "
	    "(guid %llu)",
	    zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL",
	    wholedisk ? "is" : "not",
	    is_mpath_wholedisk ? "is" : "not",
	    labeled ? "is" : "not",
	    enc_sysfs_path,
	    (long long unsigned int)guid);

	/*
	 * The VDEV guid is preferred for identification (gets passed in path)
	 */
	if (guid != 0) {
		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
		    (long long unsigned int)guid);
	} else {
		/*
		 * otherwise use path sans partition suffix for whole disks
		 */
		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			char *spath = zfs_strip_partition(fullpath);
			if (!spath) {
				zed_log_msg(LOG_INFO, "%s: Can't alloc",
				    __func__);
				return;
			}

			(void) strlcpy(fullpath, spath, sizeof (fullpath));
			free(spath);
		}
	}

	if (is_spare)
		online_flag |= ZFS_ONLINE_SPARE;

	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath, online_flag, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO,
		    "  zpool_vdev_online: vdev '%s' ('%s') is "
		    "%s", fullpath, physpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
		return;
	}

	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = B_TRUE;

	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_mpath_wholedisk) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a blank disk for '%s' ('%s')", fullpath,
		    physpath);
		return;
	}

	/*
	 * Convert physical path into its current device node.  Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, pathbuf) == NULL && !is_mpath_wholedisk) {
		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		int err = zpool_vdev_online(zhp, fullpath,
		    ZFS_ONLINE_FORCEFAULT, &newstate);

		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s) "
		    "err %d, new state %d",
		    fullpath, libzfs_error_description(g_zfshdl), err,
		    err ? (int)newstate : 0);
		return;
	}

	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_REMOVED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		zed_log_msg(LOG_INFO, "  not autoreplacing since disk isn't in "
		    "a bad state (currently %llu)", vs->vs_state);
		return;
	}

	nvlist_lookup_string(vdev, "new_devid", &new_devid);

	if (is_mpath_wholedisk) {
		/* Don't label device mapper or multipath disks. */
		zed_log_msg(LOG_INFO,
		    "  it's a multipath wholedisk, don't label");
		rc = zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
		    &lines_cnt);
		if (rc != 0) {
			zed_log_msg(LOG_INFO,
			    "  zpool_prepare_disk: could not "
			    "prepare '%s' (%s), path '%s', rc = %d", fullpath,
			    libzfs_error_description(g_zfshdl), path, rc);
			if (lines_cnt > 0) {
				zed_log_msg(LOG_INFO,
				    "  zfs_prepare_disk output:");
				lines_to_zed_log_msg(lines, lines_cnt);
			}
			libzfs_free_str_array(lines, lines_cnt);
			return;
		}
	} else if (!labeled) {
		/*
		 * we're auto-replacing a raw disk, so label it first
		 */
		char *leafname;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * to map the physical string that was matched on to the under
		 * lying device node.
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		leafname = strrchr(pathbuf, '/') + 1;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.
		 */
		rc = zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
		    vdev, "autoreplace", &lines, &lines_cnt);
		if (rc != 0) {
			zed_log_msg(LOG_WARNING,
			    "  zpool_prepare_and_label_disk: could not "
			    "label '%s' (%s), rc = %d", leafname,
			    libzfs_error_description(g_zfshdl), rc);
			if (lines_cnt > 0) {
				zed_log_msg(LOG_INFO,
				    "  zfs_prepare_disk output:");
				lines_to_zed_log_msg(lines, lines_cnt);
			}
			libzfs_free_str_array(lines, lines_cnt);

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		/*
		 * The disk labeling is asynchronous on Linux.  Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		device = malloc(sizeof (pendingdev_t));
		if (device == NULL) {
			perror("malloc");
			exit(EXIT_FAILURE);
		}

		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_NOTICE, "  zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

	} else /* labeled */ {
		boolean_t found = B_FALSE;
		/*
		 * match up with request above to label the disk
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);
				free(device);
				found = B_TRUE;
				break;
			}
			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);
		}
		if (!found) {
			/* unexpected partition slice encountered */
			zed_log_msg(LOG_WARNING, "labeled disk %s was "
			    "unexpected here", fullpath);
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);

		/*
		 * Paths that begin with '/dev/disk/by-id/' will change and so
		 * they must be updated before calling zpool_vdev_attach().
		 */
		if (strncmp(path, DEV_BYID_PATH, strlen(DEV_BYID_PATH)) == 0) {
			(void) snprintf(pathbuf, sizeof (pathbuf), "%s%s",
			    DEV_BYID_PATH, new_devid);
			zed_log_msg(LOG_INFO, "  zpool_label_disk: path '%s' "
			    "replaced by '%s'", path, pathbuf);
			path = pathbuf;
		}
	}

	libzfs_free_str_array(lines, lines_cnt);

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		return;
	}
	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    (const nvlist_t **)&newvd, 1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	/*
	 * Wait for udev to verify the links exist, then auto-replace
	 * the leaf disk at same physical location.
	 */
	if (zpool_label_disk_wait(path, DISK_LABEL_WAIT) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: pool '%s', after labeling "
		    "replacement disk, the expected disk partition link '%s' "
		    "is missing after waiting %u ms",
		    zpool_get_name(zhp), path, DISK_LABEL_WAIT);
		nvlist_free(nvroot);
		return;
	}

	/*
	 * Prefer sequential resilvering when supported (mirrors and dRAID),
	 * otherwise fallback to a traditional healing resilver.
	 */
	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);
	if (ret != 0) {
		ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
		    B_TRUE, B_FALSE);
	}

	zed_log_msg(LOG_WARNING, "  zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));

	nvlist_free(nvroot);
}

/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char		*dd_compare;
	const char		*dd_prop;
	zfs_process_func_t	dd_func;
	boolean_t		dd_found;
	boolean_t		dd_islabeled;
	uint64_t		dd_pool_guid;
	uint64_t		dd_vdev_guid;
	uint64_t		dd_new_vdev_guid;
	const char		*dd_new_devid;
	uint64_t		dd_num_spares;
} dev_data_t;

static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
	dev_data_t *dp = data;
	const char *path = NULL;
	uint_t c, children;
	nvlist_t **child;
	uint64_t guid = 0;
	uint64_t isspare = 0;

	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}

	/*
	 * Iterate over any spares and cache devices
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}

	/* once a vdev was matched and processed there is nothing left to do */
	if (dp->dd_found && dp->dd_num_spares == 0)
		return;
	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &guid);

	/*
	 * Match by GUID if available otherwise fallback to devid or physical
	 */
	if (dp->dd_vdev_guid != 0) {
		if (guid != dp->dd_vdev_guid)
			return;
		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched on %llu", guid);
		dp->dd_found = B_TRUE;

	} else if (dp->dd_compare != NULL) {
		/*
		 * NOTE: On Linux there is an event for partition, so unlike
		 * illumos, substring matching is not required to accommodate
		 * the partition suffix. An exact match will be present in
		 * the dp->dd_compare value.
		 * If the attached disk already contains a vdev GUID, it means
		 * the disk is not clean. In such a scenario, the physical path
		 * would be a match that makes the disk faulted when trying to
		 * online it. So, we would only want to proceed if either GUID
		 * matches with the last attached disk or the disk is in clean
		 * state.
		 */
		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strcmp(dp->dd_compare, path) != 0) {
			return;
		}
		if (dp->dd_new_vdev_guid != 0 && dp->dd_new_vdev_guid != guid) {
			zed_log_msg(LOG_INFO, "  %s: no match (GUID:%llu"
			    " != vdev GUID:%llu)", __func__,
			    dp->dd_new_vdev_guid, guid);
			return;
		}

		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched %s on %s",
		    dp->dd_prop, path);
		dp->dd_found = B_TRUE;

		/* pass the new devid for use by auto-replacing code */
		if (dp->dd_new_devid != NULL) {
			(void) nvlist_add_string(nvl, "new_devid",
			    dp->dd_new_devid);
		}
	}

	if (dp->dd_found == B_TRUE && nvlist_lookup_uint64(nvl,
	    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
		dp->dd_num_spares++;

	(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}

static void
zfs_enable_ds(void *arg)
{
	unavailpool_t *pool = (unavailpool_t *)arg;

	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0, 512);
	zpool_close(pool->uap_zhp);
	free(pool);
}

static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;
	unavailpool_t *pool;

	zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
	    zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);

	/*
	 * For each vdev in this pool, look for a match to apply dd_func
	 */
	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	} else {
		zed_log_msg(LOG_INFO, "%s: no config\n", __func__);
	}

	/*
	 * if this pool was originally unavailable,
	 * then enable its datasets asynchronously
	 */
	if (g_enumeration_done) {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {

			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)))
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				list_remove(&g_pool_list, pool);
				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
				    pool);
				break;
			}
		}
	}

	zpool_close(zhp);

	/* cease iteration after a match */
	return (dp->dd_found && dp->dd_num_spares == 0);
}

/*
 * Given a physical device location, iterate over all
 * (pool, vdev) pairs which correspond to that location.
 */
static boolean_t
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
    boolean_t is_slice, uint64_t new_vdev_guid)
{
	dev_data_t data = { 0 };

	data.dd_compare = physical;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;	/* used by auto replace code */
	data.dd_new_vdev_guid = new_vdev_guid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a device identifier, find any vdevs with a matching by-vdev
 * path.  Normally we shouldn't need this as the comparison would be
 * made earlier in the devphys_iter().  For example, if we were replacing
 * /dev/disk/by-vdev/L28, normally devphys_iter() would match the
 * ZPOOL_CONFIG_PHYS_PATH of "L28" from the old disk config to "L28"
 * of the new disk config.  However, we've seen cases where
 * ZPOOL_CONFIG_PHYS_PATH was not in the config for the old disk.  Here's
 * an example of a real 2-disk mirror pool where one disk was force
 * faulted:
 *
 *	com.delphix:vdev_zap_top: 129
 *	    children[0]:
 *	        type: 'disk'
 *	        id: 0
 *	        guid: 14309659774640089719
 *	        path: '/dev/disk/by-vdev/L28'
 *	        whole_disk: 0
 *	        DTL: 654
 *	        create_txg: 4
 *	        com.delphix:vdev_zap_leaf: 1161
 *	        faulted: 1
 *	        aux_state: 'external'
 *	    children[1]:
 *	        type: 'disk'
 *	        id: 1
 *	        guid: 16002508084177980912
 *	        path: '/dev/disk/by-vdev/L29'
 *	        devid: 'dm-uuid-mpath-35000c500a61d68a3'
 *	        phys_path: 'L29'
 *	        vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
 *	        whole_disk: 0
 *	        DTL: 1028
 *	        create_txg: 4
 *	        com.delphix:vdev_zap_leaf: 131
 *
 * So in the case above, the only thing we could compare is the path.
 *
 * We can do this because we assume by-vdev paths are authoritative as physical
 * paths.  We could not assume this for normal paths like /dev/sda since the
 * physical location /dev/sda points to could change over time.
 */
static boolean_t
by_vdev_path_iter(const char *by_vdev_path, const char *devid,
    zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = by_vdev_path;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	if (strncmp(by_vdev_path, DEV_BYVDEV_PATH,
	    strlen(DEV_BYVDEV_PATH)) != 0) {
		/* by_vdev_path doesn't start with "/dev/disk/by-vdev/" */
		return (B_FALSE);
	}

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a device identifier, find any vdevs with a matching devid.
 * On Linux we can match devid directly which is always a whole disk.
 */
static boolean_t
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = devid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a device guid, find any vdevs with a matching guid.
 */
static boolean_t
guid_iter(uint64_t pool_guid, uint64_t vdev_guid, const char *devid,
    zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_func = func;
	data.dd_found = B_FALSE;
	data.dd_pool_guid = pool_guid;
	data.dd_vdev_guid = vdev_guid;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Handle a EC_DEV_ADD.ESC_DISK event.
 *
 * illumos
 *	Expects: DEV_PHYS_PATH string in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/dsk/c0t1d0s0' (persistent)
 *	devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
 *	phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
 *
 * linux
 *	provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/sdc1' (not persistent)
 *	devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
 *	phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
 */
static int
zfs_deliver_add(nvlist_t *nvl)
{
	const char *devpath = NULL, *devid = NULL;
	uint64_t pool_guid = 0, vdev_guid = 0;
	boolean_t is_slice;

	/*
	 * Expecting a devid string and an optional physical location and guid
	 */
	if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0) {
		zed_log_msg(LOG_INFO, "%s: no dev identifier\n", __func__);
		return (-1);
	}

	(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
	(void) nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &pool_guid);
	(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &vdev_guid);

	is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);

	zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
	    devid, devpath ? devpath : "NULL", is_slice);

	/*
	 * Iterate over all vdevs looking for a match in the following order:
	 * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
	 * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
	 * 3. ZPOOL_CONFIG_GUID (identifies unique vdev).
	 * 4. ZPOOL_CONFIG_PATH for /dev/disk/by-vdev devices only (since
	 *    by-vdev paths represent physical paths).
	 */
	if (devid_iter(devid, zfs_process_add, is_slice))
		return (0);
	if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
	    is_slice, vdev_guid))
		return (0);
	if (vdev_guid != 0)
		(void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,
		    is_slice);

	if (devpath != NULL) {
		/* Can we match a /dev/disk/by-vdev/ path? */
		char by_vdev_path[MAXPATHLEN];
		snprintf(by_vdev_path, sizeof (by_vdev_path),
		    "/dev/disk/by-vdev/%s", devpath);
		if (by_vdev_path_iter(by_vdev_path, devid, zfs_process_add,
		    is_slice))
			return (0);
	}

	return (0);
}

/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool.  In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0 ||
	    data.dd_vdev_guid == 0)
		return (0);

	zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
	    data.dd_pool_guid, data.dd_vdev_guid);

	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}

/*
 * Given a path to a vdev, lookup the vdev's physical size from its
 * config nvlist.
 *
 * Returns the vdev's physical size in bytes on success, 0 on error.
 */
static uint64_t
vdev_size_from_config(zpool_handle_t *zhp, const char *vdev_path)
{
	nvlist_t *nvl = NULL;
	boolean_t avail_spare, l2cache, log;
	vdev_stat_t *vs = NULL;
	uint_t c;

	nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
	if (!nvl)
		return (0);

	verify(nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (!vs) {
		zed_log_msg(LOG_INFO, "%s: no nvlist for '%s'", __func__,
		    vdev_path);
		return (0);
	}

	return (vs->vs_pspace);
}

/*
 * Given a path to a vdev, lookup if the vdev is a "whole disk" in the
 * config nvlist.  "whole disk" means that ZFS was passed a whole disk
 * at pool creation time, which it partitioned up and has full control over.
 * Thus a partition with wholedisk=1 set tells us that zfs created the
 * partition at creation time.  A partition without whole disk set would have
 * been created by externally (like with fdisk) and passed to ZFS.
 *
 * Returns the whole disk value (either 0 or 1).
 */
static uint64_t
vdev_whole_disk_from_config(zpool_handle_t *zhp, const char *vdev_path)
{
	nvlist_t *nvl = NULL;
	boolean_t avail_spare, l2cache, log;
	uint64_t wholedisk = 0;

	nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
	if (!nvl)
		return (0);

	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);

	return (wholedisk);
}

/*
 * If the device size grew more than 1% then return true.
 */
#define	DEVICE_GREW(oldsize, newsize) \
		    ((newsize > oldsize) && \
		    ((newsize / (newsize - oldsize)) <= 100))

static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
	boolean_t avail_spare, l2cache;
	nvlist_t *udev_nvl = data;
	nvlist_t *tgt;
	int error;

	const char *tmp_devname;
	char devname[MAXPATHLEN] = "";
	uint64_t guid;

	if (nvlist_lookup_uint64(udev_nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
		sprintf(devname, "%llu", (u_longlong_t)guid);
	} else if (nvlist_lookup_string(udev_nvl, DEV_PHYS_PATH,
	    &tmp_devname) == 0) {
		strlcpy(devname, tmp_devname, MAXPATHLEN);
		zfs_append_partition(devname, MAXPATHLEN);
	} else {
		zed_log_msg(LOG_INFO, "%s: no guid or physpath", __func__);
	}

	zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
	    devname, zpool_get_name(zhp));

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		const char *path;
		char fullpath[MAXPATHLEN];
		uint64_t wholedisk = 0;

		error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
		if (error) {
			zpool_close(zhp);
			return (0);
		}

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		if (wholedisk) {
			char *tmp;
			path = strrchr(path, '/');
			if (path != NULL) {
				tmp = zfs_strip_partition(path + 1);
				if (tmp == NULL) {
					zpool_close(zhp);
					return (0);
				}
			} else {
				zpool_close(zhp);
				return (0);
			}

			(void) strlcpy(fullpath, tmp, sizeof (fullpath));
			free(tmp);

			/*
			 * We need to reopen the pool associated with this
			 * device so that the kernel can update the size of
			 * the expanded device.  When expanding there is no
			 * need to restart the scrub from the beginning.
			 */
			boolean_t scrub_restart = B_FALSE;
			(void) zpool_reopen_one(zhp, &scrub_restart);
		} else {
			(void) strlcpy(fullpath, path, sizeof (fullpath));
		}

		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			vdev_state_t newstate;

			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
				/*
				 * If this disk size has not changed, then
				 * there's no need to do an autoexpand.  To
				 * check we look at the disk's size in its
				 * config, and compare it to the disk size
				 * that udev is reporting.
				 */
				uint64_t udev_size = 0, conf_size = 0,
				    wholedisk = 0, udev_parent_size = 0;

				/*
				 * Get the size of our disk that udev is
				 * reporting.
				 */
				if (nvlist_lookup_uint64(udev_nvl, DEV_SIZE,
				    &udev_size) != 0) {
					udev_size = 0;
				}

				/*
				 * Get the size of our disk's parent device
				 * from udev (where sda1's parent is sda).
				 */
				if (nvlist_lookup_uint64(udev_nvl,
				    DEV_PARENT_SIZE, &udev_parent_size) != 0) {
					udev_parent_size = 0;
				}

				conf_size = vdev_size_from_config(zhp,
				    fullpath);

				wholedisk = vdev_whole_disk_from_config(zhp,
				    fullpath);

				/*
				 * Only attempt an autoexpand if the vdev size
				 * changed.  There are two different cases
				 * to consider.
				 *
				 * 1. wholedisk=1
				 * If you do a 'zpool create' on a whole disk
				 * (like /dev/sda), then zfs will create
				 * partitions on the disk (like /dev/sda1).  In
				 * that case, wholedisk=1 will be set in the
				 * partition's nvlist config.  So zed will need
				 * to see if your parent device (/dev/sda)
				 * expanded in size, and if so, then attempt
				 * the autoexpand.
				 *
				 * 2. wholedisk=0
				 * If you do a 'zpool create' on an existing
				 * partition, or a device that doesn't allow
				 * partitions, then wholedisk=0, and you will
				 * simply need to check if the device itself
				 * expanded in size.
				 */
				if (DEVICE_GREW(conf_size, udev_size) ||
				    (wholedisk && DEVICE_GREW(conf_size,
				    udev_parent_size))) {
					error = zpool_vdev_online(zhp, fullpath,
					    0, &newstate);

					zed_log_msg(LOG_INFO,
					    "%s: autoexpanding '%s' from %llu"
					    " to %llu bytes in pool '%s': %d",
					    __func__, fullpath, conf_size,
					    MAX(udev_size, udev_parent_size),
					    zpool_get_name(zhp), error);
				}
			}
		}
		zpool_close(zhp);
		return (1);
	}
	zpool_close(zhp);
	return (0);
}

/*
 * This function handles the ESC_DEV_DLE device change event.  Use the
 * provided vdev guid when looking up a disk or partition, when the guid
 * is not present assume the entire disk is owned by ZFS and append the
 * expected -part1 partition information then lookup by physical path.
 */
static int
zfs_deliver_dle(nvlist_t *nvl)
{
	const char *devname;
	char name[MAXPATHLEN];
	uint64_t guid;

	if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
		sprintf(name, "%llu", (u_longlong_t)guid);
	} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
		strlcpy(name, devname, MAXPATHLEN);
		zfs_append_partition(name, MAXPATHLEN);
	} else {
		sprintf(name, "unknown");
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
	}

	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, nvl) != 1) {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
		    "found", name);
		return (1);
	}

	return (0);
}

/*
 * syseventd daemon module event handler
 *
 * Handles syseventd daemon zfs device related events:
 *
 *	EC_DEV_ADD.ESC_DISK
 *	EC_DEV_STATUS.ESC_DEV_DLE
 *	EC_ZFS.ESC_ZFS_VDEV_CHECK
 *
 * Note: assumes only one thread active at a time (not thread safe)
 */
static int
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
{
	int ret;
	boolean_t is_check = B_FALSE, is_dle = B_FALSE;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/*
		 * We're mainly interested in disk additions, but we also listen
		 * for new loop devices, to allow for simplified testing.
		 */
		if (strcmp(subclass, ESC_DISK) != 0 &&
		    strcmp(subclass, ESC_LOFI) != 0)
			return (0);

		is_check = B_FALSE;
	} else if (strcmp(class, EC_ZFS) == 0 &&
	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
		/*
		 * This event signifies that a device failed to open
		 * during pool load, but the 'autoreplace' property was
		 * set, so we should pretend it's just been added.
		 */
		is_check = B_TRUE;
	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
	    strcmp(subclass, ESC_DEV_DLE) == 0) {
		is_dle = B_TRUE;
	} else {
		return (0);
	}

	if (is_dle)
		ret = zfs_deliver_dle(nvl);
	else if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl);

	return (ret);
}

static void *
zfs_enum_pools(void *arg)
{
	(void) arg;

	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
	/*
	 * Linux - instead of using a thread pool, each list entry
	 * will spawn a thread when an unavailable pool transitions
	 * to available.  zfs_slm_fini will wait for these threads.
	 */
	g_enumeration_done = B_TRUE;
	return (NULL);
}

/*
 * called from zed daemon at startup
 *
 * sent messages from zevents or udev monitor
 *
 * For now, each agent has its own libzfs instance
 */
int
zfs_slm_init(void)
{
	if ((g_zfshdl = libzfs_init()) == NULL)
		return (-1);

	/*
	 * collect a list of unavailable pools (asynchronously,
	 * since this can take a while)
	 */
	list_create(&g_pool_list, sizeof (struct unavailpool),
	    offsetof(struct unavailpool, uap_node));

	if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
		list_destroy(&g_pool_list);
		libzfs_fini(g_zfshdl);
		return (-1);
	}

	pthread_setname_np(g_zfs_tid, "enum-pools");
	list_create(&g_device_list, sizeof (struct pendingdev),
	    offsetof(struct pendingdev, pd_node));

	return (0);
}

void
zfs_slm_fini(void)
{
	unavailpool_t *pool;
	pendingdev_t *device;

	/* wait for zfs_enum_pools thread to complete */
	(void) pthread_join(g_zfs_tid, NULL);
	/* destroy the thread pool */
	if (g_tpool != NULL) {
		tpool_wait(g_tpool);
		tpool_destroy(g_tpool);
	}

	while ((pool = list_remove_head(&g_pool_list)) != NULL) {
		zpool_close(pool->uap_zhp);
		free(pool);
	}
	list_destroy(&g_pool_list);

	while ((device = list_remove_head(&g_device_list)) != NULL)
		free(device);
	list_destroy(&g_device_list);

	libzfs_fini(g_zfshdl);
}

void
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
{
	zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
	(void) zfs_slm_deliver_event(class, subclass, nvl);
}