Path: blob/main/sys/contrib/openzfs/cmd/zed/agents/zfs_diagnosis.c
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2016, Intel Corporation.
 * Copyright (c) 2023, Klara Inc.
 */

#include <stddef.h>
#include <string.h>
#include <libuutil.h>
#include <libzfs.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>
#include <sys/zio.h>

#include "zfs_agents.h"
#include "fmd_api.h"

/*
 * Default values for the serd engine when processing checksum or io errors. The
 * semantics are N <events> in T <seconds>.
 */
#define	DEFAULT_CHECKSUM_N	10	/* events */
#define	DEFAULT_CHECKSUM_T	600	/* seconds */
#define	DEFAULT_IO_N		10	/* events */
#define	DEFAULT_IO_T		600	/* seconds */
#define	DEFAULT_SLOW_IO_N	10	/* events */
#define	DEFAULT_SLOW_IO_T	30	/* seconds */

#define	CASE_GC_TIMEOUT_SECS	43200	/* 12 hours */

/*
 * Our serd engines are named in the following format:
 *     'zfs_<pool_guid>_<vdev_guid>_{checksum,io,slow_io}'
 * This #define reserves enough space for two 64-bit hex values plus the
 * length of the longest string.
 */
#define	MAX_SERDLEN	(16 * 2 + sizeof ("zfs___checksum"))
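
/*
 * For example, the io engine for a pool with GUID 0xcafef00d and a leaf vdev
 * with GUID 0xdeadbeef would be named "zfs_cafef00d_deadbeef_io" (the GUIDs
 * here are purely illustrative; see zfs_serd_name() below for the format).
 */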

/*
 * On-disk case structure.  This must maintain backwards compatibility with
 * previous versions of the DE.  By default, any members appended to the end
 * will be filled with zeros if they don't exist in a previous version.
 */
typedef struct zfs_case_data {
	uint64_t	zc_version;
	uint64_t	zc_ena;
	uint64_t	zc_pool_guid;
	uint64_t	zc_vdev_guid;
	uint64_t	zc_parent_guid;
	int		zc_pool_state;
	char		zc_serd_checksum[MAX_SERDLEN];
	char		zc_serd_io[MAX_SERDLEN];
	char		zc_serd_slow_io[MAX_SERDLEN];
	int		zc_has_remove_timer;
} zfs_case_data_t;

/*
 * Time-of-day
 */
typedef struct er_timeval {
	uint64_t	ertv_sec;
	uint64_t	ertv_nsec;
} er_timeval_t;

/*
 * In-core case structure.
 */
typedef struct zfs_case {
	boolean_t	zc_present;
	uint32_t	zc_version;
	zfs_case_data_t	zc_data;
	fmd_case_t	*zc_case;
	uu_list_node_t	zc_node;
	id_t		zc_remove_timer;
	char		*zc_fru;
	er_timeval_t	zc_when;
} zfs_case_t;

#define	CASE_DATA			"data"
#define	CASE_FRU			"fru"
#define	CASE_DATA_VERSION_INITIAL	1
#define	CASE_DATA_VERSION_SERD		2

typedef struct zfs_de_stats {
	fmd_stat_t	old_drops;
	fmd_stat_t	dev_drops;
	fmd_stat_t	vdev_drops;
	fmd_stat_t	import_drops;
	fmd_stat_t	resource_drops;
} zfs_de_stats_t;

zfs_de_stats_t zfs_stats = {
	{ "old_drops", FMD_TYPE_UINT64, "ereports dropped (from before load)" },
	{ "dev_drops", FMD_TYPE_UINT64, "ereports dropped (dev during open)"},
	{ "vdev_drops", FMD_TYPE_UINT64, "ereports dropped (weird vdev types)"},
	{ "import_drops", FMD_TYPE_UINT64, "ereports dropped (during import)" },
	{ "resource_drops", FMD_TYPE_UINT64, "resource related ereports" }
};

/* wait 15 seconds after a removal */
static hrtime_t zfs_remove_timeout = SEC2NSEC(15);

uu_list_pool_t *zfs_case_pool;
uu_list_t *zfs_cases;

#define	ZFS_MAKE_RSRC(type)	\
	FM_RSRC_CLASS "." ZFS_ERROR_CLASS "." type
#define	ZFS_MAKE_EREPORT(type)	\
	FM_EREPORT_CLASS "." ZFS_ERROR_CLASS "." type

static void zfs_purge_cases(fmd_hdl_t *hdl);

/*
 * Write out the persistent representation of an active case.
 */
static void
zfs_case_serialize(zfs_case_t *zcp)
{
	zcp->zc_data.zc_version = CASE_DATA_VERSION_SERD;
}

/*
 * Read back the persistent representation of an active case.
 */
static zfs_case_t *
zfs_case_unserialize(fmd_hdl_t *hdl, fmd_case_t *cp)
{
	zfs_case_t *zcp;

	zcp = fmd_hdl_zalloc(hdl, sizeof (zfs_case_t), FMD_SLEEP);
	zcp->zc_case = cp;

	fmd_buf_read(hdl, cp, CASE_DATA, &zcp->zc_data,
	    sizeof (zcp->zc_data));

	if (zcp->zc_data.zc_version > CASE_DATA_VERSION_SERD) {
		fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
		return (NULL);
	}

	/*
	 * fmd_buf_read() will have already zeroed out the remainder of the
	 * buffer, so we don't have to do anything special if the version
	 * doesn't include the SERD engine name.
	 */

	if (zcp->zc_data.zc_has_remove_timer)
		zcp->zc_remove_timer = fmd_timer_install(hdl, zcp,
		    NULL, zfs_remove_timeout);

	uu_list_node_init(zcp, &zcp->zc_node, zfs_case_pool);
	(void) uu_list_insert_before(zfs_cases, NULL, zcp);

	fmd_case_setspecific(hdl, cp, zcp);

	return (zcp);
}

/*
 * Return count of other unique SERD cases under same vdev parent
 */
static uint_t
zfs_other_serd_cases(fmd_hdl_t *hdl, const zfs_case_data_t *zfs_case)
{
	zfs_case_t *zcp;
	uint_t cases = 0;
	static hrtime_t next_check = 0;

	/*
	 * Note that plumbing in some external GC would require adding locking,
	 * since most of this module code is not thread safe and assumes there
So we perform GC here198* inline periodically so that future delay induced faults will be199* possible once the issue causing multiple vdev delays is resolved.200*/201if (gethrestime_sec() > next_check) {202/* Periodically purge old SERD entries and stale cases */203fmd_serd_gc(hdl);204zfs_purge_cases(hdl);205next_check = gethrestime_sec() + CASE_GC_TIMEOUT_SECS;206}207208for (zcp = uu_list_first(zfs_cases); zcp != NULL;209zcp = uu_list_next(zfs_cases, zcp)) {210zfs_case_data_t *zcd = &zcp->zc_data;211212/*213* must be same pool and parent vdev but different leaf vdev214*/215if (zcd->zc_pool_guid != zfs_case->zc_pool_guid ||216zcd->zc_parent_guid != zfs_case->zc_parent_guid ||217zcd->zc_vdev_guid == zfs_case->zc_vdev_guid) {218continue;219}220221/*222* Check if there is another active serd case besides zfs_case223*224* Only one serd engine will be assigned to the case225*/226if (zcd->zc_serd_checksum[0] == zfs_case->zc_serd_checksum[0] &&227fmd_serd_active(hdl, zcd->zc_serd_checksum)) {228cases++;229}230if (zcd->zc_serd_io[0] == zfs_case->zc_serd_io[0] &&231fmd_serd_active(hdl, zcd->zc_serd_io)) {232cases++;233}234if (zcd->zc_serd_slow_io[0] == zfs_case->zc_serd_slow_io[0] &&235fmd_serd_active(hdl, zcd->zc_serd_slow_io)) {236cases++;237}238}239return (cases);240}241242/*243* Iterate over any active cases. If any cases are associated with a pool or244* vdev which is no longer present on the system, close the associated case.245*/246static void247zfs_mark_vdev(uint64_t pool_guid, nvlist_t *vd, er_timeval_t *loaded)248{249uint64_t vdev_guid = 0;250uint_t c, children;251nvlist_t **child;252zfs_case_t *zcp;253254(void) nvlist_lookup_uint64(vd, ZPOOL_CONFIG_GUID, &vdev_guid);255256/*257* Mark any cases associated with this (pool, vdev) pair.258*/259for (zcp = uu_list_first(zfs_cases); zcp != NULL;260zcp = uu_list_next(zfs_cases, zcp)) {261if (zcp->zc_data.zc_pool_guid == pool_guid &&262zcp->zc_data.zc_vdev_guid == vdev_guid) {263zcp->zc_present = B_TRUE;264zcp->zc_when = *loaded;265}266}267268/*269* Iterate over all children.270*/271if (nvlist_lookup_nvlist_array(vd, ZPOOL_CONFIG_CHILDREN, &child,272&children) == 0) {273for (c = 0; c < children; c++)274zfs_mark_vdev(pool_guid, child[c], loaded);275}276277if (nvlist_lookup_nvlist_array(vd, ZPOOL_CONFIG_L2CACHE, &child,278&children) == 0) {279for (c = 0; c < children; c++)280zfs_mark_vdev(pool_guid, child[c], loaded);281}282283if (nvlist_lookup_nvlist_array(vd, ZPOOL_CONFIG_SPARES, &child,284&children) == 0) {285for (c = 0; c < children; c++)286zfs_mark_vdev(pool_guid, child[c], loaded);287}288}289290static int291zfs_mark_pool(zpool_handle_t *zhp, void *unused)292{293(void) unused;294zfs_case_t *zcp;295uint64_t pool_guid;296uint64_t *tod;297er_timeval_t loaded = { 0 };298nvlist_t *config, *vd;299uint_t nelem = 0;300int ret;301302pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);303/*304* Mark any cases associated with just this pool.305*/306for (zcp = uu_list_first(zfs_cases); zcp != NULL;307zcp = uu_list_next(zfs_cases, zcp)) {308if (zcp->zc_data.zc_pool_guid == pool_guid &&309zcp->zc_data.zc_vdev_guid == 0)310zcp->zc_present = B_TRUE;311}312313if ((config = zpool_get_config(zhp, NULL)) == NULL) {314zpool_close(zhp);315return (-1);316}317318(void) nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_LOADED_TIME,319&tod, &nelem);320if (nelem == 2) {321loaded.ertv_sec = tod[0];322loaded.ertv_nsec = tod[1];323for (zcp = uu_list_first(zfs_cases); zcp != NULL;324zcp = uu_list_next(zfs_cases, zcp)) {325if (zcp->zc_data.zc_pool_guid == pool_guid 
static int
zfs_mark_pool(zpool_handle_t *zhp, void *unused)
{
	(void) unused;
	zfs_case_t *zcp;
	uint64_t pool_guid;
	uint64_t *tod;
	er_timeval_t loaded = { 0 };
	nvlist_t *config, *vd;
	uint_t nelem = 0;
	int ret;

	pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
	/*
	 * Mark any cases associated with just this pool.
	 */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp)) {
		if (zcp->zc_data.zc_pool_guid == pool_guid &&
		    zcp->zc_data.zc_vdev_guid == 0)
			zcp->zc_present = B_TRUE;
	}

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		zpool_close(zhp);
		return (-1);
	}

	(void) nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_LOADED_TIME,
	    &tod, &nelem);
	if (nelem == 2) {
		loaded.ertv_sec = tod[0];
		loaded.ertv_nsec = tod[1];
		for (zcp = uu_list_first(zfs_cases); zcp != NULL;
		    zcp = uu_list_next(zfs_cases, zcp)) {
			if (zcp->zc_data.zc_pool_guid == pool_guid &&
			    zcp->zc_data.zc_vdev_guid == 0) {
				zcp->zc_when = loaded;
			}
		}
	}

	ret = nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vd);
	if (ret) {
		zpool_close(zhp);
		return (-1);
	}

	zfs_mark_vdev(pool_guid, vd, &loaded);

	zpool_close(zhp);

	return (0);
}

struct load_time_arg {
	uint64_t lt_guid;
	er_timeval_t *lt_time;
	boolean_t lt_found;
};

static int
zpool_find_load_time(zpool_handle_t *zhp, void *arg)
{
	struct load_time_arg *lta = arg;
	uint64_t pool_guid;
	uint64_t *tod;
	nvlist_t *config;
	uint_t nelem;

	if (lta->lt_found) {
		zpool_close(zhp);
		return (0);
	}

	pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
	if (pool_guid != lta->lt_guid) {
		zpool_close(zhp);
		return (0);
	}

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		zpool_close(zhp);
		return (-1);
	}

	if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_LOADED_TIME,
	    &tod, &nelem) == 0 && nelem == 2) {
		lta->lt_found = B_TRUE;
		lta->lt_time->ertv_sec = tod[0];
		lta->lt_time->ertv_nsec = tod[1];
	}

	zpool_close(zhp);

	return (0);
}

static void
zfs_purge_cases(fmd_hdl_t *hdl)
{
	zfs_case_t *zcp;
	uu_list_walk_t *walk;
	libzfs_handle_t *zhdl = fmd_hdl_getspecific(hdl);

	/*
	 * There is no way to open a pool by GUID, or lookup a vdev by GUID.  No
	 * matter what we do, we're going to have to stomach an O(vdevs * cases)
	 * algorithm.  In reality, both quantities are likely so small that
	 * neither will matter.  Given that iterating over pools is more
	 * expensive than iterating over the in-memory case list, we opt for a
	 * 'present' flag in each case that starts off cleared.  We then iterate
	 * over all pools, marking those that are still present, and removing
	 * those that aren't found.
	 *
	 * Note that we could also construct an FMRI and rely on
	 * fmd_nvl_fmri_present(), but this would end up doing the same search.
	 */

	/*
	 * Mark the cases as not present.
	 */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp))
		zcp->zc_present = B_FALSE;

	/*
	 * Iterate over all pools and mark the pools and vdevs found.  If this
	 * fails (most probably because we're out of memory), then don't close
	 * any of the cases and we cannot be sure they are accurate.
	 */
	if (zpool_iter(zhdl, zfs_mark_pool, NULL) != 0)
		return;

	/*
	 * Remove those cases which were not found.
	 */
	walk = uu_list_walk_start(zfs_cases, UU_WALK_ROBUST);
	while ((zcp = uu_list_walk_next(walk)) != NULL) {
		if (!zcp->zc_present)
			fmd_case_close(hdl, zcp->zc_case);
	}
	uu_list_walk_end(walk);
}

/*
 * Construct the name of a serd engine given the pool/vdev GUID and type (io or
 * checksum).
 */
static void
zfs_serd_name(char *buf, uint64_t pool_guid, uint64_t vdev_guid,
    const char *type)
{
	(void) snprintf(buf, MAX_SERDLEN, "zfs_%llx_%llx_%s",
	    (long long unsigned int)pool_guid,
	    (long long unsigned int)vdev_guid, type);
}
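
/*
 * Retire the case: close it out without issuing a fault against the device.
 */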
static void
zfs_case_retire(fmd_hdl_t *hdl, zfs_case_t *zcp)
{
	fmd_hdl_debug(hdl, "retiring case");

	fmd_case_close(hdl, zcp->zc_case);
}

/*
 * Solve a given ZFS case.  This first checks to make sure the diagnosis is
 * still valid, as well as cleaning up any pending timer associated with the
 * case.
 */
static void
zfs_case_solve(fmd_hdl_t *hdl, zfs_case_t *zcp, const char *faultname)
{
	nvlist_t *detector, *fault;
	boolean_t serialize;
	nvlist_t *fru = NULL;
	fmd_hdl_debug(hdl, "solving fault '%s'", faultname);

	/*
	 * Construct the detector from the case data.  The detector is in the
	 * ZFS scheme, and is either the pool or the vdev, depending on whether
	 * this is a vdev or pool fault.
	 */
	detector = fmd_nvl_alloc(hdl, FMD_SLEEP);

	(void) nvlist_add_uint8(detector, FM_VERSION, ZFS_SCHEME_VERSION0);
	(void) nvlist_add_string(detector, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS);
	(void) nvlist_add_uint64(detector, FM_FMRI_ZFS_POOL,
	    zcp->zc_data.zc_pool_guid);
	if (zcp->zc_data.zc_vdev_guid != 0) {
		(void) nvlist_add_uint64(detector, FM_FMRI_ZFS_VDEV,
		    zcp->zc_data.zc_vdev_guid);
	}

	fault = fmd_nvl_create_fault(hdl, faultname, 100, detector,
	    fru, detector);
	fmd_case_add_suspect(hdl, zcp->zc_case, fault);

	nvlist_free(fru);

	fmd_case_solve(hdl, zcp->zc_case);

	serialize = B_FALSE;
	if (zcp->zc_data.zc_has_remove_timer) {
		fmd_timer_remove(hdl, zcp->zc_remove_timer);
		zcp->zc_data.zc_has_remove_timer = 0;
		serialize = B_TRUE;
	}
	if (serialize)
		zfs_case_serialize(zcp);

	nvlist_free(detector);
}

static boolean_t
timeval_earlier(er_timeval_t *a, er_timeval_t *b)
{
	return (a->ertv_sec < b->ertv_sec ||
	    (a->ertv_sec == b->ertv_sec && a->ertv_nsec < b->ertv_nsec));
}

static void
zfs_ereport_when(fmd_hdl_t *hdl, nvlist_t *nvl, er_timeval_t *when)
{
	(void) hdl;
	int64_t *tod;
	uint_t nelem;

	if (nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tod,
	    &nelem) == 0 && nelem == 2) {
		when->ertv_sec = tod[0];
		when->ertv_nsec = tod[1];
	} else {
		when->ertv_sec = when->ertv_nsec = UINT64_MAX;
	}
}

/*
 * Record the specified event in the SERD engine and return a
 * boolean value indicating whether or not the engine fired as
 * the result of inserting this event.
 *
 * When the pool has similar active cases on other vdevs, then
 * the fired state is disregarded and the case is retired.
 */
static int
zfs_fm_serd_record(fmd_hdl_t *hdl, const char *name, fmd_event_t *ep,
    zfs_case_t *zcp, const char *err_type)
{
	int fired = fmd_serd_record(hdl, name, ep);
	int peers = 0;

	if (fired && (peers = zfs_other_serd_cases(hdl, &zcp->zc_data)) > 0) {
		fmd_hdl_debug(hdl, "pool %llu is tracking %d other %s cases "
		    "-- skip faulting the vdev %llu",
		    (u_longlong_t)zcp->zc_data.zc_pool_guid,
		    peers, err_type,
		    (u_longlong_t)zcp->zc_data.zc_vdev_guid);
		zfs_case_retire(hdl, zcp);
		fired = 0;
	}

	return (fired);
}

/*
 * Main fmd entry point.
 */
static void
zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
{
	zfs_case_t *zcp, *dcp;
	int32_t pool_state;
	uint64_t ena, pool_guid, vdev_guid, parent_guid;
	uint64_t checksum_n, checksum_t;
	uint64_t io_n, io_t;
	er_timeval_t pool_load;
	er_timeval_t er_when;
	nvlist_t *detector;
	boolean_t pool_found = B_FALSE;
	boolean_t isresource;
	const char *type;

	/*
	 * We subscribe to notifications for vdev or pool removal.  In these
Purge any cases577* that no longer apply.578*/579if (fmd_nvl_class_match(hdl, nvl, "sysevent.fs.zfs.*")) {580fmd_hdl_debug(hdl, "purging orphaned cases from %s",581strrchr(class, '.') + 1);582zfs_purge_cases(hdl);583zfs_stats.resource_drops.fmds_value.ui64++;584return;585}586587isresource = fmd_nvl_class_match(hdl, nvl, "resource.fs.zfs.*");588589if (isresource) {590/*591* For resources, we don't have a normal payload.592*/593if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,594&vdev_guid) != 0)595pool_state = SPA_LOAD_OPEN;596else597pool_state = SPA_LOAD_NONE;598detector = NULL;599} else {600(void) nvlist_lookup_nvlist(nvl,601FM_EREPORT_DETECTOR, &detector);602(void) nvlist_lookup_int32(nvl,603FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, &pool_state);604}605606/*607* We also ignore all ereports generated during an import of a pool,608* since the only possible fault (.pool) would result in import failure,609* and hence no persistent fault. Some day we may want to do something610* with these ereports, so we continue generating them internally.611*/612if (pool_state == SPA_LOAD_IMPORT) {613zfs_stats.import_drops.fmds_value.ui64++;614fmd_hdl_debug(hdl, "ignoring '%s' during import", class);615return;616}617618/*619* Device I/O errors are ignored during pool open.620*/621if (pool_state == SPA_LOAD_OPEN &&622(fmd_nvl_class_match(hdl, nvl,623ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_CHECKSUM)) ||624fmd_nvl_class_match(hdl, nvl,625ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_IO)) ||626fmd_nvl_class_match(hdl, nvl,627ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_PROBE_FAILURE)))) {628fmd_hdl_debug(hdl, "ignoring '%s' during pool open", class);629zfs_stats.dev_drops.fmds_value.ui64++;630return;631}632633/*634* We ignore ereports for anything except disks and files.635*/636if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,637&type) == 0) {638if (strcmp(type, VDEV_TYPE_DISK) != 0 &&639strcmp(type, VDEV_TYPE_FILE) != 0) {640zfs_stats.vdev_drops.fmds_value.ui64++;641return;642}643}644645/*646* Determine if this ereport corresponds to an open case.647* Each vdev or pool can have a single case.648*/649(void) nvlist_lookup_uint64(nvl,650FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, &pool_guid);651if (nvlist_lookup_uint64(nvl,652FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)653vdev_guid = 0;654if (nvlist_lookup_uint64(nvl,655FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID, &parent_guid) != 0)656parent_guid = 0;657if (nvlist_lookup_uint64(nvl, FM_EREPORT_ENA, &ena) != 0)658ena = 0;659660zfs_ereport_when(hdl, nvl, &er_when);661662for (zcp = uu_list_first(zfs_cases); zcp != NULL;663zcp = uu_list_next(zfs_cases, zcp)) {664if (zcp->zc_data.zc_pool_guid == pool_guid) {665pool_found = B_TRUE;666pool_load = zcp->zc_when;667}668if (zcp->zc_data.zc_vdev_guid == vdev_guid)669break;670}671672/*673* Avoid falsely accusing a pool of being faulty. Do so by674* not replaying ereports that were generated prior to the675* current import. If the failure that generated them was676* transient because the device was actually removed but we677* didn't receive the normal asynchronous notification, we678* don't want to mark it as faulted and potentially panic. 
If679* there is still a problem we'd expect not to be able to680* import the pool, or that new ereports will be generated681* once the pool is used.682*/683if (pool_found && timeval_earlier(&er_when, &pool_load)) {684fmd_hdl_debug(hdl, "ignoring pool %llx, "685"ereport time %lld.%lld, pool load time = %lld.%lld",686pool_guid, er_when.ertv_sec, er_when.ertv_nsec,687pool_load.ertv_sec, pool_load.ertv_nsec);688zfs_stats.old_drops.fmds_value.ui64++;689return;690}691692if (!pool_found) {693/*694* Haven't yet seen this pool, but same situation695* may apply.696*/697libzfs_handle_t *zhdl = fmd_hdl_getspecific(hdl);698struct load_time_arg la;699700la.lt_guid = pool_guid;701la.lt_time = &pool_load;702la.lt_found = B_FALSE;703704if (zhdl != NULL &&705zpool_iter(zhdl, zpool_find_load_time, &la) == 0 &&706la.lt_found == B_TRUE) {707pool_found = B_TRUE;708709if (timeval_earlier(&er_when, &pool_load)) {710fmd_hdl_debug(hdl, "ignoring pool %llx, "711"ereport time %lld.%lld, "712"pool load time = %lld.%lld",713pool_guid, er_when.ertv_sec,714er_when.ertv_nsec, pool_load.ertv_sec,715pool_load.ertv_nsec);716zfs_stats.old_drops.fmds_value.ui64++;717return;718}719}720}721722if (zcp == NULL) {723fmd_case_t *cs;724zfs_case_data_t data = { 0 };725726/*727* If this is one of our 'fake' resource ereports, and there is728* no case open, simply discard it.729*/730if (isresource) {731zfs_stats.resource_drops.fmds_value.ui64++;732fmd_hdl_debug(hdl, "discarding '%s for vdev %llu",733class, vdev_guid);734return;735}736737/*738* Skip tracking some ereports739*/740if (strcmp(class,741ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DATA)) == 0 ||742strcmp(class,743ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE)) == 0) {744zfs_stats.resource_drops.fmds_value.ui64++;745return;746}747748/*749* Open a new case.750*/751cs = fmd_case_open(hdl, NULL);752753fmd_hdl_debug(hdl, "opening case for vdev %llu due to '%s'",754vdev_guid, class);755756/*757* Initialize the case buffer. To commonize code, we actually758* create the buffer with existing data, and then call759* zfs_case_unserialize() to instantiate the in-core structure.760*/761fmd_buf_create(hdl, cs, CASE_DATA, sizeof (zfs_case_data_t));762763data.zc_version = CASE_DATA_VERSION_SERD;764data.zc_ena = ena;765data.zc_pool_guid = pool_guid;766data.zc_vdev_guid = vdev_guid;767data.zc_parent_guid = parent_guid;768data.zc_pool_state = (int)pool_state;769770fmd_buf_write(hdl, cs, CASE_DATA, &data, sizeof (data));771772zcp = zfs_case_unserialize(hdl, cs);773assert(zcp != NULL);774if (pool_found)775zcp->zc_when = pool_load;776}777778if (isresource) {779fmd_hdl_debug(hdl, "resource event '%s'", class);780781if (fmd_nvl_class_match(hdl, nvl,782ZFS_MAKE_RSRC(FM_RESOURCE_AUTOREPLACE))) {783/*784* The 'resource.fs.zfs.autoreplace' event indicates785* that the pool was loaded with the 'autoreplace'786* property set. In this case, any pending device787* failures should be ignored, as the asynchronous788* autoreplace handling will take care of them.789*/790fmd_case_close(hdl, zcp->zc_case);791} else if (fmd_nvl_class_match(hdl, nvl,792ZFS_MAKE_RSRC(FM_RESOURCE_REMOVED))) {793/*794* The 'resource.fs.zfs.removed' event indicates that795* device removal was detected, and the device was796* closed asynchronously. 
If this is the case, we797* assume that any recent I/O errors were due to the798* device removal, not any fault of the device itself.799* We reset the SERD engine, and cancel any pending800* timers.801*/802if (zcp->zc_data.zc_has_remove_timer) {803fmd_timer_remove(hdl, zcp->zc_remove_timer);804zcp->zc_data.zc_has_remove_timer = 0;805zfs_case_serialize(zcp);806}807if (zcp->zc_data.zc_serd_io[0] != '\0')808fmd_serd_reset(hdl, zcp->zc_data.zc_serd_io);809if (zcp->zc_data.zc_serd_checksum[0] != '\0')810fmd_serd_reset(hdl,811zcp->zc_data.zc_serd_checksum);812if (zcp->zc_data.zc_serd_slow_io[0] != '\0')813fmd_serd_reset(hdl,814zcp->zc_data.zc_serd_slow_io);815} else if (fmd_nvl_class_match(hdl, nvl,816ZFS_MAKE_RSRC(FM_RESOURCE_STATECHANGE))) {817uint64_t state = 0;818819if (zcp != NULL &&820nvlist_lookup_uint64(nvl,821FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, &state) == 0 &&822state == VDEV_STATE_HEALTHY) {823fmd_hdl_debug(hdl, "closing case after a "824"device statechange to healthy");825fmd_case_close(hdl, zcp->zc_case);826}827}828zfs_stats.resource_drops.fmds_value.ui64++;829return;830}831832/*833* Associate the ereport with this case.834*/835fmd_case_add_ereport(hdl, zcp->zc_case, ep);836837/*838* Don't do anything else if this case is already solved.839*/840if (fmd_case_solved(hdl, zcp->zc_case))841return;842843if (vdev_guid)844fmd_hdl_debug(hdl, "error event '%s', vdev %llu", class,845vdev_guid);846else847fmd_hdl_debug(hdl, "error event '%s'", class);848849/*850* Determine if we should solve the case and generate a fault. We solve851* a case if:852*853* a. A pool failed to open (ereport.fs.zfs.pool)854* b. A device failed to open (ereport.fs.zfs.pool) while a pool855* was up and running.856*857* We may see a series of ereports associated with a pool open, all858* chained together by the same ENA. If the pool open succeeds, then859* we'll see no further ereports. To detect when a pool open has860* succeeded, we associate a timer with the event. When it expires, we861* close the case.862*/863if (fmd_nvl_class_match(hdl, nvl,864ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_POOL))) {865/*866* Pool level fault. 
		 * Pool level fault.  Before solving the case, go through and
		 * close any open device cases that may be pending.
		 */
		for (dcp = uu_list_first(zfs_cases); dcp != NULL;
		    dcp = uu_list_next(zfs_cases, dcp)) {
			if (dcp->zc_data.zc_pool_guid ==
			    zcp->zc_data.zc_pool_guid &&
			    dcp->zc_data.zc_vdev_guid != 0)
				fmd_case_close(hdl, dcp->zc_case);
		}

		zfs_case_solve(hdl, zcp, "fault.fs.zfs.pool");
	} else if (fmd_nvl_class_match(hdl, nvl,
	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_LOG_REPLAY))) {
		/*
		 * Pool level fault for reading the intent logs.
		 */
		zfs_case_solve(hdl, zcp, "fault.fs.zfs.log_replay");
	} else if (fmd_nvl_class_match(hdl, nvl, "ereport.fs.zfs.vdev.*")) {
		/*
		 * Device fault.
		 */
		zfs_case_solve(hdl, zcp, "fault.fs.zfs.device");
	} else if (fmd_nvl_class_match(hdl, nvl,
	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_IO)) ||
	    fmd_nvl_class_match(hdl, nvl,
	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_CHECKSUM)) ||
	    fmd_nvl_class_match(hdl, nvl,
	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_IO_FAILURE)) ||
	    fmd_nvl_class_match(hdl, nvl,
	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DELAY)) ||
	    fmd_nvl_class_match(hdl, nvl,
	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_PROBE_FAILURE))) {
		const char *failmode = NULL;
		boolean_t checkremove = B_FALSE;
		uint32_t pri = 0;

		/*
		 * If this is a checksum or I/O error, then toss it into the
		 * appropriate SERD engine and check to see if it has fired.
		 * Ideally, we want to do something more sophisticated,
		 * (persistent errors for a single data block, etc).  For now,
		 * a single SERD engine is sufficient.
		 */
		if (fmd_nvl_class_match(hdl, nvl,
		    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_IO))) {
			if (zcp->zc_data.zc_serd_io[0] == '\0') {
				if (nvlist_lookup_uint64(nvl,
				    FM_EREPORT_PAYLOAD_ZFS_VDEV_IO_N,
				    &io_n) != 0) {
					io_n = DEFAULT_IO_N;
				}
				if (nvlist_lookup_uint64(nvl,
				    FM_EREPORT_PAYLOAD_ZFS_VDEV_IO_T,
				    &io_t) != 0) {
					io_t = DEFAULT_IO_T;
				}
				zfs_serd_name(zcp->zc_data.zc_serd_io,
				    pool_guid, vdev_guid, "io");
				fmd_serd_create(hdl, zcp->zc_data.zc_serd_io,
				    io_n,
				    SEC2NSEC(io_t));
				zfs_case_serialize(zcp);
			}
			if (zfs_fm_serd_record(hdl, zcp->zc_data.zc_serd_io,
			    ep, zcp, "io error")) {
				checkremove = B_TRUE;
			}
		} else if (fmd_nvl_class_match(hdl, nvl,
		    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DELAY))) {
			uint64_t slow_io_n, slow_io_t;

			/*
			 * Create a slow io SERD engine when the VDEV has the
			 * 'vdev_slow_io_n' and 'vdev_slow_io_t' properties.
			 */
			if (zcp->zc_data.zc_serd_slow_io[0] == '\0' &&
			    nvlist_lookup_uint64(nvl,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_SLOW_IO_N,
			    &slow_io_n) == 0 &&
			    nvlist_lookup_uint64(nvl,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_SLOW_IO_T,
			    &slow_io_t) == 0) {
				zfs_serd_name(zcp->zc_data.zc_serd_slow_io,
				    pool_guid, vdev_guid, "slow_io");
				fmd_serd_create(hdl,
				    zcp->zc_data.zc_serd_slow_io,
				    slow_io_n,
				    SEC2NSEC(slow_io_t));
				zfs_case_serialize(zcp);
			}
			/* Pass event to SERD engine and see if this triggers */
			if (zcp->zc_data.zc_serd_slow_io[0] != '\0' &&
			    zfs_fm_serd_record(hdl,
			    zcp->zc_data.zc_serd_slow_io, ep, zcp, "slow io")) {
				zfs_case_solve(hdl, zcp,
				    "fault.fs.zfs.vdev.slow_io");
			}
		} else if (fmd_nvl_class_match(hdl, nvl,
		    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_CHECKSUM))) {
			uint64_t flags = 0;
			int32_t flags32 = 0;
			/*
			 * We ignore ereports for checksum errors generated by
			 * scrub/resilver I/O to avoid potentially further
			 * degrading the pool while it's being repaired.
			 *
			 * Note that FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS used to
			 * be int32.  To allow newer zed to work on older
			 * kernels, if we don't find the flags, we look for
			 * the older ones too.
			 */
			if (((nvlist_lookup_uint32(nvl,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY, &pri) == 0) &&
			    (pri == ZIO_PRIORITY_SCRUB ||
			    pri == ZIO_PRIORITY_REBUILD)) ||
			    ((nvlist_lookup_uint64(nvl,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS, &flags) == 0) &&
			    (flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) ||
			    ((nvlist_lookup_int32(nvl,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS, &flags32) == 0) &&
			    (flags32 & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)))) {
				fmd_hdl_debug(hdl, "ignoring '%s' for "
				    "scrub/resilver I/O", class);
				return;
			}

			if (zcp->zc_data.zc_serd_checksum[0] == '\0') {
				if (nvlist_lookup_uint64(nvl,
				    FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_N,
				    &checksum_n) != 0) {
					checksum_n = DEFAULT_CHECKSUM_N;
				}
				if (nvlist_lookup_uint64(nvl,
				    FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_T,
				    &checksum_t) != 0) {
					checksum_t = DEFAULT_CHECKSUM_T;
				}

				zfs_serd_name(zcp->zc_data.zc_serd_checksum,
				    pool_guid, vdev_guid, "checksum");
				fmd_serd_create(hdl,
				    zcp->zc_data.zc_serd_checksum,
				    checksum_n,
				    SEC2NSEC(checksum_t));
				zfs_case_serialize(zcp);
			}
			if (zfs_fm_serd_record(hdl,
			    zcp->zc_data.zc_serd_checksum, ep, zcp,
			    "checksum")) {
				zfs_case_solve(hdl, zcp,
				    "fault.fs.zfs.vdev.checksum");
			}
		} else if (fmd_nvl_class_match(hdl, nvl,
		    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_IO_FAILURE)) &&
		    (nvlist_lookup_string(nvl,
		    FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE, &failmode) == 0) &&
		    failmode != NULL) {
			if (strncmp(failmode, FM_EREPORT_FAILMODE_CONTINUE,
			    strlen(FM_EREPORT_FAILMODE_CONTINUE)) == 0) {
				zfs_case_solve(hdl, zcp,
				    "fault.fs.zfs.io_failure_continue");
			} else if (strncmp(failmode, FM_EREPORT_FAILMODE_WAIT,
			    strlen(FM_EREPORT_FAILMODE_WAIT)) == 0) {
				zfs_case_solve(hdl, zcp,
				    "fault.fs.zfs.io_failure_wait");
			}
		} else if (fmd_nvl_class_match(hdl, nvl,
		    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_PROBE_FAILURE))) {
#ifndef __linux__
			/* This causes an unexpected fault diagnosis on linux */
			checkremove = B_TRUE;
#endif
		}

		/*
		 * Because I/O errors may be due to device removal, we postpone
		 * any diagnosis until we're sure that we aren't about to
		 * receive a 'resource.fs.zfs.removed' event.
		 */
		if (checkremove) {
			if (zcp->zc_data.zc_has_remove_timer)
				fmd_timer_remove(hdl, zcp->zc_remove_timer);
			zcp->zc_remove_timer = fmd_timer_install(hdl, zcp, NULL,
			    zfs_remove_timeout);
			if (!zcp->zc_data.zc_has_remove_timer) {
				zcp->zc_data.zc_has_remove_timer = 1;
				zfs_case_serialize(zcp);
			}
		}
	}
}

/*
 * The timeout is fired when we diagnosed an I/O error, and it was not due to
 * device removal (which would cause the timeout to be cancelled).
 */
static void
zfs_fm_timeout(fmd_hdl_t *hdl, id_t id, void *data)
{
	zfs_case_t *zcp = data;

	if (id == zcp->zc_remove_timer)
		zfs_case_solve(hdl, zcp, "fault.fs.zfs.vdev.io");
}

/*
 * The specified case has been closed and any case-specific
 * data structures should be deallocated.
 */
static void
zfs_fm_close(fmd_hdl_t *hdl, fmd_case_t *cs)
{
	zfs_case_t *zcp = fmd_case_getspecific(hdl, cs);

	if (zcp->zc_data.zc_serd_checksum[0] != '\0')
		fmd_serd_destroy(hdl, zcp->zc_data.zc_serd_checksum);
	if (zcp->zc_data.zc_serd_io[0] != '\0')
		fmd_serd_destroy(hdl, zcp->zc_data.zc_serd_io);
	if (zcp->zc_data.zc_serd_slow_io[0] != '\0')
		fmd_serd_destroy(hdl, zcp->zc_data.zc_serd_slow_io);
(zcp->zc_data.zc_has_remove_timer)1088fmd_timer_remove(hdl, zcp->zc_remove_timer);10891090uu_list_remove(zfs_cases, zcp);1091uu_list_node_fini(zcp, &zcp->zc_node, zfs_case_pool);1092fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));1093}10941095static const fmd_hdl_ops_t fmd_ops = {1096zfs_fm_recv, /* fmdo_recv */1097zfs_fm_timeout, /* fmdo_timeout */1098zfs_fm_close, /* fmdo_close */1099NULL, /* fmdo_stats */1100NULL, /* fmdo_gc */1101};11021103static const fmd_prop_t fmd_props[] = {1104{ NULL, 0, NULL }1105};11061107static const fmd_hdl_info_t fmd_info = {1108"ZFS Diagnosis Engine", "1.0", &fmd_ops, fmd_props1109};11101111void1112_zfs_diagnosis_init(fmd_hdl_t *hdl)1113{1114libzfs_handle_t *zhdl;11151116if ((zhdl = libzfs_init()) == NULL)1117return;11181119if ((zfs_case_pool = uu_list_pool_create("zfs_case_pool",1120sizeof (zfs_case_t), offsetof(zfs_case_t, zc_node),1121NULL, UU_LIST_POOL_DEBUG)) == NULL) {1122libzfs_fini(zhdl);1123return;1124}11251126if ((zfs_cases = uu_list_create(zfs_case_pool, NULL,1127UU_LIST_DEBUG)) == NULL) {1128uu_list_pool_destroy(zfs_case_pool);1129libzfs_fini(zhdl);1130return;1131}11321133if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {1134uu_list_destroy(zfs_cases);1135uu_list_pool_destroy(zfs_case_pool);1136libzfs_fini(zhdl);1137return;1138}11391140fmd_hdl_setspecific(hdl, zhdl);11411142(void) fmd_stat_create(hdl, FMD_STAT_NOALLOC, sizeof (zfs_stats) /1143sizeof (fmd_stat_t), (fmd_stat_t *)&zfs_stats);1144}11451146void1147_zfs_diagnosis_fini(fmd_hdl_t *hdl)1148{1149zfs_case_t *zcp;1150uu_list_walk_t *walk;1151libzfs_handle_t *zhdl;11521153/*1154* Remove all active cases.1155*/1156walk = uu_list_walk_start(zfs_cases, UU_WALK_ROBUST);1157while ((zcp = uu_list_walk_next(walk)) != NULL) {1158fmd_hdl_debug(hdl, "removing case ena %llu",1159(long long unsigned)zcp->zc_data.zc_ena);1160uu_list_remove(zfs_cases, zcp);1161uu_list_node_fini(zcp, &zcp->zc_node, zfs_case_pool);1162fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));1163}1164uu_list_walk_end(walk);11651166uu_list_destroy(zfs_cases);1167uu_list_pool_destroy(zfs_case_pool);11681169zhdl = fmd_hdl_getspecific(hdl);1170libzfs_fini(zhdl);1171}117211731174