GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2016, Intel Corporation.
 * Copyright (c) 2018, loli10K <[email protected]>
 */

/*
 * The ZFS retire agent is responsible for managing hot spares across all pools.
 * When we see a device fault or a device removal, we try to open the associated
 * pool and look for any hot spares. We iterate over any available hot spares
 * and attempt a 'zpool replace' for each one.
 *
 * For vdevs diagnosed as faulty, the agent is also responsible for proactively
 * marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
 */

#include <sys/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>
#include <libzutil.h>
#include <libzfs.h>
#include <string.h>
#include <libgen.h>

#include "zfs_agents.h"
#include "fmd_api.h"


typedef struct zfs_retire_repaired {
	struct zfs_retire_repaired *zrr_next;
	uint64_t zrr_pool;
	uint64_t zrr_vdev;
} zfs_retire_repaired_t;

typedef struct zfs_retire_data {
	libzfs_handle_t *zrd_hdl;
	zfs_retire_repaired_t *zrd_repaired;
} zfs_retire_data_t;

static void
zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
{
	zfs_retire_repaired_t *zrp;

	while ((zrp = zdp->zrd_repaired) != NULL) {
		zdp->zrd_repaired = zrp->zrr_next;
		fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
	}
}

/*
 * Find a pool with a matching GUID.
 */
typedef struct find_cbdata {
	uint64_t cb_guid;
	zpool_handle_t *cb_zhp;
	nvlist_t *cb_vdev;
	uint64_t cb_vdev_guid;
	uint64_t cb_num_spares;
} find_cbdata_t;

static int
find_pool(zpool_handle_t *zhp, void *data)
{
	find_cbdata_t *cbp = data;

	if (cbp->cb_guid ==
	    zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
		cbp->cb_zhp = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Find a vdev within a tree with a matching GUID.
 */
static nvlist_t *
find_vdev(libzfs_handle_t *zhdl, nvlist_t *nv, uint64_t search_guid)
{
	uint64_t guid;
	nvlist_t **child;
	uint_t c, children;
	nvlist_t *ret;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
	    guid == search_guid) {
		fmd_hdl_debug(fmd_module_hdl("zfs-retire"),
		    "matched vdev %llu", guid);
		return (nv);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
			return (ret);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
			return (ret);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
			return (ret);
	}

	return (NULL);
}

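/*
 * zpool_iter() callback: if this pool has a hot spare whose GUID matches
 * cb_vdev_guid, request its removal (unless it is already in the REMOVED
 * state) and count it in cb_num_spares.
 */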
static int
remove_spares(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvroot;
	nvlist_t **spares;
	uint_t nspares;
	char *devname;
	find_cbdata_t *cbp = data;
	uint64_t spareguid = 0;
	vdev_stat_t *vs;
	unsigned int c;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0) {
		zpool_close(zhp);
		return (0);
	}

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0) {
		zpool_close(zhp);
		return (0);
	}

	for (int i = 0; i < nspares; i++) {
		if (nvlist_lookup_uint64(spares[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == cbp->cb_vdev_guid) {
			devname = zpool_vdev_name(NULL, zhp, spares[i],
			    B_FALSE);
			nvlist_lookup_uint64_array(spares[i],
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c);
			if (vs->vs_state != VDEV_STATE_REMOVED &&
			    zpool_vdev_remove_wanted(zhp, devname) == 0)
				cbp->cb_num_spares++;
			break;
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Given a vdev guid, find and remove all spares associated with it.
 */
static int
find_and_remove_spares(libzfs_handle_t *zhdl, uint64_t vdev_guid)
{
	find_cbdata_t cb;

	cb.cb_num_spares = 0;
	cb.cb_vdev_guid = vdev_guid;
	zpool_iter(zhdl, remove_spares, &cb);

	return (cb.cb_num_spares);
}

/*
 * Given a (pool, vdev) GUID pair, find the matching pool and vdev.
 */
static zpool_handle_t *
find_by_guid(libzfs_handle_t *zhdl, uint64_t pool_guid, uint64_t vdev_guid,
    nvlist_t **vdevp)
{
	find_cbdata_t cb;
	zpool_handle_t *zhp;
	nvlist_t *config, *nvroot;

	/*
	 * Find the corresponding pool and make sure the vdev still exists.
	 */
	cb.cb_guid = pool_guid;
	if (zpool_iter(zhdl, find_pool, &cb) != 1)
		return (NULL);

	zhp = cb.cb_zhp;
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (vdev_guid != 0) {
		if ((*vdevp = find_vdev(zhdl, nvroot, vdev_guid)) == NULL) {
			zpool_close(zhp);
			return (NULL);
		}
	}

	return (zhp);
}

/*
 * Given a vdev, attempt to replace it with every known spare until one
 * succeeds or we run out of devices to try.
 * Return whether we were successful or not in replacing the device.
 */
static boolean_t
replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
{
	nvlist_t *config, *nvroot, *replacement;
	nvlist_t **spares;
	uint_t s, nspares;
	char *dev_name;
	zprop_source_t source;
	int ashift;

	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0)
		return (B_FALSE);

	/*
	 * Find out if there are any hot spares available in the pool.
	 */
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) != 0)
		return (B_FALSE);

	/*
	 * Look up the "ashift" pool property; we may need it for the
	 * replacement.
	 */
	ashift = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &source);

	replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);

	(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT);

	dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);

	/*
	 * Try to replace each spare, ending when we successfully
	 * replace it.
	 */
	for (s = 0; s < nspares; s++) {
		boolean_t rebuild = B_FALSE;
		const char *spare_name, *type;

		if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
		    &spare_name) != 0)
			continue;

		/* prefer sequential resilvering for distributed spares */
		if ((nvlist_lookup_string(spares[s], ZPOOL_CONFIG_TYPE,
		    &type) == 0) && strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0)
			rebuild = B_TRUE;

		/* if set, add the "ashift" pool property to the spare nvlist */
		if (source != ZPROP_SRC_DEFAULT)
			(void) nvlist_add_uint64(spares[s],
			    ZPOOL_CONFIG_ASHIFT, ashift);

		(void) nvlist_add_nvlist_array(replacement,
		    ZPOOL_CONFIG_CHILDREN, (const nvlist_t **)&spares[s], 1);

		fmd_hdl_debug(hdl, "zpool_vdev_replace '%s' with spare '%s'",
		    dev_name, zfs_basename(spare_name));

		if (zpool_vdev_attach(zhp, dev_name, spare_name,
		    replacement, B_TRUE, rebuild) == 0) {
			free(dev_name);
			nvlist_free(replacement);
			return (B_TRUE);
		}
	}

	free(dev_name);
	nvlist_free(replacement);

	return (B_FALSE);
}

/*
 * Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and
 * ASRU is now usable. ZFS has found the device to be present and
 * functioning.
 */
static void
zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	zfs_retire_repaired_t *zrp;
	uint64_t pool_guid, vdev_guid;
	if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
	    &pool_guid) != 0 || nvlist_lookup_uint64(nvl,
	    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
		return;

	/*
	 * Before checking the state of the ASRU, go through and see if we've
	 * already made an attempt to repair this ASRU. This list is cleared
	 * whenever we receive any kind of list event, and is designed to
	 * prevent us from generating a feedback loop when we attempt repairs
	 * against a faulted pool. The problem is that checking the unusable
	 * state of the ASRU can involve opening the pool, which can post
	 * statechange events but otherwise leave the pool in the faulted
	 * state. This list allows us to detect when a statechange event is
	 * due to our own request.
	 */
	for (zrp = zdp->zrd_repaired; zrp != NULL; zrp = zrp->zrr_next) {
		if (zrp->zrr_pool == pool_guid &&
		    zrp->zrr_vdev == vdev_guid)
			return;
	}

	zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
	zrp->zrr_next = zdp->zrd_repaired;
	zrp->zrr_pool = pool_guid;
	zrp->zrr_vdev = vdev_guid;
	zdp->zrd_repaired = zrp;

	fmd_hdl_debug(hdl, "marking repaired vdev %llu on pool %llu",
	    vdev_guid, pool_guid);
}

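/*
 * fmdo_recv entry point: handles resource removal/statechange notifications
 * directly, then walks the suspect list of fault and repair events to fault,
 * degrade, clear, or spare-in the affected vdevs.
 */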
static void
zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
    const char *class)
{
	(void) ep;
	uint64_t pool_guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *resource, *fault;
	nvlist_t **faults;
	uint_t f, nfaults;
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
	libzfs_handle_t *zhdl = zdp->zrd_hdl;
	boolean_t fault_device, degrade_device;
	boolean_t is_repair;
	boolean_t l2arc = B_FALSE;
	boolean_t spare = B_FALSE;
	const char *scheme;
	nvlist_t *vdev = NULL;
	const char *uuid;
	int repair_done = 0;
	boolean_t retire;
	boolean_t is_disk;
	vdev_aux_t aux;
	uint64_t state = 0;
	vdev_stat_t *vs;
	unsigned int c;

	fmd_hdl_debug(hdl, "zfs_retire_recv: '%s'", class);

	(void) nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE,
	    &state);

	/*
	 * If this is a resource notifying us of device removal then simply
	 * check for an available spare and continue unless the device is an
	 * l2arc vdev, in which case we just offline it.
	 */
	if (strcmp(class, "resource.fs.zfs.removed") == 0 ||
	    (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
	    (state == VDEV_STATE_REMOVED || state == VDEV_STATE_FAULTED))) {
		const char *devtype;
		char *devname;
		boolean_t skip_removal = B_FALSE;

		if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
		    &devtype) == 0) {
			if (strcmp(devtype, VDEV_TYPE_SPARE) == 0)
				spare = B_TRUE;
			else if (strcmp(devtype, VDEV_TYPE_L2CACHE) == 0)
				l2arc = B_TRUE;
		}

		if (nvlist_lookup_uint64(nvl,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
			return;

		if (vdev_guid == 0) {
			fmd_hdl_debug(hdl, "Got a zero GUID");
			return;
		}

		if (spare) {
			int nspares = find_and_remove_spares(zhdl, vdev_guid);
			fmd_hdl_debug(hdl, "%d spares removed", nspares);
			return;
		}

		if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
		    &pool_guid) != 0)
			return;

		if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
		    &vdev)) == NULL)
			return;

		devname = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);

		nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c);

		if (vs->vs_state == VDEV_STATE_OFFLINE)
			return;

		/*
		 * If state REMOVED is requested for an already removed vdev,
		 * it's a loopback event from spa_async_remove(). Just
		 * ignore it.
		 */
		if ((vs->vs_state == VDEV_STATE_REMOVED &&
		    state == VDEV_STATE_REMOVED)) {
			if (strcmp(class, "resource.fs.zfs.removed") == 0 &&
			    nvlist_exists(nvl, "by_kernel")) {
				skip_removal = B_TRUE;
			} else {
				return;
			}
		}

		/* Remove the vdev since the device is unplugged */
		int remove_status = 0;
		if (!skip_removal && (l2arc ||
		    (strcmp(class, "resource.fs.zfs.removed") == 0))) {
			remove_status = zpool_vdev_remove_wanted(zhp, devname);
			fmd_hdl_debug(hdl, "zpool_vdev_remove_wanted '%s'"
			    ", err:%d", devname, libzfs_errno(zhdl));
		}

		/* Replace the vdev with a spare if it's not an l2arc */
		if (!l2arc && !remove_status &&
		    (!fmd_prop_get_int32(hdl, "spare_on_remove") ||
		    replace_with_spare(hdl, zhp, vdev) == B_FALSE)) {
			/* Could not handle with spare */
			fmd_hdl_debug(hdl, "no spare for '%s'", devname);
		}

		free(devname);
		zpool_close(zhp);
		return;
	}

	if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
		return;

	/*
	 * Note: on Linux, statechange events are more than just
	 * healthy ones, so we need to confirm the actual state value.
	 */
	if (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
	    state == VDEV_STATE_HEALTHY) {
		zfs_vdev_repair(hdl, nvl);
		return;
	}
	if (strcmp(class, "sysevent.fs.zfs.vdev_remove") == 0) {
		zfs_vdev_repair(hdl, nvl);
		return;
	}

	zfs_retire_clear_data(hdl, zdp);

	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0)
		is_repair = B_TRUE;
	else
		is_repair = B_FALSE;

	/*
	 * We subscribe to zfs faults as well as all repair events.
	 */
	if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
	    &faults, &nfaults) != 0)
		return;

	for (f = 0; f < nfaults; f++) {
		fault = faults[f];

		fault_device = B_FALSE;
		degrade_device = B_FALSE;
		is_disk = B_FALSE;

		if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
		    &retire) == 0 && retire == 0)
			continue;

		/*
		 * While we subscribe to fault.fs.zfs.*, we only take action
		 * for faults targeting a specific vdev (open failure or SERD
		 * failure). We also subscribe to fault.io.* events, so that
		 * faulty disks will be faulted in the ZFS configuration.
		 */
		if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
			fault_device = B_TRUE;
		} else if (fmd_nvl_class_match(hdl, fault,
		    "fault.fs.zfs.vdev.checksum")) {
			degrade_device = B_TRUE;
		} else if (fmd_nvl_class_match(hdl, fault,
		    "fault.fs.zfs.vdev.slow_io")) {
			degrade_device = B_TRUE;
		} else if (fmd_nvl_class_match(hdl, fault,
		    "fault.fs.zfs.device")) {
			fault_device = B_FALSE;
		} else if (fmd_nvl_class_match(hdl, fault, "fault.io.*")) {
			is_disk = B_TRUE;
			fault_device = B_TRUE;
		} else {
			continue;
		}

		if (is_disk) {
			continue;
		} else {
			/*
			 * This is a ZFS fault. Look up the resource, and
			 * attempt to find the matching vdev.
			 */
			if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
			    &resource) != 0 ||
			    nvlist_lookup_string(resource, FM_FMRI_SCHEME,
			    &scheme) != 0)
				continue;

			if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
				continue;

			if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
			    &pool_guid) != 0)
				continue;

			if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
			    &vdev_guid) != 0) {
				if (is_repair)
					vdev_guid = 0;
				else
					continue;
			}

			if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
			    &vdev)) == NULL)
				continue;

			aux = VDEV_AUX_ERR_EXCEEDED;
		}

		if (vdev_guid == 0) {
			/*
			 * For pool-level repair events, clear the entire pool.
			 */
			fmd_hdl_debug(hdl, "zpool_clear of pool '%s'",
			    zpool_get_name(zhp));
			(void) zpool_clear(zhp, NULL, NULL);
			zpool_close(zhp);
			continue;
		}

		/*
		 * If this is a repair event, then mark the vdev as repaired
		 * and continue.
		 */
		if (is_repair) {
			repair_done = 1;
			fmd_hdl_debug(hdl, "zpool_clear of pool '%s' vdev %llu",
			    zpool_get_name(zhp), vdev_guid);
			(void) zpool_vdev_clear(zhp, vdev_guid);
			zpool_close(zhp);
			continue;
		}

		/*
		 * Actively fault the device if needed.
		 */
		if (fault_device)
			(void) zpool_vdev_fault(zhp, vdev_guid, aux);
		if (degrade_device)
			(void) zpool_vdev_degrade(zhp, vdev_guid, aux);

		if (fault_device || degrade_device)
			fmd_hdl_debug(hdl, "zpool_vdev_%s: vdev %llu on '%s'",
			    fault_device ? "fault" : "degrade", vdev_guid,
			    zpool_get_name(zhp));

		/*
		 * Attempt to substitute a hot spare.
		 */
		(void) replace_with_spare(hdl, zhp, vdev);

		zpool_close(zhp);
	}

	if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
	    nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
		fmd_case_uuresolved(hdl, uuid);
}

static const fmd_hdl_ops_t fmd_ops = {
	zfs_retire_recv,	/* fmdo_recv */
	NULL,			/* fmdo_timeout */
	NULL,			/* fmdo_close */
	NULL,			/* fmdo_stats */
	NULL,			/* fmdo_gc */
};

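/*
 * Module tunables: "spare_on_remove" controls whether a removed or faulted
 * device is automatically replaced with an available hot spare.
 */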
static const fmd_prop_t fmd_props[] = {
	{ "spare_on_remove", FMD_TYPE_BOOL, "true" },
	{ NULL, 0, NULL }
};

static const fmd_hdl_info_t fmd_info = {
	"ZFS Retire Agent", "1.0", &fmd_ops, fmd_props
};

void
_zfs_retire_init(fmd_hdl_t *hdl)
{
	zfs_retire_data_t *zdp;
	libzfs_handle_t *zhdl;

	if ((zhdl = libzfs_init()) == NULL)
		return;

	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
		libzfs_fini(zhdl);
		return;
	}

	zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
	zdp->zrd_hdl = zhdl;

	fmd_hdl_setspecific(hdl, zdp);
}

void
_zfs_retire_fini(fmd_hdl_t *hdl)
{
	zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);

	if (zdp != NULL) {
		zfs_retire_clear_data(hdl, zdp);
		libzfs_fini(zdp->zrd_hdl);
		fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
	}
}