/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * ZFS syseventd module.
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 *	1. Search for any vdevs whose devid matches that of the newly added
 *	   device.
 *
 *	2. If no vdevs are found, then search for any vdevs whose devfs path
 *	   matches that of the new device.
 *
 *	3. If no vdevs match by either method, then ignore the event.
 *
 *	4. Attempt to online the device with a flag to indicate that it should
 *	   be unspared when resilvering completes. If this succeeds, then the
 *	   same device was inserted and we should continue normally.
 *
 *	5. If the pool does not have the 'autoreplace' property set, attempt to
 *	   online the device again without the unspare flag, which will
 *	   generate a FMA fault.
 *
 *	6. If the pool has the 'autoreplace' property set, and the matching vdev
 *	   is a whole disk, then label the new disk and attempt a 'zpool
 *	   replace'.
 *
 * The module responds to EC_DEV_ADD events for both disks and lofi devices,
 * with the latter used for testing. The special ESC_ZFS_VDEV_CHECK event
 * indicates that a device failed to open during pool load, but the autoreplace
 * property was set. In this case, we deferred the associated FMA fault until
 * our module had a chance to process the autoreplace logic. If the device
 * could not be replaced, then the second online attempt will trigger the FMA
 * fault that we skipped earlier.
 */
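
/*
 * As an illustration of the effect (pool and device names are hypothetical):
 * when the slot that backed vdev c1t2d0 of pool 'tank' is repopulated and
 * autoreplace is enabled, the module performs roughly the equivalent of
 *
 *	# zpool replace tank c1t2d0
 *
 * whereas with autoreplace disabled it only attempts to online the device,
 * the rough equivalent of
 *
 *	# zpool online tank c1t2d0
 */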

#include <alloca.h>
#include <devid.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libsysevent.h>
#include <libzfs.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <unistd.h>

#if defined(__i386) || defined(__amd64)
#define	PHYS_PATH	":q"
#define	RAW_SLICE	"p0"
#elif defined(__sparc)
#define	PHYS_PATH	":c"
#define	RAW_SLICE	"s2"
#else
#error Unknown architecture
#endif
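
/*
 * PHYS_PATH is the minor-name suffix appended to a /devices path to obtain an
 * openable node for the whole disk (the p0 fdisk partition on x86, the s2
 * backup slice on SPARC); RAW_SLICE names the corresponding /dev slice.
 */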

typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);

libzfs_handle_t *g_zfshdl;
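
/*
 * g_zfshdl is the module-global libzfs handle, initialized in slm_init(); a
 * zfs_process_func_t is the callback invoked for each (pool, vdev) pair that
 * matches the search criteria used by the iterators below.
 */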

/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system. If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk. This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished). If this succeeds, then we're done. If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected. If the 'autoreplace' property
 * is set, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'. If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	uint64_t wholedisk = 0ULL;
	char *devid = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	size_t len;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_DEVID, &devid);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);

	/*
	 * We should have a way to online a device by guid. With the current
	 * interface, we are forced to chop off the 's0' for whole disks.
	 */
	(void) strlcpy(fullpath, path, sizeof (fullpath));
	if (wholedisk)
		fullpath[strlen(fullpath) - 2] = '\0';
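		/*
		 * e.g. a hypothetical "/dev/dsk/c1t2d0s0" becomes
		 * "/dev/dsk/c1t2d0".
		 */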

	/*
	 * Attempt to online the device. It would be nice to online this by
	 * GUID, but the current interface only supports lookup by path.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    newstate != VDEV_STATE_CANT_OPEN)
		return;

	/*
	 * If the pool doesn't have the autoreplace property set, then attempt a
	 * true online (without the unspare flag), which will trigger a FMA
	 * fault.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE) ||
	    (isdisk && !wholedisk)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		return;
	}

	if (isdisk) {
		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label. Before we can label the disk, we need
		 * access to a raw node. Ideally, we'd like to walk the devinfo
		 * tree and find a raw node from the corresponding parent node.
		 * This is overly complicated, and since we know how we labeled
		 * this device in the first place, we know it's safe to switch
		 * from /dev/dsk to /dev/rdsk and append the backup slice.
		 */
		if (strncmp(path, "/dev/dsk/", 9) != 0)
			return;

		(void) strlcpy(rawpath, path + 9, sizeof (rawpath));
		len = strlen(rawpath);
		rawpath[len - 2] = '\0';
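		/* rawpath is now the bare disk name, e.g. a hypothetical "c1t2d0" */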

		if (zpool_label_disk(g_zfshdl, zhp, rawpath) != 0)
			return;
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach(). While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/devid/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
		return;

	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    (devid && nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID,
	    devid) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);
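
	/*
	 * The final B_TRUE argument requests a replacing attach, i.e. the
	 * library-level equivalent of 'zpool replace' for this vdev.
	 */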
	(void) zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	nvlist_free(nvroot);
}

/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char		*dd_compare;
	const char		*dd_prop;
	zfs_process_func_t	dd_func;
	boolean_t		dd_found;
	boolean_t		dd_isdisk;
	uint64_t		dd_pool_guid;
	uint64_t		dd_vdev_guid;
} dev_data_t;

static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
	dev_data_t *dp = data;
	char *path;
	uint_t c, children;
	nvlist_t **child;
	size_t len = strlen(dp->dd_compare);
	uint64_t wholedisk = 0ULL;
	uint64_t guid;

	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
		return;
	}

	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_WHOLE_DISK,
	    &wholedisk);

	if (dp->dd_vdev_guid != 0) {
		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid != dp->dd_vdev_guid)
			return;
	} else {
		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strncmp(dp->dd_compare, path, len) != 0)
			return;

		/*
		 * Normally, we want to have an exact match for the comparison
		 * string. However, we allow substring matches in the following
		 * cases:
		 *
		 *	<path>:		This is a devpath, and the target is one
		 *			of its children.
		 *
		 *	<path/>		This is a devid for a whole disk, and
		 *			the target is one of its children.
		 */
		if (path[len] != '\0' && path[len] != ':' &&
		    path[len - 1] != '/')
			return;
	}

	/*
	 * We have a match; record it so that devid_iter() and devpath_iter()
	 * report success, and hand the vdev off to the processing callback.
	 */
	dp->dd_found = B_TRUE;
	(dp->dd_func)(zhp, nvl, dp->dd_isdisk);
}

static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;

	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Given a physical device path, iterate over all (pool, vdev) pairs which
 * correspond to the given path.
 */
static boolean_t
devpath_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
{
	dev_data_t data = { 0 };

	data.dd_compare = devpath;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_isdisk = wholedisk;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a /devices path, look up the corresponding devid for each minor node,
 * and find any vdevs with matching devids. Doing this straight up would be
 * rather inefficient, O(minor nodes * vdevs in system), so we take advantage of
 * the fact that each devid ends with "/<minornode>". Once we find any valid
 * minor node, we chop off the portion after the last slash, and then search for
 * matching vdevs, which is O(vdevs in system).
 */
static boolean_t
devid_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
{
	size_t len = strlen(devpath) + sizeof ("/devices") +
	    sizeof (PHYS_PATH) - 1;
	char *fullpath;
	int fd;
	ddi_devid_t devid;
	char *devidstr, *fulldevid;
	dev_data_t data = { 0 };

	/*
	 * Try to open a known minor node.
	 */
	fullpath = alloca(len);
	(void) snprintf(fullpath, len, "/devices%s%s", devpath, PHYS_PATH);
	if ((fd = open(fullpath, O_RDONLY)) < 0)
		return (B_FALSE);

	/*
	 * Determine the devid as a string, with no trailing slash for the minor
	 * node.
	 */
	if (devid_get(fd, &devid) != 0) {
		(void) close(fd);
		return (B_FALSE);
	}
	(void) close(fd);

	if ((devidstr = devid_str_encode(devid, NULL)) == NULL) {
		devid_free(devid);
		return (B_FALSE);
	}
	devid_free(devid);	/* only the encoded string is needed from here on */

	len = strlen(devidstr) + 2;
	fulldevid = alloca(len);
	(void) snprintf(fulldevid, len, "%s/", devidstr);
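
	/*
	 * fulldevid now holds the devid with a trailing slash, for example a
	 * hypothetical "id1,sd@SATA_____ST3500630AS______9QG0XXXX/", which
	 * prefix-matches vdev devids of the form "<devid>/<minor name>".
	 */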

	data.dd_compare = fulldevid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_isdisk = wholedisk;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	devid_str_free(devidstr);

	return (data.dd_found);
}

/*
 * This function is called when we receive a devfs add event. This can be
 * either a disk event or a lofi event, and the behavior is slightly different
 * depending on which it is.
 */
static int
zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
{
	char *devpath, *devname;
	char path[PATH_MAX], realpath[PATH_MAX];
	char *colon, *raw;
	int ret;

	/*
	 * The main unit of operation is the physical device path. For disks,
	 * this is the device node, as all minor nodes are affected. For lofi
	 * devices, this includes the minor path. Unfortunately, this isn't
	 * represented in the DEV_PHYS_PATH for various reasons.
	 */
	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath) != 0)
		return (-1);

	/*
	 * If this is a lofi device, then also get the minor instance name.
	 * Unfortunately, the current payload doesn't include an easy way to get
	 * this information. So we cheat by resolving the 'dev_name' (which
	 * refers to the raw device) and taking the minor portion between the
	 * ':' and the ',raw' suffix.
	 */
	(void) strlcpy(realpath, devpath, sizeof (realpath));
	if (is_lofi) {
		if (nvlist_lookup_string(nvl, DEV_NAME,
		    &devname) == 0 &&
		    (ret = resolvepath(devname, path,
		    sizeof (path))) > 0) {
			path[ret] = '\0';
			colon = strchr(path, ':');
			if (colon != NULL)
				raw = strstr(colon + 1, ",raw");
			if (colon != NULL && raw != NULL) {
				*raw = '\0';
				(void) snprintf(realpath,
				    sizeof (realpath), "%s%s",
				    devpath, colon);
				*raw = ',';
			}
		}
	}
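
	/*
	 * For example (hypothetical lofi instance): a dev_name of
	 * "/dev/rlofi/1" resolves to a path ending in "lofi@0:1,raw", so the
	 * ":1" minor suffix is appended to devpath to form realpath.
	 */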

	/*
	 * Iterate over all vdevs with a matching devid, and then those with a
	 * matching /devices path. For disks, we only want to pay attention to
	 * vdevs marked as whole disks. For lofi, we don't care (because we're
	 * matching an exact minor name).
	 */
	if (!devid_iter(realpath, zfs_process_add, !is_lofi))
		(void) devpath_iter(realpath, zfs_process_add, !is_lofi);

	return (0);
}

/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool. In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0)
		return (0);

	data.dd_isdisk = B_TRUE;
	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}

/*ARGSUSED*/
static int
zfs_deliver_event(sysevent_t *ev, int unused)
{
	const char *class = sysevent_get_class_name(ev);
	const char *subclass = sysevent_get_subclass_name(ev);
	nvlist_t *nvl;
	int ret;
	boolean_t is_lofi, is_check;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/*
		 * We're mainly interested in disk additions, but we also listen
		 * for new lofi devices, to allow for simplified testing.
		 */
		if (strcmp(subclass, ESC_DISK) == 0)
			is_lofi = B_FALSE;
		else if (strcmp(subclass, ESC_LOFI) == 0)
			is_lofi = B_TRUE;
		else
			return (0);

		is_check = B_FALSE;
	} else if (strcmp(class, EC_ZFS) == 0 &&
	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
		/*
		 * This event signifies that a device failed to open during pool
		 * load, but the 'autoreplace' property was set, so we should
		 * pretend it's just been added.
		 */
		is_check = B_TRUE;
	} else {
		return (0);
	}

	if (sysevent_get_attr_list(ev, &nvl) != 0)
		return (-1);

	if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl, is_lofi);

	nvlist_free(nvl);
	return (ret);
}

static struct slm_mod_ops zfs_mod_ops = {
	SE_MAJOR_VERSION, SE_MINOR_VERSION, 10, zfs_deliver_event
};
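
/*
 * zfs_mod_ops advertises the syseventd loadable module (SLM) interface
 * version, the retry limit (10) for failed event deliveries, and the delivery
 * callback; slm_init() and slm_fini() below are the entry points syseventd
 * invokes when loading and unloading the module.
 */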

struct slm_mod_ops *
slm_init()
{
	if ((g_zfshdl = libzfs_init()) == NULL)
		return (NULL);

	return (&zfs_mod_ops);
}

void
slm_fini()
{
}