/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * ZFS syseventd module.
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 *	1. Search for any vdevs whose devid matches that of the newly added
 *	   device.
 *
 *	2. If no vdevs are found, then search for any vdevs whose devfs path
 *	   matches that of the new device.
 *
 *	3. If no vdevs match by either method, then ignore the event.
 *
 *	4. Attempt to online the device with a flag to indicate that it should
 *	   be unspared when resilvering completes.  If this succeeds, then the
 *	   same device was inserted and we should continue normally.
 *
 *	5. If the pool does not have the 'autoreplace' property set, attempt to
 *	   online the device again without the unspare flag, which will
 *	   generate a FMA fault.
 *
 *	6. If the pool has the 'autoreplace' property set, and the matching vdev
 *	   is a whole disk, then label the new disk and attempt a 'zpool
 *	   replace'.
 *
 * The module responds to EC_DEV_ADD events for both disks and lofi devices,
 * with the latter used for testing.  The special ESC_ZFS_VDEV_CHECK event
 * indicates that a device failed to open during pool load, but the autoreplace
 * property was set.  In this case, we deferred the associated FMA fault until
 * our module had a chance to process the autoreplace logic.  If the device
 * could not be replaced, then the second online attempt will trigger the FMA
 * fault that we skipped earlier.
 */

#include <alloca.h>
#include <devid.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libsysevent.h>
#include <libzfs.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <unistd.h>

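/*
 * PHYS_PATH is a minor-node suffix that can be appended to a /devices path
 * to open a node known to exist (see devid_iter()); RAW_SLICE names the
 * corresponding whole-disk slice.
 */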
#if defined(__i386) || defined(__amd64)
#define	PHYS_PATH	":q"
#define	RAW_SLICE	"p0"
#elif defined(__sparc)
#define	PHYS_PATH	":c"
#define	RAW_SLICE	"s2"
#else
#error Unknown architecture
#endif

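/*
 * Callback invoked for each vdev that matches the search criteria; the
 * boolean indicates whether the matching vdev is a whole disk.
 */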
typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);

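/* Global libzfs handle, created in slm_init() and shared by all callbacks. */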
libzfs_handle_t *g_zfshdl;

/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is set, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	uint64_t wholedisk = 0ULL;
	char *devid = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	size_t len;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_DEVID, &devid);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);

	/*
	 * We should have a way to online a device by guid.  With the current
	 * interface, we are forced to chop off the 's0' for whole disks.
	 */
	(void) strlcpy(fullpath, path, sizeof (fullpath));
	if (wholedisk)
		fullpath[strlen(fullpath) - 2] = '\0';

	/*
	 * Attempt to online the device.  It would be nice to online this by
	 * GUID, but the current interface only supports lookup by path.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    newstate != VDEV_STATE_CANT_OPEN)
		return;

	/*
	 * If the pool doesn't have the autoreplace property set, then attempt a
	 * true online (without the unspare flag), which will trigger a FMA
	 * fault.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    (isdisk && !wholedisk)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		return;
	}

	if (isdisk) {
		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * access to a raw node.  Ideally, we'd like to walk the devinfo
		 * tree and find a raw node from the corresponding parent node.
		 * This is overly complicated, and since we know how we labeled
		 * this device in the first place, we know it's safe to switch
		 * from /dev/dsk to /dev/rdsk and append the backup slice.
		 */
		if (strncmp(path, "/dev/dsk/", 9) != 0)
			return;

		(void) strlcpy(rawpath, path + 9, sizeof (rawpath));
		len = strlen(rawpath);
		rawpath[len - 2] = '\0';

		if (zpool_label_disk(g_zfshdl, zhp, rawpath) != 0)
			return;
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/devid/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
		return;

	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    (devid && nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID,
	    devid) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}
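
	/*
	 * nvlist_add_nvlist_array() stores a copy of newvd inside nvroot, so
	 * the local copy can be freed as soon as it has been added.
	 */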
	nvlist_free(newvd);

	(void) zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	nvlist_free(nvroot);
}

/*
 * Utility functions to find a vdev matching given criteria.
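 *
 * dd_compare is matched as a prefix against the vdev property named by
 * dd_prop (a devid or physical path); when dd_vdev_guid is nonzero, matching
 * is done by GUID instead.  dd_func is invoked for every vdev that matches.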
 */
typedef struct dev_data {
	const char	*dd_compare;
	const char	*dd_prop;
	zfs_process_func_t	dd_func;
	boolean_t	dd_found;
	boolean_t	dd_isdisk;
	uint64_t	dd_pool_guid;
	uint64_t	dd_vdev_guid;
} dev_data_t;

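/*
 * Recursively walk the vdev tree rooted at 'nvl', invoking the configured
 * callback on every leaf vdev that matches the search criteria.
 */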
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
	dev_data_t *dp = data;
	char *path;
	uint_t c, children;
	nvlist_t **child;
	size_t len;
	uint64_t guid;

	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
		return;
	}

	if (dp->dd_vdev_guid != 0) {
		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid != dp->dd_vdev_guid)
			return;
	} else {
		len = strlen(dp->dd_compare);

		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strncmp(dp->dd_compare, path, len) != 0)
			return;

		/*
		 * Normally, we want to have an exact match for the comparison
		 * string.  However, we allow substring matches in the following
		 * cases:
		 *
		 *	<path>:		This is a devpath, and the target is one
		 *			of its children.
		 *
		 *	<path/>		This is a devid for a whole disk, and
		 *			the target is one of its children.
		 */
		if (path[len] != '\0' && path[len] != ':' &&
		    path[len - 1] != '/')
			return;
	}

	/*
	 * Record the match so that devid_iter() and devpath_iter() can report
	 * it, then hand the vdev to the callback.
	 */
	dp->dd_found = B_TRUE;
	(dp->dd_func)(zhp, nvl, dp->dd_isdisk);
}

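/*
 * zpool_iter() callback: if the pool matches dd_pool_guid (or no pool GUID
 * was specified), walk its vdev tree looking for matching devices.
 */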
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;

	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Given a physical device path, iterate over all (pool, vdev) pairs which
 * correspond to the given path.
 */
static boolean_t
devpath_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
{
	dev_data_t data = { 0 };

	data.dd_compare = devpath;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_isdisk = wholedisk;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a /devices path, look up the corresponding devid for each minor node,
 * and find any vdevs with matching devids.  Doing this straight up would be
 * rather inefficient, O(minor nodes * vdevs in system), so we take advantage of
 * the fact that each devid ends with "/<minornode>".  Once we find any valid
 * minor node, we chop off the portion after the last slash, and then search for
 * matching vdevs, which is O(vdevs in system).
 */
static boolean_t
devid_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
{
	size_t len = strlen(devpath) + sizeof ("/devices") +
	    sizeof (PHYS_PATH) - 1;
	char *fullpath;
	int fd;
	ddi_devid_t devid;
	char *devidstr, *fulldevid;
	dev_data_t data = { 0 };

	/*
	 * Try to open a known minor node.
	 */
	fullpath = alloca(len);
	(void) snprintf(fullpath, len, "/devices%s%s", devpath, PHYS_PATH);
	if ((fd = open(fullpath, O_RDONLY)) < 0)
		return (B_FALSE);

	/*
	 * Determine the devid as a string, with no trailing slash for the
	 * minor node.
	 */
	if (devid_get(fd, &devid) != 0) {
		(void) close(fd);
		return (B_FALSE);
	}
	(void) close(fd);

	if ((devidstr = devid_str_encode(devid, NULL)) == NULL) {
		devid_free(devid);
		return (B_FALSE);
	}

	/*
	 * The encoded string is all we need from here on; free the devid
	 * handle so it isn't leaked on the success path.
	 */
	devid_free(devid);

	len = strlen(devidstr) + 2;
	fulldevid = alloca(len);
	(void) snprintf(fulldevid, len, "%s/", devidstr);

	data.dd_compare = fulldevid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_isdisk = wholedisk;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	devid_str_free(devidstr);

	return (data.dd_found);
}

/*
 * This function is called when we receive a devfs add event.  This can be
 * either a disk event or a lofi event, and the behavior is slightly different
 * depending on which it is.
 */
static int
zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
{
	char *devpath, *devname;
	char path[PATH_MAX], realpath[PATH_MAX];
	char *colon, *raw;
	int ret;

	/*
	 * The main unit of operation is the physical device path.  For disks,
	 * this is the device node, as all minor nodes are affected.  For lofi
	 * devices, this includes the minor path.  Unfortunately, this isn't
	 * represented in the DEV_PHYS_PATH for various reasons.
	 */
	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath) != 0)
		return (-1);

	/*
	 * If this is a lofi device, then also get the minor instance name.
	 * Unfortunately, the current payload doesn't include an easy way to get
	 * this information.  So we cheat by resolving the 'dev_name' (which
	 * refers to the raw device) and taking the portion between ':(*),raw'.
	 */
	(void) strlcpy(realpath, devpath, sizeof (realpath));
	if (is_lofi) {
		if (nvlist_lookup_string(nvl, DEV_NAME,
		    &devname) == 0 &&
		    (ret = resolvepath(devname, path,
		    sizeof (path))) > 0) {
			path[ret] = '\0';
			colon = strchr(path, ':');
			if (colon != NULL)
				raw = strstr(colon + 1, ",raw");
			if (colon != NULL && raw != NULL) {
				*raw = '\0';
				(void) snprintf(realpath,
				    sizeof (realpath), "%s%s",
				    devpath, colon);
				*raw = ',';
			}
		}
	}

	/*
	 * Iterate over all vdevs with a matching devid, and then those with a
	 * matching /devices path.  For disks, we only want to pay attention to
	 * vdevs marked as whole disks.  For lofi, we don't care (because we're
	 * matching an exact minor name).
	 */
	if (!devid_iter(realpath, zfs_process_add, !is_lofi))
		(void) devpath_iter(realpath, zfs_process_add, !is_lofi);

	return (0);
}

/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool.  In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0)
		return (0);

	data.dd_isdisk = B_TRUE;
	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}

/*ARGSUSED*/
static int
zfs_deliver_event(sysevent_t *ev, int unused)
{
	const char *class = sysevent_get_class_name(ev);
	const char *subclass = sysevent_get_subclass_name(ev);
	nvlist_t *nvl;
	int ret;
	boolean_t is_lofi, is_check;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/*
		 * We're mainly interested in disk additions, but we also listen
		 * for new lofi devices, to allow for simplified testing.
		 */
		if (strcmp(subclass, ESC_DISK) == 0)
			is_lofi = B_FALSE;
		else if (strcmp(subclass, ESC_LOFI) == 0)
			is_lofi = B_TRUE;
		else
			return (0);

		is_check = B_FALSE;
	} else if (strcmp(class, EC_ZFS) == 0 &&
	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
		/*
		 * This event signifies that a device failed to open during pool
		 * load, but the 'autoreplace' property was set, so we should
		 * pretend it's just been added.
		 */
		is_check = B_TRUE;
	} else {
		return (0);
	}

	if (sysevent_get_attr_list(ev, &nvl) != 0)
		return (-1);

	if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl, is_lofi);

	nvlist_free(nvl);
	return (ret);
}

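/*
 * syseventd module ops vector.  The third field is the retry limit applied
 * when zfs_deliver_event() fails to handle an event.
 */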
static struct slm_mod_ops zfs_mod_ops = {
	SE_MAJOR_VERSION, SE_MINOR_VERSION, 10, zfs_deliver_event
};

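/*
 * Module entry point, invoked by syseventd when the module is loaded.  We
 * create the global libzfs handle here and hand back our ops vector.
 */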
struct slm_mod_ops *
slm_init()
{
	if ((g_zfshdl = libzfs_init()) == NULL)
		return (NULL);

	return (&zfs_mod_ops);
}

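/*
 * Module unload entry point.  Nothing is torn down here; the libzfs handle
 * lives for the duration of the process.
 */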
void
slm_fini()
{
}