/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/kstat.h>

/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache.  When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result.  In the best case, this can turn 128 back-to-back 512-byte
 * reads into a single 64k read followed by 127 cache hits; this reduces
 * latency dramatically.  In the worst case, it can turn an isolated 512-byte
 * read into a 64k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth.  A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region.  Currently, only
 * metadata I/O is inflated.  A further enhancement could take advantage of
 * more semantic information about the I/O.  And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate.  This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill.  When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read.  Read data from the cache.
 *
 * (4) Write.  Update cache contents after write completion.
 *
 * (5) Evict.  When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds zfs_vdev_cache_size.
 */

/*
 * These tunables are for performance analysis.
 */
/*
 * All i/os smaller than zfs_vdev_cache_max will be turned into
 * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
 * track buffer).  At most zfs_vdev_cache_size bytes will be kept in each
 * vdev's vdev_cache.
 */
int zfs_vdev_cache_max = 1<<14;			/* 16KB */
int zfs_vdev_cache_size = 10ULL << 20;		/* 10MB */
int zfs_vdev_cache_bshift = 16;
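
/*
 * Note: like other ZFS tunables, these can be set at boot time via
 * /etc/system (the standard Solaris tunable mechanism; shown here only
 * as an illustration), for example:
 *
 *	set zfs:zfs_vdev_cache_size = 0x1400000
 */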

#define	VCBS (1 << zfs_vdev_cache_bshift)	/* 64KB */

kstat_t	*vdc_ksp = NULL;

typedef struct vdc_stats {
	kstat_named_t vdc_stat_delegations;
	kstat_named_t vdc_stat_hits;
	kstat_named_t vdc_stat_misses;
} vdc_stats_t;

static vdc_stats_t vdc_stats = {
	{ "delegations",	KSTAT_DATA_UINT64 },
	{ "hits",		KSTAT_DATA_UINT64 },
	{ "misses",		KSTAT_DATA_UINT64 }
};

#define	VDCSTAT_BUMP(stat)	atomic_add_64(&vdc_stats.stat.value.ui64, 1);
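
/*
 * These counters are exported as the "vdev_cache_stats" kstat (see
 * vdev_cache_stat_init() below), so hit, miss, and delegation rates
 * can be observed from userland, e.g. with:
 *
 *	kstat -p zfs:0:vdev_cache_stats
 */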

static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_offset < ve2->ve_offset)
		return (-1);
	if (ve1->ve_offset > ve2->ve_offset)
		return (1);
	return (0);
}

static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_lastused < ve2->ve_lastused)
		return (-1);
	if (ve1->ve_lastused > ve2->ve_lastused)
		return (1);

	/*
	 * Among equally old entries, sort by offset to ensure uniqueness.
	 */
	return (vdev_cache_offset_compare(a1, a2));
}

/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);
	ASSERT(ve->ve_data != NULL);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	zio_buf_free(ve->ve_data, VCBS);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}

/*
 * Allocate an entry in the cache.  At this point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (zfs_vdev_cache_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
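	/*
	 * Each entry caches exactly VCBS bytes, so the node count shifted
	 * left by zfs_vdev_cache_bshift is the cache's total byte footprint.
	 */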
	if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
	    zfs_vdev_cache_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL)
			return (NULL);
		ASSERT(ve->ve_hits != 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = ddi_get_lbolt();
	ve->ve_data = zio_buf_alloc(VCBS);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}

static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);

	if (ve->ve_lastused != ddi_get_lbolt()) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = ddi_get_lbolt();
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
	bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}

/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *fio)
{
	vdev_t *vd = fio->io_vd;
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve = fio->io_private;
	zio_t *pio;

	ASSERT(fio->io_size == VCBS);

	/*
	 * Add data to the cache.
	 */
	mutex_enter(&vc->vc_lock);

	ASSERT(ve->ve_fill_io == fio);
	ASSERT(ve->ve_offset == fio->io_offset);
	ASSERT(ve->ve_data == fio->io_data);

	ve->ve_fill_io = NULL;

	/*
	 * Even if this cache line was invalidated by a missed write update,
	 * any reads that were queued up before the missed update are still
	 * valid, so we can satisfy them from this line before we evict it.
	 */
	while ((pio = zio_walk_parents(fio)) != NULL)
		vdev_cache_hit(vc, ve, pio);

	if (fio->io_error || ve->ve_missed_update)
		vdev_cache_evict(vc, ve);

	mutex_exit(&vc->vc_lock);
}

/*
 * Read data from the cache.  Returns 0 on cache hit, errno on a miss.
 */
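/*
 * For example, a 512-byte read at offset 0x12345 maps to the 64KB cache
 * line at P2ALIGN(0x12345, VCBS) == 0x10000, at byte offset
 * P2PHASE(0x12345, VCBS) == 0x2345 within that line's ve_data buffer.
 */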
int
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
	zio_t *fio;

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (EINVAL);

	if (zio->io_size > zfs_vdev_cache_max)
		return (EOVERFLOW);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
	if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
		return (EXDEV);

	ASSERT(cache_phase + zio->io_size <= VCBS);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL);

	if (ve != NULL) {
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (ESTALE);
		}
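
		/*
		 * Another read has already started a fill for this line;
		 * rather than issue duplicate I/O, make this zio a child
		 * of the in-flight fill and let vdev_cache_fill() satisfy
		 * it on completion.
		 */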
		if ((fio = ve->ve_fill_io) != NULL) {
			zio_vdev_io_bypass(zio);
			zio_add_child(zio, fio);
			mutex_exit(&vc->vc_lock);
			VDCSTAT_BUMP(vdc_stat_delegations);
			return (0);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
		VDCSTAT_BUMP(vdc_stat_hits);
		return (0);
	}

	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (ENOMEM);
	}

	fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,
	    ve->ve_data, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_CACHE_FILL,
	    ZIO_FLAG_DONT_CACHE, vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
	zio_vdev_io_bypass(zio);
	zio_add_child(zio, fio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);
	VDCSTAT_BUMP(vdc_stat_misses);

	return (0);
}

/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, VCBS);
	uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
	avl_index_t where;

	ASSERT(zio->io_type == ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);
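
	/*
	 * Walk every cache line the write overlaps.  A line with a fill
	 * still in flight can't be patched safely, so mark it stale;
	 * otherwise copy the newly written bytes over the overlapping
	 * portion of the cached data.
	 */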
	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + VCBS, io_end);

		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			bcopy((char *)zio->io_data + start - io_start,
			    ve->ve_data + start - ve->ve_offset, end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}

void
vdev_cache_purge(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve;

	mutex_enter(&vc->vc_lock);
	while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
		vdev_cache_evict(vc, ve);
	mutex_exit(&vc->vc_lock);
}

void
vdev_cache_init(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_offset_node));

	avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_lastused_node));
}

void
vdev_cache_fini(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	vdev_cache_purge(vd);

	avl_destroy(&vc->vc_offset_tree);
	avl_destroy(&vc->vc_lastused_tree);

	mutex_destroy(&vc->vc_lock);
}
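
/*
 * Register the statistics above as a named kstat.  KSTAT_FLAG_VIRTUAL
 * means the kstat framework allocates no data area of its own; ks_data
 * is simply pointed at the static vdc_stats struct.
 */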
void
vdev_cache_stat_init(void)
{
	vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (vdc_ksp != NULL) {
		vdc_ksp->ks_data = &vdc_stats;
		kstat_install(vdc_ksp);
	}
}

void
vdev_cache_stat_fini(void)
{
	if (vdc_ksp != NULL) {
		kstat_delete(vdc_ksp);
		vdc_ksp = NULL;
	}
}