/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache.  When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result.  In the best case, this can turn 256 back-to-back 512-byte
 * reads into a single 128k read followed by 255 cache hits; this reduces
 * latency dramatically.  In the worst case, it can turn an isolated 512-byte
 * read into a 128k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth.  A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region.  It could also
 * take advantage of semantic information about the I/O.  And it could use
 * something faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate.  This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill.  When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read.  Read data from the cache.
 *
 * (4) Write.  Update cache contents after write completion.
 *
 * (5) Evict.  When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds vc_size.
 */
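
/*
 * A worked example of the arithmetic above (illustrative; assumes
 * vc_bshift == 17, i.e. 128k cache lines): a 512-byte read at offset
 * 0x23000 maps to the line starting at P2ALIGN(0x23000, 0x20000) ==
 * 0x20000, at phase P2PHASE(0x23000, 0x20000) == 0x3000 within that
 * line's buffer.  All 256 sectors in [0x20000, 0x40000) share the same
 * line, so reading them back-to-back costs a single disk I/O.
 */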
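
/*
 * AVL comparison function for vc_offset_tree: order cache entries by
 * starting byte offset.
 */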
static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_offset < ve2->ve_offset)
		return (-1);
	if (ve1->ve_offset > ve2->ve_offset)
		return (1);
	return (0);
}
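
/*
 * AVL comparison function for vc_lastused_tree: order cache entries
 * from least to most recently used, breaking ties by offset.
 */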
static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_lastused < ve2->ve_lastused)
		return (-1);
	if (ve1->ve_lastused > ve2->ve_lastused)
		return (1);

	/*
	 * Among equally old entries, sort by offset to ensure uniqueness.
	 */
	return (vdev_cache_offset_compare(a1, a2));
}

/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);
	ASSERT(ve->ve_data != NULL);

	dprintf("evicting %p, off %llx, LRU %llu, age %lu, hits %u, stale %u\n",
	    vc, ve->ve_offset, ve->ve_lastused, lbolt - ve->ve_lastused,
	    ve->ve_hits, ve->ve_missed_update);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	zio_buf_free(ve->ve_data, vc->vc_blocksize);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}

/*
 * Allocate an entry in the cache.  At this point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, vc->vc_blocksize);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (vc->vc_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size, evict the
	 * oldest entry (LRU).  Each entry caches one vc_blocksize line,
	 * so the node count shifted left by vc_bshift is the total number
	 * of bytes currently cached.
	 */
	if ((avl_numnodes(&vc->vc_lastused_tree) << vc->vc_bshift) >
	    vc->vc_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL) {
			dprintf("can't evict in %p, still filling\n", vc);
			return (NULL);
		}
		ASSERT(ve->ve_hits != 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = lbolt;
	ve->ve_data = zio_buf_alloc(vc->vc_blocksize);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}
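
/*
 * Satisfy a read from the cache: move the entry to the MRU end of
 * vc_lastused_tree and copy the cached data into the zio's buffer.
 */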
static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, vc->vc_blocksize);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);

	if (ve->ve_lastused != lbolt) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = lbolt;
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
	bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}

/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve = zio->io_private;
	zio_t *dio;

	ASSERT(zio->io_size == vc->vc_blocksize);

	/*
	 * Add data to the cache.
	 */
	mutex_enter(&vc->vc_lock);

	ASSERT(ve->ve_fill_io == zio);
	ASSERT(ve->ve_offset == zio->io_offset);
	ASSERT(ve->ve_data == zio->io_data);

	ve->ve_fill_io = NULL;

	/*
	 * Even if this cache line was invalidated by a missed write update,
	 * any reads that were queued up before the missed update are still
	 * valid, so we can satisfy them from this line before we evict it.
	 */
	for (dio = zio->io_delegate_list; dio; dio = dio->io_delegate_next)
		vdev_cache_hit(vc, ve, dio);

	if (zio->io_error || ve->ve_missed_update)
		vdev_cache_evict(vc, ve);

	mutex_exit(&vc->vc_lock);
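
	/*
	 * The fill is complete; restart the delegated reads that were
	 * waiting on it, propagating this I/O's error status to each.
	 */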
	while ((dio = zio->io_delegate_list) != NULL) {
		zio->io_delegate_list = dio->io_delegate_next;
		dio->io_delegate_next = NULL;
		dio->io_error = zio->io_error;
		zio_next_stage(dio);
	}
}

/*
 * Read data from the cache.  Returns 0 if the cache will satisfy the
 * request, either immediately (a hit) or once an in-flight line fill
 * completes; returns an errno if the caller must read from disk.
 */
int
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, vc->vc_blocksize);
	uint64_t cache_phase = P2PHASE(zio->io_offset, vc->vc_blocksize);
	zio_t *fio;

	ASSERT(zio->io_type == ZIO_TYPE_READ);
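
	/*
	 * Reject I/Os the cache can't serve: explicitly uncacheable
	 * requests, and reads larger than the vc_max threshold.
	 */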
	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (EINVAL);

	if (zio->io_size > vc->vc_max)
		return (EOVERFLOW);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
	if (P2CROSS(zio->io_offset, zio->io_offset + zio->io_size - 1,
	    vc->vc_blocksize))
		return (EXDEV);

	ASSERT(cache_phase + zio->io_size <= vc->vc_blocksize);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL);

	if (ve != NULL) {
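		/*
		 * If a write raced an earlier fill of this line, the
		 * cached data is stale; fail the lookup so the caller
		 * reads from disk instead.
		 */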
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (ESTALE);
		}
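
		/*
		 * A fill for this line is already in flight; delegate
		 * this zio to the fill I/O so it is satisfied from the
		 * line as soon as the fill completes.
		 */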
		if ((fio = ve->ve_fill_io) != NULL) {
			zio->io_delegate_next = fio->io_delegate_list;
			fio->io_delegate_list = zio;
			zio_vdev_io_bypass(zio);
			mutex_exit(&vc->vc_lock);
			return (0);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
		zio_next_stage(zio);
		return (0);
	}

	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (ENOMEM);
	}
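
	/*
	 * Cache miss: read the entire line from disk, with this zio as
	 * the first delegate so it is satisfied once the line is filled.
	 */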
	fio = zio_vdev_child_io(zio, NULL, zio->io_vd, cache_offset,
	    ve->ve_data, vc->vc_blocksize, ZIO_TYPE_READ,
	    ZIO_PRIORITY_CACHE_FILL,
	    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_PROPAGATE |
	    ZIO_FLAG_DONT_RETRY | ZIO_FLAG_NOBOOKMARK,
	    vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
	fio->io_delegate_list = zio;
	zio_vdev_io_bypass(zio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);

	return (0);
}

/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, vc->vc_blocksize);
	uint64_t max_offset = P2ROUNDUP(io_end, vc->vc_blocksize);
	avl_index_t where;

	ASSERT(zio->io_type == ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + vc->vc_blocksize, io_end);
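
		/*
		 * If the line is still being filled, we can't patch it
		 * in place; flag the missed update so the line is evicted
		 * when the fill completes.  Otherwise, copy the newly
		 * written data over the overlapping portion of the line.
		 */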
		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			bcopy((char *)zio->io_data + start - io_start,
			    ve->ve_data + start - ve->ve_offset, end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}
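
/*
 * Initialize a vdev's read-ahead cache: the lock, both AVL trees, and
 * the derived line size (vc_blocksize == 1 << vc_bshift).
 */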
void
vdev_cache_init(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_offset_node));

	avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_lastused_node));

	vc->vc_blocksize = 1ULL << vc->vc_bshift;
}
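
/*
 * Tear down a vdev's cache: evict every remaining entry, then destroy
 * the AVL trees and the lock.
 */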
void
vdev_cache_fini(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve;

	mutex_enter(&vc->vc_lock);
	while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
		vdev_cache_evict(vc, ve);
	mutex_exit(&vc->vc_lock);

	avl_destroy(&vc->vc_offset_tree);
	avl_destroy(&vc->vc_lastused_tree);

	mutex_destroy(&vc->vc_lock);
}