/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
/*
 * ==========================================================================
 * I/O priority table
 * ==========================================================================
 */
uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = {
	0,	/* ZIO_PRIORITY_NOW		*/
	0,	/* ZIO_PRIORITY_SYNC_READ	*/
	0,	/* ZIO_PRIORITY_SYNC_WRITE	*/
	6,	/* ZIO_PRIORITY_ASYNC_READ	*/
	4,	/* ZIO_PRIORITY_ASYNC_WRITE	*/
	4,	/* ZIO_PRIORITY_FREE		*/
	0,	/* ZIO_PRIORITY_CACHE_FILL	*/
	0,	/* ZIO_PRIORITY_LOG_WRITE	*/
	10,	/* ZIO_PRIORITY_RESILVER	*/
	20,	/* ZIO_PRIORITY_SCRUB		*/
};

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
char *zio_type_name[ZIO_TYPES] = {
	"null", "read", "write", "free", "claim", "ioctl" };

/* At or above this size, force gang blocking - for testing */
uint64_t zio_gang_bang = SPA_MAXBLOCKSIZE + 1;

typedef struct zio_sync_pass {
	int	zp_defer_free;		/* defer frees after this pass */
	int	zp_dontcompress;	/* don't compress after this pass */
	int	zp_rewrite;		/* rewrite new bps after this pass */
} zio_sync_pass_t;

zio_sync_pass_t zio_sync_pass = {
	1,	/* zp_defer_free */
	4,	/* zp_dontcompress */
	1,	/* zp_rewrite */
};
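
/*
 * With the defaults above: frees are deferred once spa_sync() is past
 * pass 1, compression is disabled after pass 4, and blocks are rewritten
 * in place (rather than reallocated) after pass 1 -- all of which helps
 * spa_sync() converge.  See zio_free() and zio_write_compress() below.
 */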

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

void
zio_init(void)
{
	size_t c;

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For medium-size buffers, we want a cache
	 * for each quarter-power of 2.  For large buffers, we want
	 * a cache for each multiple of PAGESIZE.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;

		while (p2 & (p2 - 1))
			p2 &= p2 - 1;

		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (P2PHASE(size, PAGESIZE) == 0) {
			align = PAGESIZE;
		} else if (P2PHASE(size, p2 >> 2) == 0) {
			align = p2 >> 2;
		}

		if (align != 0) {
			char name[30];
			(void) sprintf(name, "zio_buf_%lu", size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
			dprintf("creating cache for size %5lx align %5lx\n",
			    size, align);
		}
	}

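	/*
	 * Not every size got its own cache above: e.g. a 5.5K buffer
	 * (p2 = 4K, quarter-power = 1K) is a multiple of neither PAGESIZE
	 * nor 1K, so align stays 0 and no cache is created for it.
	 * Walk backwards and point each uncovered size at the next larger
	 * cache, so every possible I/O size maps to some cache.
	 */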
	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];
	}

	zio_inject_init();
}

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;
	}

	zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], KM_SLEEP));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_buf_cache[c], buf);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
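/*
 * Each transform repoints io_data/io_size at the buffer the next pipeline
 * stage should operate on, recording the allocated buffer size so the
 * stack unwinds in LIFO order.  For example, a read of a compressed gang
 * block pushes the caller's buffer, then a decompression buffer, then a
 * gang-header buffer; each stage pops one layer as it completes.
 */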
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_data = data;
	zt->zt_size = size;
	zt->zt_bufsize = bufsize;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}

static void
zio_pop_transform(zio_t *zio, void **data, uint64_t *size, uint64_t *bufsize)
{
	zio_transform_t *zt = zio->io_transform_stack;

	*data = zt->zt_data;
	*size = zt->zt_size;
	*bufsize = zt->zt_bufsize;

	zio->io_transform_stack = zt->zt_next;
	kmem_free(zt, sizeof (zio_transform_t));

	if ((zt = zio->io_transform_stack) != NULL) {
		zio->io_data = zt->zt_data;
		zio->io_size = zt->zt_size;
	}
}

static void
zio_clear_transform_stack(zio_t *zio)
{
	void *data;
	uint64_t size, bufsize;

	ASSERT(zio->io_transform_stack != NULL);

	zio_pop_transform(zio, &data, &size, &bufsize);
	while (zio->io_transform_stack != NULL) {
		zio_buf_free(data, bufsize);
		zio_pop_transform(zio, &data, &size, &bufsize);
	}
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, int priority, int flags, uint8_t stage, uint32_t pipeline)
{
	zio_t *zio;

	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);

	zio = kmem_zalloc(sizeof (zio_t), KM_SLEEP);
	zio->io_parent = pio;
	zio->io_spa = spa;
	zio->io_txg = txg;
	if (bp != NULL) {
		zio->io_bp = bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		/* XXBP - Need to inherit this when it matters */
		zio->io_dva_index = 0;
	}
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_stage = stage;
	zio->io_pipeline = pipeline;
	zio->io_async_stages = ZIO_ASYNC_PIPELINE_STAGES;
	zio->io_timestamp = lbolt64;
	zio->io_flags = flags;
	zio_push_transform(zio, data, size, size);

	if (pio == NULL) {
		if (!(flags & ZIO_FLAG_CONFIG_HELD))
			spa_config_enter(zio->io_spa, RW_READER, zio);
		zio->io_root = zio;
	} else {
		zio->io_root = pio->io_root;
		if (!(flags & ZIO_FLAG_NOBOOKMARK))
			zio->io_logical = pio->io_logical;
		mutex_enter(&pio->io_lock);
		if (stage < ZIO_STAGE_READY)
			pio->io_children_notready++;
		pio->io_children_notdone++;
		zio->io_sibling_next = pio->io_child;
		zio->io_sibling_prev = NULL;
		if (pio->io_child != NULL)
			pio->io_child->io_sibling_prev = zio;
		pio->io_child = zio;
		mutex_exit(&pio->io_lock);
	}

	return (zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, zio_done_func_t *done, void *private,
    int flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, ZIO_STAGE_OPEN,
	    ZIO_WAIT_FOR_CHILDREN_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, int flags)
{
	return (zio_null(NULL, spa, done, private, flags));
}
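
/*
 * A root zio is typically used to gather a set of independent children,
 * e.g. (sketch):
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for each block b:
 *		zio_nowait(zio_read(rio, spa, b->bp, b->buf, b->size,
 *		    NULL, NULL, ZIO_PRIORITY_ASYNC_READ, 0, &b->zb));
 *	error = zio_wait(rio);		-- waits for all children
 */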

zio_t *
zio_read(zio_t *pio, spa_t *spa, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private,
    int priority, int flags, zbookmark_t *zb)
{
	zio_t *zio;
	dva_t *dva;

	ASSERT3U(size, ==, BP_GET_LSIZE(bp));

	zio = zio_create(pio, spa, bp->blk_birth, bp, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, ZIO_STAGE_OPEN, ZIO_READ_PIPELINE);
	zio->io_bookmark = *zb;

	zio->io_logical = zio;

	/*
	 * Work off our copy of the bp so the caller can free it.
	 */
	zio->io_bp = &zio->io_bp_copy;

	bp = zio->io_bp;
	dva = ZIO_GET_DVA(zio);

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
		uint64_t csize = BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(csize);

		zio_push_transform(zio, cbuf, csize, csize);
		zio->io_pipeline |= 1U << ZIO_STAGE_READ_DECOMPRESS;
	}

	if (DVA_GET_GANG(dva)) {
		uint64_t gsize = SPA_GANGBLOCKSIZE;
		void *gbuf = zio_buf_alloc(gsize);

		zio_push_transform(zio, gbuf, gsize, gsize);
		zio->io_pipeline |= 1U << ZIO_STAGE_READ_GANG_MEMBERS;
	}

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, int checksum, int compress,
    uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
    zio_done_func_t *done, void *private, int priority, int flags,
    zbookmark_t *zb)
{
	zio_t *zio;

	ASSERT(checksum >= ZIO_CHECKSUM_OFF &&
	    checksum < ZIO_CHECKSUM_FUNCTIONS);

	ASSERT(compress >= ZIO_COMPRESS_OFF &&
	    compress < ZIO_COMPRESS_FUNCTIONS);

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PIPELINE);

	zio->io_bookmark = *zb;

	zio->io_logical = zio;

	zio->io_checksum = checksum;
	zio->io_compress = compress;

	if (compress != ZIO_COMPRESS_OFF)
		zio->io_async_stages |= 1U << ZIO_STAGE_WRITE_COMPRESS;

	if (bp->blk_birth != txg) {
		/* XXX the bp usually (always?) gets re-zeroed later */
		BP_ZERO(bp);
		BP_SET_LSIZE(bp, size);
		BP_SET_PSIZE(bp, size);
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, int checksum,
    uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
    zio_done_func_t *done, void *private, int priority, int flags,
    zbookmark_t *zb)
{
	zio_t *zio;

	/* XXBP - We need to re-evaluate when to insert pipeline stages */
	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	zio->io_bookmark = *zb;
	zio->io_checksum = checksum;
	zio->io_compress = ZIO_COMPRESS_OFF;

	return (zio);
}

static zio_t *
zio_write_allocate(zio_t *pio, spa_t *spa, int checksum,
    uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
    zio_done_func_t *done, void *private, int priority, int flags)
{
	zio_t *zio;

	BP_ZERO(bp);
	BP_SET_LSIZE(bp, size);
	BP_SET_PSIZE(bp, size);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags,
	    ZIO_STAGE_OPEN, ZIO_WRITE_ALLOCATE_PIPELINE);

	zio->io_checksum = checksum;
	zio->io_compress = ZIO_COMPRESS_OFF;

	return (zio);
}

zio_t *
zio_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg == spa->spa_syncing_txg &&
	    spa->spa_sync_pass > zio_sync_pass.zp_defer_free) {
		bplist_enqueue_deferred(&spa->spa_sync_bplist, bp);
		return (zio_null(pio, spa, NULL, NULL, 0));
	}

	/* XXBP - We need to re-evaluate when to insert pipeline stages */
	zio = zio_create(pio, spa, txg, bp, NULL, 0, done, private,
	    ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, 0,
	    ZIO_STAGE_OPEN, ZIO_FREE_PIPELINE);

	zio->io_bp = &zio->io_bp_copy;

	return (zio);
}
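
/*
 * Note that with the default zio_sync_pass above, a free issued while
 * spa_sync() is in pass 2 or later is not freed in this txg at all: it
 * is queued on spa_sync_bplist and the caller gets back a no-op null zio.
 */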

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT3U(spa_first_txg(spa), <=, txg);

	/* XXBP - We need to re-evaluate when to insert pipeline stages */
	zio = zio_create(pio, spa, txg, bp, NULL, 0, done, private,
	    ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, 0,
	    ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	zio->io_bp = &zio->io_bp_copy;

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, int priority, int flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
		    ZIO_TYPE_IOCTL, priority, flags,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_vd = vd;
		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, priority, flags));
	}

	return (zio);
}

static void
zio_phys_bp_init(vdev_t *vd, blkptr_t *bp, uint64_t offset, uint64_t size,
    int checksum)
{
	ASSERT(vd->vdev_children == 0);

	ASSERT(size <= SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	BP_ZERO(bp);

	BP_SET_LSIZE(bp, size);
	BP_SET_PSIZE(bp, size);

	BP_SET_CHECKSUM(bp, checksum);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

	if (checksum != ZIO_CHECKSUM_OFF)
		ZIO_SET_CHECKSUM(&bp->blk_cksum, offset, 0, 0, 0);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, int flags)
{
	zio_t *zio;
	blkptr_t blk;

	zio_phys_bp_init(vd, &blk, offset, size, checksum);

	zio = zio_create(pio, vd->vdev_spa, 0, &blk, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL,
	    ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_vd = vd;
	zio->io_offset = offset;

	/*
	 * Work off our copy of the bp so the caller can free it.
	 */
	zio->io_bp = &zio->io_bp_copy;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, int flags)
{
	zio_block_tail_t *zbt;
	void *wbuf;
	zio_t *zio;
	blkptr_t blk;

	zio_phys_bp_init(vd, &blk, offset, size, checksum);

	zio = zio_create(pio, vd->vdev_spa, 0, &blk, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_vd = vd;
	zio->io_offset = offset;

	zio->io_bp = &zio->io_bp_copy;
	zio->io_checksum = checksum;

	if (zio_checksum_table[checksum].ci_zbt) {
		/*
		 * zbt checksums are necessarily destructive -- they modify
		 * one word of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places.
		 */
		wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size);

		zbt = (zio_block_tail_t *)((char *)wbuf + size) - 1;
		zbt->zbt_cksum = blk.blk_cksum;
	}

	return (zio);
}
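
/*
 * The block tail occupies the final sizeof (zio_block_tail_t) bytes of the
 * buffer.  For physical writes the verifier stored there is seeded from the
 * device offset (see zio_phys_bp_init() above), so a block read back from
 * the wrong location will fail verification.
 */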

/*
 * Create a child I/O to do some work for us.  It has no associated bp.
 */
zio_t *
zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, int priority, int flags,
    zio_done_func_t *done, void *private)
{
	uint32_t pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *cio;

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= 1U << ZIO_STAGE_CHECKSUM_VERIFY;
		zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY);
	}

	cio = zio_create(zio, zio->io_spa, zio->io_txg, bp, data, size,
	    done, private, type, priority,
	    (zio->io_flags & ZIO_FLAG_VDEV_INHERIT) | ZIO_FLAG_CANFAIL | flags,
	    ZIO_STAGE_VDEV_IO_SETUP - 1, pipeline);

	cio->io_vd = vd;
	cio->io_offset = offset;

	return (cio);
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);

	zio->io_waiter = curthread;

	zio_next_stage_async(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_stalled != ZIO_STAGE_DONE)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;

	kmem_free(zio, sizeof (zio_t));

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	zio_next_stage_async(zio);
}
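
/*
 * Either way the zio is consumed: zio_wait() frees it after the waiter is
 * signalled, and for a zio_nowait() zio, zio_done() frees it when the
 * pipeline completes (see the io_waiter check at the end of zio_done()).
 */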

/*
 * ==========================================================================
 * I/O pipeline interlocks: parent/child dependency scoreboarding
 * ==========================================================================
 */
static void
zio_wait_for_children(zio_t *zio, uint32_t stage, uint64_t *countp)
{
	mutex_enter(&zio->io_lock);
	if (*countp == 0) {
		ASSERT(zio->io_stalled == 0);
		mutex_exit(&zio->io_lock);
		zio_next_stage(zio);
	} else {
		zio->io_stalled = stage;
		mutex_exit(&zio->io_lock);
	}
}

static void
zio_notify_parent(zio_t *zio, uint32_t stage, uint64_t *countp)
{
	zio_t *pio = zio->io_parent;

	mutex_enter(&pio->io_lock);
	if (pio->io_error == 0 && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		pio->io_error = zio->io_error;
	if (--*countp == 0 && pio->io_stalled == stage) {
		pio->io_stalled = 0;
		mutex_exit(&pio->io_lock);
		zio_next_stage_async(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_wait_children_ready(zio_t *zio)
{
	zio_wait_for_children(zio, ZIO_STAGE_WAIT_CHILDREN_READY,
	    &zio->io_children_notready);
}

void
zio_wait_children_done(zio_t *zio)
{
	zio_wait_for_children(zio, ZIO_STAGE_WAIT_CHILDREN_DONE,
	    &zio->io_children_notdone);
}
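
/*
 * Concretely: if a parent reaches a wait stage while, say, three children
 * are outstanding, it parks with io_stalled set to that stage.  Each child
 * decrements the parent's count in zio_notify_parent(); the one that drops
 * it to zero restarts the parent asynchronously.  If the count is already
 * zero at wait time, the parent just proceeds.
 */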

static void
zio_ready(zio_t *zio)
{
	zio_t *pio = zio->io_parent;

	if (pio != NULL)
		zio_notify_parent(zio, ZIO_STAGE_WAIT_CHILDREN_READY,
		    &pio->io_children_notready);

	if (zio->io_bp)
		zio->io_bp_copy = *zio->io_bp;

	zio_next_stage(zio);
}

static void
zio_done(zio_t *zio)
{
	zio_t *pio = zio->io_parent;
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;
	char blkbuf[BP_SPRINTF_LEN];

	ASSERT(zio->io_children_notready == 0);
	ASSERT(zio->io_children_notdone == 0);

	if (bp != NULL) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bp->blk_pad[2] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0);
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR))
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
	}

	if (vd != NULL)
		vdev_stat_update(zio);

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level.  We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO,
			    zio->io_spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO ||
		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) &&
		    zio->io_logical == zio) {
			/*
			 * For root I/O requests, tell the SPA to log the error
			 * appropriately.  Also, generate a logical data
			 * ereport.
			 */
			spa_log_error(zio->io_spa, zio);

			zfs_ereport_post(FM_EREPORT_ZFS_DATA,
			    zio->io_spa, NULL, zio, 0, 0);
		}

		/*
		 * For I/O requests that cannot fail, panic appropriately.
		 */
		if (!(zio->io_flags & ZIO_FLAG_CANFAIL)) {
			sprintf_blkptr(blkbuf, BP_SPRINTF_LEN,
			    bp ? bp : &zio->io_bp_copy);
			panic("ZFS: %s (%s on %s off %llx: zio %p %s): error "
			    "%d", zio->io_error == ECKSUM ?
			    "bad checksum" : "I/O failure",
			    zio_type_name[zio->io_type],
			    vdev_description(vd),
			    (u_longlong_t)zio->io_offset,
			    zio, blkbuf, zio->io_error);
		}
	}

	zio_clear_transform_stack(zio);

	if (zio->io_done)
		zio->io_done(zio);

	ASSERT(zio->io_delegate_list == NULL);
	ASSERT(zio->io_delegate_next == NULL);

	if (pio != NULL) {
		zio_t *next, *prev;

		mutex_enter(&pio->io_lock);
		next = zio->io_sibling_next;
		prev = zio->io_sibling_prev;
		if (next != NULL)
			next->io_sibling_prev = prev;
		if (prev != NULL)
			prev->io_sibling_next = next;
		if (pio->io_child == zio)
			pio->io_child = next;
		mutex_exit(&pio->io_lock);

		zio_notify_parent(zio, ZIO_STAGE_WAIT_CHILDREN_DONE,
		    &pio->io_children_notdone);
	}

	if (pio == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_HELD))
		spa_config_exit(spa, zio);

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio->io_stalled = zio->io_stage;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		kmem_free(zio, sizeof (zio_t));
	}
}

/*
 * ==========================================================================
 * Compression support
 * ==========================================================================
 */
static void
zio_write_compress(zio_t *zio)
{
	int compress = zio->io_compress;
	blkptr_t *bp = zio->io_bp;
	void *cbuf;
	uint64_t lsize = zio->io_size;
	uint64_t csize = lsize;
	uint64_t cbufsize = 0;
	int pass;

	if (bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(zio->io_spa);
		if (pass > zio_sync_pass.zp_dontcompress)
			compress = ZIO_COMPRESS_OFF;
	} else {
		ASSERT(BP_IS_HOLE(bp));
		pass = 1;
	}

	if (compress != ZIO_COMPRESS_OFF)
		if (!zio_compress_data(compress, zio->io_data, zio->io_size,
		    &cbuf, &csize, &cbufsize))
			compress = ZIO_COMPRESS_OFF;

	if (compress != ZIO_COMPRESS_OFF && csize != 0)
		zio_push_transform(zio, cbuf, csize, cbufsize);

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to reallocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == csize &&
	    pass > zio_sync_pass.zp_rewrite) {
		ASSERT(csize != 0);
		ASSERT3U(BP_GET_COMPRESS(bp), ==, compress);
		ASSERT3U(BP_GET_LSIZE(bp), ==, lsize);

		zio->io_pipeline = ZIO_REWRITE_PIPELINE;
	} else {
		if (bp->blk_birth == zio->io_txg) {
			ASSERT3U(BP_GET_LSIZE(bp), ==, lsize);
			bzero(bp, sizeof (blkptr_t));
		}
		if (csize == 0) {
			BP_ZERO(bp);
			zio->io_pipeline = ZIO_WAIT_FOR_CHILDREN_PIPELINE;
		} else {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_PSIZE(bp, csize);
			BP_SET_COMPRESS(bp, compress);
			zio->io_pipeline = ZIO_WRITE_ALLOCATE_PIPELINE;
		}
	}

	zio_next_stage(zio);
}
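
/*
 * With the default zio_sync_pass values this means: from pass 2 onward a
 * block that compresses to the same physical size is rewritten in place,
 * and from pass 5 onward compression is skipped entirely, so every block
 * keeps its size and spa_sync() converges.
 */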

static void
zio_read_decompress(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	void *data;
	uint64_t size;
	uint64_t bufsize;
	int compress = BP_GET_COMPRESS(bp);

	ASSERT(compress != ZIO_COMPRESS_OFF);

	zio_pop_transform(zio, &data, &size, &bufsize);

	if (zio_decompress_data(compress, data, size,
	    zio->io_data, zio->io_size))
		zio->io_error = EIO;

	zio_buf_free(data, bufsize);

	zio_next_stage(zio);
}

/*
 * ==========================================================================
 * Gang block support
 * ==========================================================================
 */
static void
zio_gang_pipeline(zio_t *zio)
{
	/*
	 * By default, the pipeline assumes that we're dealing with a gang
	 * block.  If we're not, strip out any gang-specific stages.
	 */
	if (!DVA_GET_GANG(ZIO_GET_DVA(zio)))
		zio->io_pipeline &= ~ZIO_GANG_STAGES;

	zio_next_stage(zio);
}

static void
zio_gang_byteswap(zio_t *zio)
{
	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);

	if (BP_SHOULD_BYTESWAP(zio->io_bp))
		byteswap_uint64_array(zio->io_data, zio->io_size);
}

static void
zio_get_gang_header(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	uint64_t gsize = SPA_GANGBLOCKSIZE;
	void *gbuf = zio_buf_alloc(gsize);

	ASSERT(DVA_GET_GANG(ZIO_GET_DVA(zio)));

	zio_push_transform(zio, gbuf, gsize, gsize);

	zio_nowait(zio_create(zio, zio->io_spa, bp->blk_birth, bp, gbuf, gsize,
	    NULL, NULL, ZIO_TYPE_READ, zio->io_priority,
	    zio->io_flags & ZIO_FLAG_GANG_INHERIT,
	    ZIO_STAGE_OPEN, ZIO_READ_PIPELINE));

	zio_wait_children_done(zio);
}

static void
zio_read_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize, loff, lsize;
	int i;

	ASSERT(DVA_GET_GANG(ZIO_GET_DVA(zio)));

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	for (loff = 0, i = 0; loff != zio->io_size; loff += lsize, i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		lsize = BP_GET_PSIZE(gbp);

		ASSERT(BP_GET_COMPRESS(gbp) == ZIO_COMPRESS_OFF);
		ASSERT3U(lsize, ==, BP_GET_LSIZE(gbp));
		ASSERT3U(loff + lsize, <=, zio->io_size);
		ASSERT(i < SPA_GBH_NBLKPTRS);
		ASSERT(!BP_IS_HOLE(gbp));

		zio_nowait(zio_read(zio, zio->io_spa, gbp,
		    (char *)zio->io_data + loff, lsize, NULL, NULL,
		    zio->io_priority, zio->io_flags & ZIO_FLAG_GANG_INHERIT,
		    &zio->io_bookmark));
	}

	zio_buf_free(gbh, gbufsize);
	zio_wait_children_done(zio);
}

static void
zio_rewrite_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize, loff, lsize;
	int i;

	ASSERT(DVA_GET_GANG(ZIO_GET_DVA(zio)));
	ASSERT3U(zio->io_size, ==, SPA_GANGBLOCKSIZE);

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	ASSERT(gsize == gbufsize);

	for (loff = 0, i = 0; loff != zio->io_size; loff += lsize, i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		lsize = BP_GET_PSIZE(gbp);

		ASSERT(BP_GET_COMPRESS(gbp) == ZIO_COMPRESS_OFF);
		ASSERT3U(lsize, ==, BP_GET_LSIZE(gbp));
		ASSERT3U(loff + lsize, <=, zio->io_size);
		ASSERT(i < SPA_GBH_NBLKPTRS);
		ASSERT(!BP_IS_HOLE(gbp));

		zio_nowait(zio_rewrite(zio, zio->io_spa, zio->io_checksum,
		    zio->io_txg, gbp, (char *)zio->io_data + loff, lsize,
		    NULL, NULL, zio->io_priority, zio->io_flags,
		    &zio->io_bookmark));
	}

	zio_push_transform(zio, gbh, gsize, gbufsize);
	zio_wait_children_ready(zio);
}

static void
zio_free_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize;
	int i;

	ASSERT(DVA_GET_GANG(ZIO_GET_DVA(zio)));

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];

		if (BP_IS_HOLE(gbp))
			continue;
		zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg,
		    gbp, NULL, NULL));
	}

	zio_buf_free(gbh, gbufsize);
	zio_next_stage(zio);
}

static void
zio_claim_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize;
	int i;

	ASSERT(DVA_GET_GANG(ZIO_GET_DVA(zio)));

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		if (BP_IS_HOLE(gbp))
			continue;
		zio_nowait(zio_claim(zio, zio->io_spa, zio->io_txg,
		    gbp, NULL, NULL));
	}

	zio_buf_free(gbh, gbufsize);
	zio_next_stage(zio);
}

static void
zio_write_allocate_gang_member_done(zio_t *zio)
{
	zio_t *pio = zio->io_parent;
	dva_t *cdva = ZIO_GET_DVA(zio);
	dva_t *pdva = ZIO_GET_DVA(pio);
	uint64_t asize;

	ASSERT(DVA_GET_GANG(pdva));

	/* XXBP - Need to be careful here with multiple DVAs */
	mutex_enter(&pio->io_lock);
	asize = DVA_GET_ASIZE(pdva);
	asize += DVA_GET_ASIZE(cdva);
	DVA_SET_ASIZE(pdva, asize);
	mutex_exit(&pio->io_lock);
}

static void
zio_write_allocate_gang_members(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dva_t *dva = ZIO_GET_DVA(zio);
	zio_gbh_phys_t *gbh;
	uint64_t resid = zio->io_size;
	uint64_t maxalloc = P2ROUNDUP(zio->io_size >> 1, SPA_MINBLOCKSIZE);
	uint64_t gsize, loff, lsize;
	uint32_t gbps_left;
	int error;
	int i;

	gsize = SPA_GANGBLOCKSIZE;
	gbps_left = SPA_GBH_NBLKPTRS;

	error = metaslab_alloc(zio->io_spa, gsize, dva, zio->io_txg);
	if (error == ENOSPC)
		panic("can't allocate gang block header");
	ASSERT(error == 0);

	DVA_SET_GANG(dva, 1);

	bp->blk_birth = zio->io_txg;

	gbh = zio_buf_alloc(gsize);
	bzero(gbh, gsize);

	for (loff = 0, i = 0; loff != zio->io_size;
	    loff += lsize, resid -= lsize, gbps_left--, i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		dva = &gbp->blk_dva[0];

		ASSERT(gbps_left != 0);
		maxalloc = MIN(maxalloc, resid);

		while (resid <= maxalloc * gbps_left) {
			error = metaslab_alloc(zio->io_spa, maxalloc, dva,
			    zio->io_txg);
			if (error == 0)
				break;
			ASSERT3U(error, ==, ENOSPC);
			if (maxalloc == SPA_MINBLOCKSIZE)
				panic("really out of space");
			maxalloc = P2ROUNDUP(maxalloc >> 1, SPA_MINBLOCKSIZE);
		}

		if (resid <= maxalloc * gbps_left) {
			lsize = maxalloc;
			BP_SET_LSIZE(gbp, lsize);
			BP_SET_PSIZE(gbp, lsize);
			BP_SET_COMPRESS(gbp, ZIO_COMPRESS_OFF);
			gbp->blk_birth = zio->io_txg;
			zio_nowait(zio_rewrite(zio, zio->io_spa,
			    zio->io_checksum, zio->io_txg, gbp,
			    (char *)zio->io_data + loff, lsize,
			    zio_write_allocate_gang_member_done, NULL,
			    zio->io_priority, zio->io_flags,
			    &zio->io_bookmark));
		} else {
			lsize = P2ROUNDUP(resid / gbps_left, SPA_MINBLOCKSIZE);
			ASSERT(lsize != SPA_MINBLOCKSIZE);
			zio_nowait(zio_write_allocate(zio, zio->io_spa,
			    zio->io_checksum, zio->io_txg, gbp,
			    (char *)zio->io_data + loff, lsize,
			    zio_write_allocate_gang_member_done, NULL,
			    zio->io_priority, zio->io_flags));
		}
	}

	ASSERT(resid == 0 && loff == zio->io_size);

	zio->io_pipeline |= 1U << ZIO_STAGE_GANG_CHECKSUM_GENERATE;

	zio_push_transform(zio, gbh, gsize, gsize);
	zio_wait_children_done(zio);
}
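
/*
 * For example, a 128K write that hits ENOSPC starts with maxalloc = 64K;
 * each failed metaslab_alloc() halves maxalloc (down to SPA_MINBLOCKSIZE
 * at worst), and as long as the remaining data still fits in the gang
 * header's remaining blkptrs the members are written directly.  If it no
 * longer fits, a member is itself written via zio_write_allocate(), which
 * may recurse into another gang block.
 */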

/*
 * ==========================================================================
 * Allocate and free blocks
 * ==========================================================================
 */
static void
zio_dva_allocate(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dva_t *dva = ZIO_GET_DVA(zio);
	int error;

	ASSERT(BP_IS_HOLE(bp));

	/* For testing, make some blocks above a certain size be gang blocks */
	if (zio->io_size >= zio_gang_bang && (lbolt & 0x3) == 0) {
		zio_write_allocate_gang_members(zio);
		return;
	}

	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	error = metaslab_alloc(zio->io_spa, zio->io_size, dva, zio->io_txg);

	if (error == 0) {
		bp->blk_birth = zio->io_txg;
	} else if (error == ENOSPC) {
		if (zio->io_size == SPA_MINBLOCKSIZE)
			panic("really, truly out of space");
		zio_write_allocate_gang_members(zio);
		return;
	} else {
		zio->io_error = error;
	}
	zio_next_stage(zio);
}

static void
zio_dva_free(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dva_t *dva = ZIO_GET_DVA(zio);

	ASSERT(!BP_IS_HOLE(bp));

	metaslab_free(zio->io_spa, dva, zio->io_txg, B_FALSE);

	BP_ZERO(bp);

	zio_next_stage(zio);
}

static void
zio_dva_claim(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dva_t *dva = ZIO_GET_DVA(zio);

	ASSERT(!BP_IS_HOLE(bp));

	zio->io_error = metaslab_claim(zio->io_spa, dva, zio->io_txg);

	zio_next_stage(zio);
}

static void
zio_dva_translate(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	dva_t *dva = ZIO_GET_DVA(zio);
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);

	ASSERT3U(zio->io_size, ==, ZIO_GET_IOSIZE(zio));

	zio->io_offset = offset;

	if ((zio->io_vd = vdev_lookup_top(spa, vdev)) == NULL)
		zio->io_error = ENXIO;
	else if (offset + zio->io_size > zio->io_vd->vdev_asize)
		zio->io_error = EOVERFLOW;

	zio_next_stage(zio);
}

/*
 * ==========================================================================
 * Read and write to physical devices
 * ==========================================================================
 */

static void
zio_vdev_io_setup(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *tvd = vd->vdev_top;
	uint64_t align = 1ULL << tvd->vdev_ashift;

	/* XXPOLICY */
	if (zio->io_retries == 0 && vd == tvd)
		zio->io_flags |= ZIO_FLAG_FAILFAST;

	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && vd->vdev_children == 0) {
		zio->io_flags |= ZIO_FLAG_PHYSICAL;
		zio->io_offset += VDEV_LABEL_START_SIZE;
	}

	if (P2PHASE(zio->io_size, align) != 0) {
		uint64_t asize = P2ROUNDUP(zio->io_size, align);
		char *abuf = zio_buf_alloc(asize);
		ASSERT(vd == tvd);
		if (zio->io_type == ZIO_TYPE_WRITE) {
			bcopy(zio->io_data, abuf, zio->io_size);
			bzero(abuf + zio->io_size, asize - zio->io_size);
		}
		zio_push_transform(zio, abuf, asize, asize);
		ASSERT(!(zio->io_flags & ZIO_FLAG_SUBBLOCK));
		zio->io_flags |= ZIO_FLAG_SUBBLOCK;
	}

	zio_next_stage(zio);
}
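
/*
 * The SUBBLOCK transform handles I/O sizes that aren't a multiple of the
 * device's sector size: e.g. a 1.5K I/O to a top-level vdev with ashift 12
 * (4K sectors) is rounded up to 4K here; a write zeroes the tail, and
 * zio_vdev_io_assess() copies just io_size bytes back out on a read.
 */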

static void
zio_vdev_io_start(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	uint64_t align = 1ULL << zio->io_vd->vdev_top->vdev_ashift;

	ASSERT(P2PHASE(zio->io_offset, align) == 0);
	ASSERT(P2PHASE(zio->io_size, align) == 0);
	ASSERT(bp == NULL ||
	    P2ROUNDUP(ZIO_GET_IOSIZE(zio), align) == zio->io_size);
	ASSERT(zio->io_type != ZIO_TYPE_WRITE || (spa_mode & FWRITE));

	vdev_io_start(zio);

	/* zio_next_stage_async() gets called from io completion interrupt */
}

static void
zio_vdev_io_done(zio_t *zio)
{
	vdev_io_done(zio);
}

/* XXPOLICY */
boolean_t
zio_should_retry(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	if (zio->io_error == 0)
		return (B_FALSE);
	if (zio->io_delegate_list != NULL)
		return (B_FALSE);
	if (vd != vd->vdev_top)
		return (B_FALSE);
	if (zio->io_flags & ZIO_FLAG_DONT_RETRY)
		return (B_FALSE);
	if (zio->io_retries > 0)
		return (B_FALSE);

	return (B_TRUE);
}

static void
zio_vdev_io_assess(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *tvd = vd->vdev_top;

	ASSERT(zio->io_vsd == NULL);

	if (zio->io_flags & ZIO_FLAG_SUBBLOCK) {
		void *abuf;
		uint64_t asize;
		ASSERT(vd == tvd);
		zio_pop_transform(zio, &abuf, &asize, &asize);
		if (zio->io_type == ZIO_TYPE_READ)
			bcopy(abuf, zio->io_data, zio->io_size);
		zio_buf_free(abuf, asize);
		zio->io_flags &= ~ZIO_FLAG_SUBBLOCK;
	}

	if (zio_injection_enabled && !zio->io_error)
		zio->io_error = zio_handle_fault_injection(zio, EIO);

	/*
	 * If the I/O failed, determine whether we should attempt to retry it.
	 */
	/* XXPOLICY */
	if (zio_should_retry(zio)) {
		ASSERT(tvd == vd);
		ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE));

		zio->io_retries++;
		zio->io_error = 0;
		zio->io_flags &= ZIO_FLAG_VDEV_INHERIT;
		/* XXPOLICY */
		zio->io_flags &= ~ZIO_FLAG_FAILFAST;
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;
		zio->io_stage = ZIO_STAGE_VDEV_IO_SETUP - 1;

		dprintf("retry #%d for %s to %s offset %llx\n",
		    zio->io_retries, zio_type_name[zio->io_type],
		    vdev_description(vd), zio->io_offset);

		zio_next_stage_async(zio);
		return;
	}

	if (zio->io_error != 0 && !(zio->io_flags & ZIO_FLAG_SPECULATIVE) &&
	    zio->io_error != ECKSUM) {
		/*
		 * Poor man's hotplug support.  Even if we're done retrying
		 * this I/O, try to reopen the vdev to see if it's still
		 * attached.  To avoid excessive thrashing, we only try it
		 * once a minute.  This also has the effect of detecting when
		 * missing devices have come back, by polling the device once
		 * a minute.
		 *
		 * We need to do this asynchronously because we can't grab
		 * all the necessary locks way down here.
		 */
		if (gethrtime() - vd->vdev_last_try > 60ULL * NANOSEC) {
			vd->vdev_last_try = gethrtime();
			tvd->vdev_reopen_wanted = 1;
			spa_async_request(vd->vdev_spa, SPA_ASYNC_REOPEN);
		}
	}

	zio_next_stage(zio);
}

void
zio_vdev_io_reissue(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_stage--;
}

void
zio_vdev_io_redone(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);

	zio->io_stage--;
}

void
zio_vdev_io_bypass(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS - 1;
}

/*
 * ==========================================================================
 * Generate and verify checksums
 * ==========================================================================
 */
static void
zio_checksum_generate(zio_t *zio)
{
	int checksum = zio->io_checksum;
	blkptr_t *bp = zio->io_bp;

	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	BP_SET_CHECKSUM(bp, checksum);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

	zio_checksum(checksum, &bp->blk_cksum, zio->io_data, zio->io_size);

	zio_next_stage(zio);
}

static void
zio_gang_checksum_generate(zio_t *zio)
{
	zio_cksum_t zc;
	zio_gbh_phys_t *gbh = zio->io_data;

	ASSERT3U(zio->io_size, ==, SPA_GANGBLOCKSIZE);
	ASSERT(DVA_GET_GANG(ZIO_GET_DVA(zio)));

	zio_set_gang_verifier(zio, &gbh->zg_tail.zbt_cksum);

	zio_checksum(ZIO_CHECKSUM_GANG_HEADER, &zc, zio->io_data, zio->io_size);

	zio_next_stage(zio);
}

static void
zio_checksum_verify(zio_t *zio)
{
	if (zio->io_bp != NULL) {
		zio->io_error = zio_checksum_error(zio);
		if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE))
			zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM,
			    zio->io_spa, zio->io_vd, zio, 0, 0);
	}

	zio_next_stage(zio);
}

/*
 * Called by RAID-Z to ensure we don't compute the checksum twice.
 */
void
zio_checksum_verified(zio_t *zio)
{
	zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY);
}

/*
 * Set the external verifier for a gang block based on stuff in the bp
 */
void
zio_set_gang_verifier(zio_t *zio, zio_cksum_t *zcp)
{
	zcp->zc_word[0] = DVA_GET_VDEV(ZIO_GET_DVA(zio));
	zcp->zc_word[1] = DVA_GET_OFFSET(ZIO_GET_DVA(zio));
	zcp->zc_word[2] = zio->io_bp->blk_birth;
	zcp->zc_word[3] = 0;
}

/*
 * ==========================================================================
 * Define the pipeline
 * ==========================================================================
 */
typedef void zio_pipe_stage_t(zio_t *zio);

static void
zio_badop(zio_t *zio)
{
	panic("Invalid I/O pipeline stage %u for zio %p", zio->io_stage, zio);
}

zio_pipe_stage_t *zio_pipeline[ZIO_STAGE_DONE + 2] = {
	zio_badop,
	zio_wait_children_ready,
	zio_write_compress,
	zio_checksum_generate,
	zio_gang_pipeline,
	zio_get_gang_header,
	zio_rewrite_gang_members,
	zio_free_gang_members,
	zio_claim_gang_members,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_gang_checksum_generate,
	zio_ready,
	zio_dva_translate,
	zio_vdev_io_setup,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_wait_children_done,
	zio_checksum_verify,
	zio_read_gang_members,
	zio_read_decompress,
	zio_done,
	zio_badop
};
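
/*
 * zio->io_stage indexes this table directly, and each zio's io_pipeline
 * bitmask selects which of these stages it actually executes.  The
 * zio_badop entries bracket the valid stages, so walking off either end
 * of a pipeline panics rather than executing garbage.
 */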

/*
 * Move an I/O to the next stage of the pipeline and execute that stage.
 * There's no locking on io_stage because there's no legitimate way for
 * multiple threads to be attempting to process the same I/O.
 */
void
zio_next_stage(zio_t *zio)
{
	uint32_t pipeline = zio->io_pipeline;

	ASSERT(!MUTEX_HELD(&zio->io_lock));

	if (zio->io_error) {
		dprintf("zio %p vdev %s offset %llx stage %d error %d\n",
		    zio, vdev_description(zio->io_vd),
		    zio->io_offset, zio->io_stage, zio->io_error);
		if (((1U << zio->io_stage) & ZIO_VDEV_IO_PIPELINE) == 0)
			pipeline &= ZIO_ERROR_PIPELINE_MASK;
	}

	while (((1U << ++zio->io_stage) & pipeline) == 0)
		continue;

	ASSERT(zio->io_stage <= ZIO_STAGE_DONE);
	ASSERT(zio->io_stalled == 0);

	zio_pipeline[zio->io_stage](zio);
}

void
zio_next_stage_async(zio_t *zio)
{
	taskq_t *tq;
	uint32_t pipeline = zio->io_pipeline;

	ASSERT(!MUTEX_HELD(&zio->io_lock));

	if (zio->io_error) {
		dprintf("zio %p vdev %s offset %llx stage %d error %d\n",
		    zio, vdev_description(zio->io_vd),
		    zio->io_offset, zio->io_stage, zio->io_error);
		if (((1U << zio->io_stage) & ZIO_VDEV_IO_PIPELINE) == 0)
			pipeline &= ZIO_ERROR_PIPELINE_MASK;
	}

	while (((1U << ++zio->io_stage) & pipeline) == 0)
		continue;

	ASSERT(zio->io_stage <= ZIO_STAGE_DONE);
	ASSERT(zio->io_stalled == 0);

	/*
	 * For performance, we'll probably want two sets of task queues:
	 * per-CPU issue taskqs and per-CPU completion taskqs.  The per-CPU
	 * part is for read performance: since we have to make a pass over
	 * the data to checksum it anyway, we want to do this on the same CPU
	 * that issued the read, because (assuming CPU scheduling affinity)
	 * that thread is probably still there.  Getting this optimization
	 * right avoids performance-hostile cache-to-cache transfers.
	 *
	 * Note that having two sets of task queues is also necessary for
	 * correctness: if all of the issue threads get bogged down waiting
	 * for dependent reads (e.g. metaslab freelist) to complete, then
	 * there won't be any threads available to service I/O completion
	 * interrupts.
	 */
	if ((1U << zio->io_stage) & zio->io_async_stages) {
		if (zio->io_stage < ZIO_STAGE_VDEV_IO_DONE)
			tq = zio->io_spa->spa_zio_issue_taskq[zio->io_type];
		else
			tq = zio->io_spa->spa_zio_intr_taskq[zio->io_type];
		(void) taskq_dispatch(tq,
		    (task_func_t *)zio_pipeline[zio->io_stage], zio, TQ_SLEEP);
	} else {
		zio_pipeline[zio->io_stage](zio);
	}
}

/*
 * Try to allocate an intent log block.  Return 0 on success, errno on failure.
 */
int
zio_alloc_blk(spa_t *spa, int checksum, uint64_t size, blkptr_t *bp,
    uint64_t txg)
{
	int error;

	spa_config_enter(spa, RW_READER, FTAG);

	BP_ZERO(bp);

	error = metaslab_alloc(spa, size, BP_IDENTITY(bp), txg);

	if (error == 0) {
		BP_SET_CHECKSUM(bp, checksum);
		BP_SET_LSIZE(bp, size);
		BP_SET_PSIZE(bp, size);
		BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
		BP_SET_TYPE(bp, DMU_OT_INTENT_LOG);
		BP_SET_LEVEL(bp, 0);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		bp->blk_birth = txg;
	}

	spa_config_exit(spa, FTAG);

	return (error);
}

/*
 * Free an intent log block.  We know it can't be a gang block, so there's
 * nothing to do except metaslab_free() it.
 */
void
zio_free_blk(spa_t *spa, blkptr_t *bp, uint64_t txg)
{
	ASSERT(DVA_GET_GANG(BP_IDENTITY(bp)) == 0);

	dprintf_bp(bp, "txg %llu: ", txg);

	spa_config_enter(spa, RW_READER, FTAG);

	metaslab_free(spa, BP_IDENTITY(bp), txg, B_FALSE);

	spa_config_exit(spa, FTAG);
}