author | maybee |
Tue, 18 Jul 2006 18:09:14 -0700 | |
changeset 2391 | 2fa3fd1db808 |
parent 2237 | 45affe88ed99 |
child 2417 | 694d5de97348 |
permissions | -rw-r--r-- |
789 | 1 |
/* |
2 |
* CDDL HEADER START |
|
3 |
* |
|
4 |
* The contents of this file are subject to the terms of the |
|
1491
bdcb30e07e7d
6389368 fat zap should use 16k blocks (with backwards compatability)
ahrens
parents:
1199
diff
changeset
|
5 |
* Common Development and Distribution License (the "License"). |
bdcb30e07e7d
6389368 fat zap should use 16k blocks (with backwards compatability)
ahrens
parents:
1199
diff
changeset
|
6 |
* You may not use this file except in compliance with the License. |
789 | 7 |
* |
8 |
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE |
|
9 |
* or http://www.opensolaris.org/os/licensing. |
|
10 |
* See the License for the specific language governing permissions |
|
11 |
* and limitations under the License. |
|
12 |
* |
|
13 |
* When distributing Covered Code, include this CDDL HEADER in each |
|
14 |
* file and include the License file at usr/src/OPENSOLARIS.LICENSE. |
|
15 |
* If applicable, add the following below this CDDL HEADER, with the |
|
16 |
* fields enclosed by brackets "[]" replaced with your own identifying |
|
17 |
* information: Portions Copyright [yyyy] [name of copyright owner] |
|
18 |
* |
|
19 |
* CDDL HEADER END |
|
20 |
*/ |
|
21 |
/* |
|
1199 | 22 |
* Copyright 2006 Sun Microsystems, Inc. All rights reserved. |
789 | 23 |
* Use is subject to license terms. |
24 |
*/ |
|
25 |
||
26 |
#pragma ident "%Z%%M% %I% %E% SMI" |
|
27 |
||
28 |
#include <sys/zfs_context.h> |
|
29 |
#include <sys/dmu.h> |
|
30 |
#include <sys/dmu_impl.h> |
|
31 |
#include <sys/dbuf.h> |
|
32 |
#include <sys/dmu_objset.h> |
|
33 |
#include <sys/dsl_dataset.h> |
|
34 |
#include <sys/dsl_dir.h> |
|
35 |
#include <sys/dmu_tx.h> |
|
36 |
#include <sys/spa.h> |
|
37 |
#include <sys/zio.h> |
|
38 |
#include <sys/dmu_zfetch.h> |
|
39 |
||
40 |
static void dbuf_destroy(dmu_buf_impl_t *db); |
|
41 |
static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx); |
|
42 |
static arc_done_func_t dbuf_write_done; |
|
43 |
||
44 |
/* |
|
45 |
* Global data structures and functions for the dbuf cache. |
|
46 |
*/ |
|
47 |
taskq_t *dbuf_tq; |
|
48 |
static kmem_cache_t *dbuf_cache; |
|
49 |
||
50 |
/*
 * kmem cache constructor for dmu_buf_impl_t: zero the object and
 * initialize its embedded mutex, condition variable, and hold
 * refcount.  Always succeeds (returns 0); kmflag is unused.
 */
/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	refcount_create(&db->db_holds);
	return (0);
}
|
62 |
||
63 |
/*
 * kmem cache destructor: tear down exactly what dbuf_cons() set up.
 */
/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	refcount_destroy(&db->db_holds);
}
|
72 |
||
73 |
/* |
|
74 |
* dbuf hash table routines |
|
75 |
*/ |
|
76 |
static dbuf_hash_table_t dbuf_hash_table; |
|
77 |
||
78 |
static uint64_t dbuf_hash_count; |
|
79 |
||
80 |
static uint64_t |
|
81 |
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid) |
|
82 |
{ |
|
83 |
uintptr_t osv = (uintptr_t)os; |
|
84 |
uint64_t crc = -1ULL; |
|
85 |
||
86 |
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); |
|
87 |
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF]; |
|
88 |
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF]; |
|
89 |
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF]; |
|
90 |
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF]; |
|
91 |
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF]; |
|
92 |
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF]; |
|
93 |
||
94 |
crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16); |
|
95 |
||
96 |
return (crc); |
|
97 |
} |
|
98 |
||
99 |
/*
 * Hash a (objset, object, level, blkid) tuple into the dbuf hash table.
 *
 * Note: the expansion deliberately has no trailing semicolon.  Callers
 * use it inside expressions (e.g. "uint64_t hv = DBUF_HASH(...);"); a
 * semicolon in the expansion would inject a stray empty statement
 * between declarations, which C89 does not allow, and would break any
 * future use of the macro inside a larger expression.
 */
#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)

/*
 * True iff "dbuf" caches the block identified by (os, obj, level, blkid).
 */
#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&		\
	(dbuf)->db_blkid == (blkid))
106 |
||
107 |
/*
 * Look up the dbuf for (dn, level, blkid) in the global hash table.
 * On a hit, the dbuf is returned with its db_mtx HELD (the hash-chain
 * mutex is dropped first); the caller is responsible for releasing it.
 * Dbufs in DB_EVICTING state are skipped.  Returns NULL on a miss.
 */
dmu_buf_impl_t *
dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_impl_t *os = dn->dn_objset;
	uint64_t obj = dn->dn_object;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			/*
			 * Grab db_mtx while still holding the chain lock
			 * (lock order: DBUF_HASH_MUTEX before db_mtx),
			 * then re-check that the dbuf is still live.
			 */
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				/* returned with db_mtx held */
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}
|
131 |
||
132 |
/* |
|
133 |
* Insert an entry into the hash table. If there is already an element |
|
134 |
* equal to elem in the hash table, then the already existing element |
|
135 |
* will be returned and the new element will not be inserted. |
|
136 |
* Otherwise returns NULL. |
|
137 |
*/ |
|
138 |
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_impl_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	/*
	 * Scan the chain for an existing, live dbuf for the same block.
	 * If one is found it is returned with its db_mtx held and "db"
	 * is NOT inserted (caller handles the collision).
	 */
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	/*
	 * No live duplicate: link "db" at the head of the chain.
	 * db_mtx is acquired before dropping the chain lock so the new
	 * entry cannot be raced with before we return.
	 */
	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, 1);

	return (NULL);
}
|
170 |
||
171 |
/* |
|
172 |
* Remove an entry from the hash table. This operation will |
|
173 |
* fail if there are any existing holds on the db. |
|
174 |
*/ |
|
175 |
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx, to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	/*
	 * Walk the chain through the link pointers so the predecessor's
	 * next field can be spliced directly.  The dbuf is expected to
	 * be present; the ASSERT would trip on a corrupted chain.
	 */
	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, -1);
}
|
203 |
||
1544 | 204 |
static arc_evict_func_t dbuf_do_evict;

/*
 * Fire the user's eviction callback (if any) for a level-0 dbuf and
 * clear the user state.  Before the callback runs, the user's data
 * pointer (if registered) is refreshed to the current db_data.
 * Caller must hold db_mtx.
 */
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level != 0 || db->db_d.db_evict_func == NULL)
		return;

	if (db->db_d.db_user_data_ptr_ptr)
		*db->db_d.db_user_data_ptr_ptr = db->db.db_data;
	db->db_d.db_evict_func(&db->db, db->db_d.db_user_ptr);
	db->db_d.db_user_ptr = NULL;
	db->db_d.db_user_data_ptr_ptr = NULL;
	db->db_d.db_evict_func = NULL;
}
|
221 |
||
222 |
/*
 * Evict a dbuf that has no data buffer: clear it and destroy it.
 * Caller must hold db_mtx and db_buf must already be NULL.
 */
void
dbuf_evict(dmu_buf_impl_t *db)
{
	/* NOTE(review): 'i' is only used under ZFS_DEBUG below. */
	int i;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL);

#ifdef ZFS_DEBUG
	/* Verify no per-txg dirty state remains. */
	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(!list_link_active(&db->db_dirty_node[i]));
		ASSERT(db->db_level != 0 || db->db_d.db_data_old[i] == NULL);
	}
#endif
	dbuf_clear(db);
	dbuf_destroy(db);
}
|
239 |
||
240 |
/*
 * One-time module initialization: size and allocate the global dbuf
 * hash table, create the dmu_buf_impl_t kmem cache and the dbuf taskq,
 * and initialize the hash-chain mutexes.
 */
void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size.  The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		/* Halve the table and retry until the allocation fits. */
		hsize >>= 1;
		goto retry;
	}

	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
	dbuf_tq = taskq_create("dbuf_tq", 8, maxclsyspri, 50, INT_MAX,
	    TASKQ_PREPOPULATE);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
}
|
274 |
||
275 |
/*
 * Module teardown: destroy the taskq, hash-chain mutexes, hash table,
 * and the dbuf kmem cache (reverse of dbuf_init()).
 */
void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	taskq_destroy(dbuf_tq);
	dbuf_tq = NULL;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_cache);
}
|
289 |
||
290 |
/* |
|
291 |
* Other stuff. |
|
292 |
*/ |
|
293 |
||
873
adefbfa5f42d
6347448 non ZFS_DEBUG kernels shouldn't call empty verify functions
ek110237
parents:
789
diff
changeset
|
294 |
#ifdef ZFS_DEBUG |
789 | 295 |
static void |
296 |
dbuf_verify(dmu_buf_impl_t *db) |
|
297 |
{ |
|
298 |
int i; |
|
299 |
dnode_t *dn = db->db_dnode; |
|
300 |
||
301 |
ASSERT(MUTEX_HELD(&db->db_mtx)); |
|
302 |
||
303 |
if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY)) |
|
304 |
return; |
|
305 |
||
306 |
ASSERT(db->db_objset != NULL); |
|
307 |
if (dn == NULL) { |
|
308 |
ASSERT(db->db_parent == NULL); |
|
309 |
ASSERT(db->db_blkptr == NULL); |
|
310 |
} else { |
|
311 |
ASSERT3U(db->db.db_object, ==, dn->dn_object); |
|
312 |
ASSERT3P(db->db_objset, ==, dn->dn_objset); |
|
313 |
ASSERT3U(db->db_level, <, dn->dn_nlevels); |
|
1544 | 314 |
ASSERT(db->db_blkid == DB_BONUS_BLKID || |
315 |
list_head(&dn->dn_dbufs)); |
|
789 | 316 |
} |
317 |
if (db->db_blkid == DB_BONUS_BLKID) { |
|
318 |
ASSERT(dn != NULL); |
|
319 |
ASSERT3U(db->db.db_size, ==, dn->dn_bonuslen); |
|
320 |
ASSERT3U(db->db.db_offset, ==, DB_BONUS_BLKID); |
|
321 |
} else { |
|
322 |
ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size); |
|
323 |
} |
|
324 |
||
325 |
if (db->db_level == 0) { |
|
326 |
/* we can be momentarily larger in dnode_set_blksz() */ |
|
327 |
if (db->db_blkid != DB_BONUS_BLKID && dn) { |
|
328 |
ASSERT3U(db->db.db_size, >=, dn->dn_datablksz); |
|
329 |
} |
|
1544 | 330 |
if (db->db.db_object == DMU_META_DNODE_OBJECT) { |
789 | 331 |
for (i = 0; i < TXG_SIZE; i++) { |
332 |
/* |
|
333 |
* it should only be modified in syncing |
|
334 |
* context, so make sure we only have |
|
335 |
* one copy of the data. |
|
336 |
*/ |
|
337 |
ASSERT(db->db_d.db_data_old[i] == NULL || |
|
338 |
db->db_d.db_data_old[i] == db->db_buf); |
|
339 |
} |
|
340 |
} |
|
341 |
} |
|
342 |
||
343 |
/* verify db->db_blkptr */ |
|
344 |
if (db->db_blkptr) { |
|
345 |
if (db->db_parent == dn->dn_dbuf) { |
|
346 |
/* db is pointed to by the dnode */ |
|
347 |
/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */ |
|
1544 | 348 |
if (db->db.db_object == DMU_META_DNODE_OBJECT) |
789 | 349 |
ASSERT(db->db_parent == NULL); |
350 |
else |
|
351 |
ASSERT(db->db_parent != NULL); |
|
352 |
ASSERT3P(db->db_blkptr, ==, |
|
353 |
&dn->dn_phys->dn_blkptr[db->db_blkid]); |
|
354 |
} else { |
|
355 |
/* db is pointed to by an indirect block */ |
|
356 |
int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT; |
|
357 |
ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); |
|
358 |
ASSERT3U(db->db_parent->db.db_object, ==, |
|
359 |
db->db.db_object); |
|
360 |
/* |
|
361 |
* dnode_grow_indblksz() can make this fail if we don't |
|
362 |
* have the struct_rwlock. XXX indblksz no longer |
|
363 |
* grows. safe to do this now? |
|
364 |
*/ |
|
365 |
if (RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock)) { |
|
366 |
ASSERT3P(db->db_blkptr, ==, |
|
367 |
((blkptr_t *)db->db_parent->db.db_data + |
|
368 |
db->db_blkid % epb)); |
|
369 |
} |
|
370 |
} |
|
371 |
} |
|
372 |
if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) && |
|
373 |
db->db.db_data && db->db_blkid != DB_BONUS_BLKID && |
|
374 |
db->db_state != DB_FILL && !dn->dn_free_txg) { |
|
375 |
/* |
|
376 |
* If the blkptr isn't set but they have nonzero data, |
|
377 |
* it had better be dirty, otherwise we'll lose that |
|
378 |
* data when we evict this buffer. |
|
379 |
*/ |
|
380 |
if (db->db_dirtycnt == 0) { |
|
381 |
uint64_t *buf = db->db.db_data; |
|
382 |
int i; |
|
383 |
||
384 |
for (i = 0; i < db->db.db_size >> 3; i++) { |
|
385 |
ASSERT(buf[i] == 0); |
|
386 |
} |
|
387 |
} |
|
388 |
} |
|
873
adefbfa5f42d
6347448 non ZFS_DEBUG kernels shouldn't call empty verify functions
ek110237
parents:
789
diff
changeset
|
389 |
} |
789 | 390 |
#endif |
391 |
||
392 |
static void |
|
393 |
dbuf_update_data(dmu_buf_impl_t *db) |
|
394 |
{ |
|
395 |
ASSERT(MUTEX_HELD(&db->db_mtx)); |
|
396 |
if (db->db_level == 0 && db->db_d.db_user_data_ptr_ptr) { |
|
397 |
ASSERT(!refcount_is_zero(&db->db_holds)); |
|
398 |
*db->db_d.db_user_data_ptr_ptr = db->db.db_data; |
|
399 |
} |
|
400 |
} |
|
401 |
||
402 |
/*
 * Attach an ARC buffer to the dbuf (buf != NULL), or detach the current
 * one (buf == NULL).  On attach, install the ARC eviction callback
 * unless the buffer is already released from the ARC, and refresh the
 * user's data pointer.  On detach, fire the user eviction callback and
 * drop to DB_UNCACHED.  Caller must hold db_mtx.
 */
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
	db->db_buf = buf;
	if (buf != NULL) {
		ASSERT(buf->b_data != NULL);
		db->db.db_data = buf->b_data;
		if (!arc_released(buf))
			arc_set_callback(buf, dbuf_do_evict, db);
		dbuf_update_data(db);
	} else {
		/* run the user's callback before the data goes away */
		dbuf_evict_user(db);
		db->db.db_data = NULL;
		db->db_state = DB_UNCACHED;
	}
}
420 |
||
421 |
uint64_t |
|
422 |
dbuf_whichblock(dnode_t *dn, uint64_t offset) |
|
423 |
{ |
|
424 |
if (dn->dn_datablkshift) { |
|
425 |
return (offset >> dn->dn_datablkshift); |
|
426 |
} else { |
|
427 |
ASSERT3U(offset, <, dn->dn_datablksz); |
|
428 |
return (0); |
|
429 |
} |
|
430 |
} |
|
431 |
||
432 |
/*
 * ARC completion callback for dbuf_read_impl()'s arc_read().  Installs
 * the buffer on success (or zeroes it if the block was freed while the
 * read was in flight), drops back to DB_UNCACHED on I/O error, then
 * wakes waiters and releases the hold taken before issuing the read.
 */
static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_d.db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		db->db_d.db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		/* read failed: discard the ARC buffer, stay uncached */
		ASSERT(db->db_blkid != DB_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		VERIFY(arc_buf_remove_ref(buf, db) == 1);
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	mutex_exit(&db->db_mtx);
	/* drop the hold added in dbuf_read_impl() */
	dbuf_rele(db, NULL);
}
465 |
||
1544 | 466 |
/*
 * Start filling an uncached dbuf.  Bonus buffers and holes are
 * satisfied immediately (state goes to DB_CACHED); otherwise an
 * asynchronous arc_read() is issued and the dbuf is left in DB_READ
 * until dbuf_read_done() runs.  Entered with db_mtx held; db_mtx is
 * ALWAYS dropped before returning.  *flags may gain DB_RF_CACHED when
 * the data was available without a disk read.
 */
static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
{
	blkptr_t *bp;
	zbookmark_t zb;
	uint32_t aflags = ARC_NOWAIT;

	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&db->db_dnode->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DB_BONUS_BLKID) {
		/*
		 * Bonus buffer: copy straight out of the dnode phys,
		 * zero-padding the allocation up to DN_MAX_BONUSLEN.
		 */
		ASSERT3U(db->db_dnode->dn_bonuslen, ==, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		if (db->db.db_size < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		bcopy(DN_BONUS(db->db_dnode->dn_phys), db->db.db_data,
		    db->db.db_size);
		dbuf_update_data(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	if (db->db_level == 0 && dnode_block_freed(db->db_dnode, db->db_blkid))
		bp = NULL;
	else
		bp = db->db_blkptr;

	if (bp == NULL)
		dprintf_dbuf(db, "blkptr: %s\n", "NULL");
	else
		dprintf_dbuf_bp(db, bp, "%s", "blkptr:");

	if (bp == NULL || BP_IS_HOLE(bp)) {
		/* Hole or freed block: hand back a zeroed buffer. */
		ASSERT(bp == NULL || BP_IS_HOLE(bp));
		dbuf_set_data(db, arc_buf_alloc(db->db_dnode->dn_objset->os_spa,
		    db->db.db_size, db));
		bzero(db->db.db_data, db->db.db_size);
		db->db_state = DB_CACHED;
		*flags |= DB_RF_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	zb.zb_objset = db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : 0;
	zb.zb_object = db->db.db_object;
	zb.zb_level = db->db_level;
	zb.zb_blkid = db->db_blkid;

	/* hold released by dbuf_read_done() */
	dbuf_add_ref(db, NULL);
	/* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */
	(void) arc_read(zio, db->db_dnode->dn_objset->os_spa, bp,
	    db->db_level > 0 ? byteswap_uint64_array :
	    dmu_ot[db->db_dnode->dn_type].ot_byteswap,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
	if (aflags & ARC_CACHED)
		*flags |= DB_RF_CACHED;
}
534 |
||
1544 | 535 |
/*
 * Read a dbuf's contents, dispatching on its current state:
 * DB_CACHED (done), DB_UNCACHED (issue a read via dbuf_read_impl()),
 * or in-flight DB_READ/DB_FILL (wait unless DB_RF_NEVERWAIT).
 * "flags" is a mask of DB_RF_* options; "zio" is an optional parent
 * zio — when NULL, an internal root zio is created and waited on.
 * Returns 0 or an error (EIO when a waited-on read failed).
 */
int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	int havepzio = (zio != NULL);
	int prefetch;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER);

	/* prefetch only level-0, non-bonus reads, unless told not to */
	prefetch = db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && db->db_dnode != NULL;

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&db->db_dnode->dn_struct_rwlock);
	} else if (db->db_state == DB_UNCACHED) {
		if (zio == NULL) {
			zio = zio_root(db->db_dnode->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);
		}
		dbuf_read_impl(db, zio, &flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset,
			    db->db.db_size, flags & DB_RF_CACHED);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&db->db_dnode->dn_struct_rwlock);

		/* only wait if we created the zio ourselves */
		if (!havepzio)
			err = zio_wait(zio);
	} else {
		/* read or fill already in flight */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&db->db_dnode->dn_struct_rwlock);

		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			/* UNCACHED after waiting means the read failed */
			if (db->db_state == DB_UNCACHED)
				err = EIO;
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}
605 |
||
606 |
/*
 * Prepare a dbuf for being completely overwritten, without reading the
 * old contents from disk.  Waits out any in-flight read/fill; an
 * uncached dbuf gets a fresh (uninitialized) buffer and goes to
 * DB_FILL.  Not valid for bonus buffers.
 */
static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DB_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_buf_alloc(db->db_dnode->dn_objset->os_spa,
		    db->db.db_size, db));
		db->db_state = DB_FILL;
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}
|
625 |
||
626 |
/* |
|
627 |
* This is our just-in-time copy function. It makes a copy of |
|
628 |
* buffers, that have been modified in a previous transaction |
|
629 |
* group, before we modify them in the current active group. |
|
630 |
* |
|
631 |
* This function is used in two places: when we are dirtying a |
|
632 |
* buffer for the first time in a txg, and when we are freeing |
|
633 |
* a range in a dnode that includes this buffer. |
|
634 |
* |
|
635 |
* Note that when we are called from dbuf_free_range() we do |
|
636 |
* not put a hold on the buffer, we just traverse the active |
|
637 |
* dbuf list for the dnode. |
|
638 |
*/ |
|
639 |
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	arc_buf_t **quiescing, **syncing;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_blkid != DB_BONUS_BLKID);

	/*
	 * txg-1 is the quiescing txg and txg-2 the syncing txg,
	 * relative to the currently-open txg.
	 */
	quiescing = (arc_buf_t **)&db->db_d.db_data_old[(txg-1)&TXG_MASK];
	syncing = (arc_buf_t **)&db->db_d.db_data_old[(txg-2)&TXG_MASK];

	/*
	 * If this buffer is referenced from the current quiescing
	 * transaction group: either make a copy and reset the reference
	 * to point to the copy, or (if there are no active holders) just
	 * null out the current db_data pointer.
	 */
	if (*quiescing == db->db_buf) {
		/*
		 * If the quiescing txg is "dirty", then we better not
		 * be referencing the same buffer from the syncing txg.
		 */
		ASSERT(*syncing != db->db_buf);
		if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
			int size = db->db.db_size;
			*quiescing = arc_buf_alloc(
			    db->db_dnode->dn_objset->os_spa, size, db);
			bcopy(db->db.db_data, (*quiescing)->b_data, size);
		} else {
			dbuf_set_data(db, NULL);
		}
		return;
	}

	/*
	 * If this buffer is referenced from the current syncing
	 * transaction group: either
	 *	1 - make a copy and reset the reference, or
	 *	2 - if there are no holders, just null the current db_data.
	 */
	if (*syncing == db->db_buf) {
		ASSERT3P(*quiescing, ==, NULL);
		ASSERT3U(db->db_dirtycnt, ==, 1);
		if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
			int size = db->db.db_size;
			/* we can't copy if we have already started a write */
			ASSERT(*syncing != db->db_data_pending);
			*syncing = arc_buf_alloc(
			    db->db_dnode->dn_objset->os_spa, size, db);
			bcopy(db->db.db_data, (*syncing)->b_data, size);
		} else {
			dbuf_set_data(db, NULL);
		}
	}
}
|
695 |
||
1544 | 696 |
/*
 * This is the "bonus buffer" version of the above routine:
 * bonus data lives in a plain zio buffer (not an ARC buffer), so a
 * copy is always made when the quiescing or syncing txg still
 * references the current data.
 */
static void
dbuf_fix_old_bonus_data(dmu_buf_impl_t *db, uint64_t txg)
{
	void **quiescing, **syncing;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_blkid == DB_BONUS_BLKID);

	quiescing = &db->db_d.db_data_old[(txg-1)&TXG_MASK];
	syncing = &db->db_d.db_data_old[(txg-2)&TXG_MASK];

	if (*quiescing == db->db.db_data) {
		ASSERT(*syncing != db->db.db_data);
		*quiescing = zio_buf_alloc(DN_MAX_BONUSLEN);
		bcopy(db->db.db_data, *quiescing, DN_MAX_BONUSLEN);
	} else if (*syncing == db->db.db_data) {
		ASSERT3P(*quiescing, ==, NULL);
		ASSERT3U(db->db_dirtycnt, ==, 1);
		*syncing = zio_buf_alloc(DN_MAX_BONUSLEN);
		bcopy(db->db.db_data, *syncing, DN_MAX_BONUSLEN);
	}
}
|
722 |
||
789 | 723 |
/*
 * Undo a dmu_sync()-style override for the given txg: free the
 * already-written block (unless it is a hole), discard the override
 * block pointer, and re-release the buffer so it can be written again
 * normally.  Caller must hold db_mtx.
 */
void
dbuf_unoverride(dmu_buf_impl_t *db, uint64_t txg)
{
	ASSERT(db->db_blkid != DB_BONUS_BLKID);
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_d.db_overridden_by[txg&TXG_MASK] != IN_DMU_SYNC);

	if (db->db_d.db_overridden_by[txg&TXG_MASK] != NULL) {
		/* free this block */
		ASSERT(list_link_active(&db->db_dirty_node[txg&TXG_MASK]) ||
		    db->db_dnode->dn_free_txg == txg);
		if (!BP_IS_HOLE(db->db_d.db_overridden_by[txg&TXG_MASK])) {
			/* XXX can get silent EIO here */
			(void) arc_free(NULL, db->db_dnode->dn_objset->os_spa,
			    txg, db->db_d.db_overridden_by[txg&TXG_MASK],
			    NULL, NULL, ARC_WAIT);
		}
		kmem_free(db->db_d.db_overridden_by[txg&TXG_MASK],
		    sizeof (blkptr_t));
		db->db_d.db_overridden_by[txg&TXG_MASK] = NULL;
		/* release the already-written buffer */
		arc_release(db->db_d.db_data_old[txg&TXG_MASK], db);
	}
}
|
747 |
||
748 |
/*
 * Handle the freeing of blocks [blkid, blkid+nblks) for this dnode:
 * walk the dnode's dbuf list and, for each level-0 dbuf in the range,
 * undirty it, discard or zero its contents, or flag it freed-in-flight
 * depending on its state and reference count.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;

	dprintf_dnode(dn, "blkid=%llu nblks=%llu\n", blkid, nblks);
	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
		/* grab next first, since dbuf_clear() may unlink db */
		db_next = list_next(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DB_BONUS_BLKID);
		if (db->db_level != 0)
			continue;
		dprintf_dbuf(db, "found buf %s\n", "");
		if (db->db_blkid < blkid ||
		    db->db_blkid >= blkid+nblks)
			continue;

		/* found a level 0 buffer in the range */
		if (dbuf_undirty(db, tx))
			continue;

		mutex_enter(&db->db_mtx);
		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_EVICTING) {
			/* nothing cached to discard */
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_d.db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_clear(db);
			continue;
		}
		/* The dbuf is CACHED and referenced */

		if (!list_link_active(&db->db_dirty_node[txg & TXG_MASK])) {
			/*
			 * This dbuf is not currently dirty.  We will either
			 * uncache it (if its not referenced in the open
			 * context) or reset its contents to empty.
			 */
			dbuf_fix_old_data(db, txg);
		} else if (db->db_d.db_overridden_by[txg & TXG_MASK] != NULL) {
			/*
			 * This dbuf is overridden.  Clear that state.
			 */
			dbuf_unoverride(db, txg);
		}
		/* fill in with appropriate data */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}
|
814 |
||
815 |
static int |
|
1544 | 816 |
dbuf_new_block(dmu_buf_impl_t *db) |
789 | 817 |
{ |
818 |
dsl_dataset_t *ds = db->db_objset->os_dsl_dataset; |
|
819 |
uint64_t birth_txg = 0; |
|
820 |
||
821 |
/* Don't count meta-objects */ |
|
822 |
if (ds == NULL) |
|
823 |
return (FALSE); |
|
824 |
||
825 |
/* |
|
826 |
* We don't need any locking to protect db_blkptr: |
|
827 |
* If it's syncing, then db_dirtied will be set so we'll |
|
828 |
* ignore db_blkptr. |
|
829 |
*/ |
|
830 |
ASSERT(MUTEX_HELD(&db->db_mtx)); /* XXX strictly necessary? */ |
|
831 |
/* If we have been dirtied since the last snapshot, its not new */ |
|
832 |
if (db->db_dirtied) |
|
833 |
birth_txg = db->db_dirtied; |
|
834 |
else if (db->db_blkptr) |
|
835 |
birth_txg = db->db_blkptr->blk_birth; |
|
836 |
||
837 |
if (birth_txg) |
|
1544 | 838 |
return (!dsl_dataset_block_freeable(ds, birth_txg)); |
789 | 839 |
else |
840 |
return (TRUE); |
|
841 |
} |
|
842 |
||
843 |
/*
 * Change a dbuf's block size: allocate a new ARC buffer of "size",
 * copy over the old contents (zero-filling any growth), swap it in,
 * and record it as this txg's dirty data.  The caller must hold the
 * dnode's struct_rwlock as writer.  Not valid for bonus buffers.
 */
void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;

	ASSERT(db->db_blkid != DB_BONUS_BLKID);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock));

	/*
	 * This call to dbuf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dbuf_will_dirty(db, tx);

	/* create the data buffer for the new block */
	buf = arc_buf_alloc(db->db_dnode->dn_objset->os_spa, size, db);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	VERIFY(arc_buf_remove_ref(obuf, db) == 1);
	db->db.db_size = size;

	if (db->db_level == 0)
		db->db_d.db_data_old[tx->tx_txg&TXG_MASK] = buf;
	mutex_exit(&db->db_mtx);

	/* account for the change in space usage */
	dnode_willuse_space(db->db_dnode, size-osize, tx);
}
|
887 |
||
888 |
void |
|
889 |
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) |
|
890 |
{ |
|
891 |
dnode_t *dn = db->db_dnode; |
|
892 |
objset_impl_t *os = dn->dn_objset; |
|
893 |
int drop_struct_lock = FALSE; |
|
894 |
int txgoff = tx->tx_txg & TXG_MASK; |
|
895 |
||
896 |
ASSERT(tx->tx_txg != 0); |
|
897 |
ASSERT(!refcount_is_zero(&db->db_holds)); |
|
873
adefbfa5f42d
6347448 non ZFS_DEBUG kernels shouldn't call empty verify functions
ek110237
parents:
789
diff
changeset
|
898 |
DMU_TX_DIRTY_BUF(tx, db); |
789 | 899 |
|
900 |
/* |
|
901 |
* Shouldn't dirty a regular buffer in syncing context. Private |
|
902 |
* objects may be dirtied in syncing context, but only if they |
|
903 |
* were already pre-dirtied in open context. |
|
904 |
* XXX We may want to prohibit dirtying in syncing context even |
|
905 |
* if they did pre-dirty. |
|
906 |
*/ |
|
907 |
ASSERT(!(dmu_tx_is_syncing(tx) && |
|
908 |
!BP_IS_HOLE(&dn->dn_objset->os_rootbp) && |
|
1544 | 909 |
dn->dn_object != DMU_META_DNODE_OBJECT && |
789 | 910 |
dn->dn_objset->os_dsl_dataset != NULL && |
911 |
!dsl_dir_is_private( |
|
912 |
dn->dn_objset->os_dsl_dataset->ds_dir))); |
|
913 |
||
914 |
/* |
|
915 |
* We make this assert for private objects as well, but after we |
|
916 |
* check if we're already dirty. They are allowed to re-dirty |
|
917 |
* in syncing context. |
|
918 |
*/ |
|
1544 | 919 |
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || |
789 | 920 |
dn->dn_dirtyctx == DN_UNDIRTIED || |
921 |
dn->dn_dirtyctx == |
|
922 |
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); |
|
923 |
||
924 |
mutex_enter(&db->db_mtx); |
|
925 |
/* XXX make this true for indirects too? */ |
|
926 |
ASSERT(db->db_level != 0 || db->db_state == DB_CACHED || |
|
927 |
db->db_state == DB_FILL); |
|
928 |
||
929 |
/* |
|
930 |
* If this buffer is currently part of an "overridden" region, |
|
931 |
* we now need to remove it from that region. |
|
932 |
*/ |
|
933 |
if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID && |
|
934 |
db->db_d.db_overridden_by[txgoff] != NULL) { |
|
935 |
dbuf_unoverride(db, tx->tx_txg); |
|
936 |
} |
|
937 |
||
938 |
mutex_enter(&dn->dn_mtx); |
|
939 |
/* |
|
940 |
* Don't set dirtyctx to SYNC if we're just modifying this as we |
|
941 |
* initialize the objset. |
|
942 |
*/ |
|
943 |
if (dn->dn_dirtyctx == DN_UNDIRTIED && |
|
944 |
!BP_IS_HOLE(&dn->dn_objset->os_rootbp)) { |
|
945 |
dn->dn_dirtyctx = |
|
946 |
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN); |
|
947 |
ASSERT(dn->dn_dirtyctx_firstset == NULL); |
|
948 |
dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); |
|
949 |
} |
|
950 |
mutex_exit(&dn->dn_mtx); |
|
951 |
||
952 |
/* |
|
953 |
* If this buffer is already dirty, we're done. |
|
954 |
*/ |
|
955 |
if (list_link_active(&db->db_dirty_node[txgoff])) { |
|
956 |
mutex_exit(&db->db_mtx); |
|
957 |
return; |
|
958 |
} |
|
959 |
||
960 |
/* |
|
961 |
* Only valid if not already dirty. |
|
962 |
*/ |
|
963 |
ASSERT(dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == |
|
964 |
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); |
|
965 |
||
966 |
ASSERT3U(dn->dn_nlevels, >, db->db_level); |
|
967 |
ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || |
|
968 |
dn->dn_phys->dn_nlevels > db->db_level || |
|
969 |
dn->dn_next_nlevels[txgoff] > db->db_level || |
|
970 |
dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || |
|
971 |
dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); |
|
972 |
||
973 |
/* |
|
974 |
* We should only be dirtying in syncing context if it's the |
|
975 |
* mos, a spa os, or we're initializing the os. However, we are |
|
976 |
* allowed to dirty in syncing context provided we already |
|
977 |
* dirtied it in open context. Hence we must make this |
|
978 |
* assertion only if we're not already dirty. |
|
979 |
*/ |
|
980 |
ASSERT(!dmu_tx_is_syncing(tx) || |
|
981 |
os->os_dsl_dataset == NULL || |
|
982 |
!dsl_dir_is_private(os->os_dsl_dataset->ds_dir) || |
|
983 |
!BP_IS_HOLE(&os->os_rootbp)); |
|
984 |
ASSERT(db->db.db_size != 0); |
|
985 |
||
986 |
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); |
|
987 |
||
1544 | 988 |
/* |
989 |
* If this buffer is dirty in an old transaction group we need |
|
990 |
* to make a copy of it so that the changes we make in this |
|
991 |
* transaction group won't leak out when we sync the older txg. |
|
992 |
*/ |
|
993 |
if (db->db_blkid == DB_BONUS_BLKID) { |
|
994 |
ASSERT(db->db.db_data != NULL); |
|
995 |
ASSERT(db->db_d.db_data_old[txgoff] == NULL); |
|
996 |
dbuf_fix_old_bonus_data(db, tx->tx_txg); |
|
997 |
db->db_d.db_data_old[txgoff] = db->db.db_data; |
|
998 |
} else if (db->db_level == 0) { |
|
789 | 999 |
/* |
1000 |
* Release the data buffer from the cache so that we |
|
1001 |
* can modify it without impacting possible other users |
|
1002 |
* of this cached data block. Note that indirect blocks |
|
1003 |
* and private objects are not released until the syncing |
|
1004 |
* state (since they are only modified then). |
|
1005 |
*/ |
|
1006 |
ASSERT(db->db_buf != NULL); |
|
1007 |
ASSERT(db->db_d.db_data_old[txgoff] == NULL); |
|
1544 | 1008 |
if (db->db.db_object != DMU_META_DNODE_OBJECT) { |
789 | 1009 |
arc_release(db->db_buf, db); |
1010 |
dbuf_fix_old_data(db, tx->tx_txg); |
|
1011 |
ASSERT(db->db_buf != NULL); |
|
1012 |
} |
|
1013 |
db->db_d.db_data_old[txgoff] = db->db_buf; |
|
1014 |
} |
|
1015 |
||
1016 |
mutex_enter(&dn->dn_mtx); |
|
1017 |
/* |
|
1018 |
* We could have been freed_in_flight between the dbuf_noread |
|
1019 |
* and dbuf_dirty. We win, as though the dbuf_noread() had |
|
1020 |
* happened after the free. |
|
1021 |
*/ |
|
1022 |
if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID) { |
|
1023 |
dnode_clear_range(dn, db->db_blkid, 1, tx); |
|
1024 |
db->db_d.db_freed_in_flight = FALSE; |
|
1025 |
} |
|
1026 |
||
1027 |
db->db_dirtied = tx->tx_txg; |
|
1028 |
list_insert_tail(&dn->dn_dirty_dbufs[txgoff], db); |
|
1029 |
mutex_exit(&dn->dn_mtx); |
|
1030 |
||
1031 |
if (db->db_blkid != DB_BONUS_BLKID) { |
|
1544 | 1032 |
/* |
1033 |
* Update the accounting. |
|
1034 |
*/ |
|
1035 |
if (!dbuf_new_block(db) && db->db_blkptr) { |
|
789 | 1036 |
/* |
1037 |
* This is only a guess -- if the dbuf is dirty |
|
1038 |
* in a previous txg, we don't know how much |
|
1039 |
* space it will use on disk yet. We should |
|
1040 |
* really have the struct_rwlock to access |
|
1041 |
* db_blkptr, but since this is just a guess, |
|
1042 |
* it's OK if we get an odd answer. |
|
1043 |
*/ |
|
1044 |
dnode_willuse_space(dn, |
|
2082 | 1045 |
-bp_get_dasize(os->os_spa, db->db_blkptr), tx); |
789 | 1046 |
} |
1047 |
dnode_willuse_space(dn, db->db.db_size, tx); |
|
1048 |
} |
|
1049 |
||
1050 |
/* |
|
1051 |
* This buffer is now part of this txg |
|
1052 |
*/ |
|
1053 |
dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); |
|
1054 |
db->db_dirtycnt += 1; |
|
1055 |
ASSERT3U(db->db_dirtycnt, <=, 3); |
|
1056 |
||
1057 |
mutex_exit(&db->db_mtx); |
|
1058 |
||
1059 |
if (db->db_blkid == DB_BONUS_BLKID) { |
|
1060 |
dnode_setdirty(dn, tx); |
|
1061 |
return; |
|
1062 |
} |
|
1063 |
||
1064 |
if (db->db_level == 0) |
|
1065 |
dnode_new_blkid(dn, db->db_blkid, tx); |
|
1066 |
||
1067 |
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { |
|
1068 |
rw_enter(&dn->dn_struct_rwlock, RW_READER); |
|
1069 |
drop_struct_lock = TRUE; |
|
1070 |
} |
|
1071 |
||
1072 |
if (db->db_level < dn->dn_nlevels-1) { |
|
1073 |
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; |
|
1074 |
dmu_buf_impl_t *parent; |
|
1075 |
parent = dbuf_hold_level(dn, db->db_level+1, |
|
1076 |
db->db_blkid >> epbs, FTAG); |
|
1077 |
if (drop_struct_lock) |
|
1078 |
rw_exit(&dn->dn_struct_rwlock); |
|
1079 |
dbuf_dirty(parent, tx); |
|
1544 | 1080 |
dbuf_rele(parent, FTAG); |
789 | 1081 |
} else { |
1082 |
if (drop_struct_lock) |
|
1083 |
rw_exit(&dn->dn_struct_rwlock); |
|
1084 |
} |
|
1085 |
||
1086 |
dnode_setdirty(dn, tx); |
|
1087 |
} |
|
1088 |
||
1089 |
static int |
|
1090 |
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) |
|
1091 |
{ |
|
1092 |
dnode_t *dn = db->db_dnode; |
|
1093 |
int txgoff = tx->tx_txg & TXG_MASK; |
|
1544 | 1094 |
int64_t holds; |
789 | 1095 |
|
1096 |
ASSERT(tx->tx_txg != 0); |
|
1544 | 1097 |
ASSERT(db->db_blkid != DB_BONUS_BLKID); |
789 | 1098 |
|
1099 |
mutex_enter(&db->db_mtx); |
|
1100 |
||
1101 |
/* |
|
1102 |
* If this buffer is not dirty, we're done. |
|
1103 |
*/ |
|
1104 |
if (!list_link_active(&db->db_dirty_node[txgoff])) { |
|
1105 |
mutex_exit(&db->db_mtx); |
|
1106 |
return (0); |
|
1107 |
} |
|
1108 |
||
1109 |
/* |
|
1110 |
* If this buffer is currently held, we cannot undirty |
|
1111 |
* it, since one of the current holders may be in the |
|
1112 |
* middle of an update. Note that users of dbuf_undirty() |
|
1113 |
* should not place a hold on the dbuf before the call. |
|
1114 |
* XXX - this check assumes we are being called from |
|
1115 |
* dbuf_free_range(), perhaps we should move it there? |
|
1116 |
*/ |
|
1117 |
if (refcount_count(&db->db_holds) > db->db_dirtycnt) { |
|
1118 |
mutex_exit(&db->db_mtx); |
|
1119 |
mutex_enter(&dn->dn_mtx); |
|
1120 |
dnode_clear_range(dn, db->db_blkid, 1, tx); |
|
1121 |
mutex_exit(&dn->dn_mtx); |
|
1122 |
return (0); |
|
1123 |
} |
|
1124 |
||
1125 |
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); |
|
1126 |
||
1127 |
dbuf_unoverride(db, tx->tx_txg); |
|
1128 |
||
1129 |
ASSERT(db->db.db_size != 0); |
|
1130 |
if (db->db_level == 0) { |
|
1131 |
ASSERT(db->db_buf != NULL); |
|
1132 |
ASSERT(db->db_d.db_data_old[txgoff] != NULL); |
|
1133 |
if (db->db_d.db_data_old[txgoff] != db->db_buf) |
|
1544 | 1134 |
VERIFY(arc_buf_remove_ref( |
1135 |
db->db_d.db_data_old[txgoff], db) == 1); |
|
789 | 1136 |
db->db_d.db_data_old[txgoff] = NULL; |
1137 |
} |
|
1138 |
||
1139 |
/* XXX would be nice to fix up dn_towrite_space[] */ |
|
1140 |
/* XXX undo db_dirtied? but how? */ |
|
1141 |
/* db->db_dirtied = tx->tx_txg; */ |
|
1142 |
||
1143 |
mutex_enter(&dn->dn_mtx); |
|
1144 |
list_remove(&dn->dn_dirty_dbufs[txgoff], db); |
|
1145 |
mutex_exit(&dn->dn_mtx); |
|
1146 |
||
1147 |
ASSERT(db->db_dirtycnt > 0); |
|
1148 |
db->db_dirtycnt -= 1; |
|
1149 |
||
1544 | 1150 |
if ((holds = refcount_remove(&db->db_holds, |
1151 |
(void *)(uintptr_t)tx->tx_txg)) == 0) { |
|
1152 |
arc_buf_t *buf = db->db_buf; |
|
789 | 1153 |
|
1544 | 1154 |
ASSERT(arc_released(buf)); |
1155 |
dbuf_set_data(db, NULL); |
|
1156 |
VERIFY(arc_buf_remove_ref(buf, db) == 1); |
|
789 | 1157 |
dbuf_evict(db); |
1158 |
return (1); |
|
1159 |
} |
|
1544 | 1160 |
ASSERT(holds > 0); |
789 | 1161 |
|
1162 |
mutex_exit(&db->db_mtx); |
|
1163 |
return (0); |
|
1164 |
} |
|
1165 |
||
1166 |
#pragma weak dmu_buf_will_dirty = dbuf_will_dirty |
|
1167 |
void |
|
1168 |
dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) |
|
1169 |
{ |
|
1170 |
int rf = DB_RF_MUST_SUCCEED; |
|
1171 |
||
1172 |
ASSERT(tx->tx_txg != 0); |
|
1173 |
ASSERT(!refcount_is_zero(&db->db_holds)); |
|
1174 |
||
1175 |
if (RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock)) |
|
1176 |
rf |= DB_RF_HAVESTRUCT; |
|
1544 | 1177 |
(void) dbuf_read(db, NULL, rf); |
789 | 1178 |
dbuf_dirty(db, tx); |
1179 |
} |
|
1180 |
||
1181 |
void |
|
1544 | 1182 |
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) |
789 | 1183 |
{ |
1544 | 1184 |
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; |
1185 |
||
1186 |
ASSERT(db->db_blkid != DB_BONUS_BLKID); |
|
789 | 1187 |
ASSERT(tx->tx_txg != 0); |
1188 |
ASSERT(db->db_level == 0); |
|
1189 |
ASSERT(!refcount_is_zero(&db->db_holds)); |
|
1190 |
||
1544 | 1191 |
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || |
789 | 1192 |
dmu_tx_private_ok(tx)); |
1193 |
||
1194 |
dbuf_noread(db); |
|
1195 |
dbuf_dirty(db, tx); |
|
1196 |
} |
|
1197 |
||
1198 |
#pragma weak dmu_buf_fill_done = dbuf_fill_done |
|
1199 |
/* ARGSUSED */ |
|
1200 |
void |
|
1201 |
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) |
|
1202 |
{ |
|
1203 |
mutex_enter(&db->db_mtx); |
|
873
adefbfa5f42d
6347448 non ZFS_DEBUG kernels shouldn't call empty verify functions
ek110237
parents:
789
diff
changeset
|
1204 |
DBUF_VERIFY(db); |
789 | 1205 |
|
1206 |
if (db->db_state == DB_FILL) { |
|
1207 |
if (db->db_level == 0 && db->db_d.db_freed_in_flight) { |
|
1544 | 1208 |
ASSERT(db->db_blkid != DB_BONUS_BLKID); |
789 | 1209 |
/* we were freed while filling */ |
1210 |
/* XXX dbuf_undirty? */ |
|
1211 |
bzero(db->db.db_data, db->db.db_size); |
|
1212 |
db->db_d.db_freed_in_flight = FALSE; |
|
1213 |
} |
|
1214 |
db->db_state = DB_CACHED; |
|
1215 |
cv_broadcast(&db->db_changed); |
|
1216 |
} |
|
1217 |
mutex_exit(&db->db_mtx); |
|
1218 |
} |
|
1219 |
||
1544 | 1220 |
/* |
1221 |
* "Clear" the contents of this dbuf. This will mark the dbuf |
|
1222 |
* EVICTING and clear *most* of its references. Unfortunetely, |
|
1223 |
* when we are not holding the dn_dbufs_mtx, we can't clear the |
|
1224 |
* entry in the dn_dbufs list. We have to wait until dbuf_destroy() |
|
1225 |
* in this case. For callers from the DMU we will usually see: |
|
1226 |
* dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy() |
|
1227 |
* For the arc callback, we will usually see: |
|
1228 |
* dbuf_do_evict()->dbuf_clear();dbuf_destroy() |
|
1229 |
* Sometimes, though, we will get a mix of these two: |
|
1230 |
* DMU: dbuf_clear()->arc_buf_evict() |
|
1231 |
* ARC: dbuf_do_evict()->dbuf_destroy() |
|
1232 |
*/ |
|
1233 |
void |
|
789 | 1234 |
dbuf_clear(dmu_buf_impl_t *db) |
1235 |
{ |
|
1236 |
dnode_t *dn = db->db_dnode; |
|
1544 | 1237 |
dmu_buf_impl_t *parent = db->db_parent; |
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1238 |
dmu_buf_impl_t *dndb = dn->dn_dbuf; |
1544 | 1239 |
int dbuf_gone = FALSE; |
789 | 1240 |
|
1241 |
ASSERT(MUTEX_HELD(&db->db_mtx)); |
|
1242 |
ASSERT(refcount_is_zero(&db->db_holds)); |
|
1243 |
||
1544 | 1244 |
dbuf_evict_user(db); |
1245 |
||
789 | 1246 |
if (db->db_state == DB_CACHED) { |
1544 | 1247 |
ASSERT(db->db.db_data != NULL); |
1248 |
if (db->db_blkid == DB_BONUS_BLKID) |
|
1249 |
zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN); |
|
789 | 1250 |
db->db.db_data = NULL; |
1251 |
db->db_state = DB_UNCACHED; |
|
1252 |
} |
|
1253 |
||
1254 |
ASSERT3U(db->db_state, ==, DB_UNCACHED); |
|
1255 |
ASSERT(db->db_data_pending == NULL); |
|
1256 |
||
1544 | 1257 |
db->db_state = DB_EVICTING; |
1258 |
db->db_blkptr = NULL; |
|
1259 |
||
1260 |
if (db->db_blkid != DB_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) { |
|
1261 |
list_remove(&dn->dn_dbufs, db); |
|
1262 |
dnode_rele(dn, db); |
|
1263 |
} |
|
1264 |
||
1265 |
if (db->db_buf) |
|
1266 |
dbuf_gone = arc_buf_evict(db->db_buf); |
|
1267 |
||
1268 |
if (!dbuf_gone) |
|
1269 |
mutex_exit(&db->db_mtx); |
|
789 | 1270 |
|
1271 |
/* |
|
1272 |
* If this dbuf is referened from an indirect dbuf, |
|
1273 |
* decrement the ref count on the indirect dbuf. |
|
1274 |
*/ |
|
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1275 |
if (parent && parent != dndb) |
1544 | 1276 |
dbuf_rele(parent, db); |
789 | 1277 |
} |
1278 |
||
1279 |
static int |
|
1280 |
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, |
|
1281 |
dmu_buf_impl_t **parentp, blkptr_t **bpp) |
|
1282 |
{ |
|
1283 |
int nlevels, epbs; |
|
1284 |
||
1544 | 1285 |
ASSERT(blkid != DB_BONUS_BLKID); |
1286 |
||
789 | 1287 |
if (dn->dn_phys->dn_nlevels == 0) |
1288 |
nlevels = 1; |
|
1289 |
else |
|
1290 |
nlevels = dn->dn_phys->dn_nlevels; |
|
1291 |
||
1292 |
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; |
|
1293 |
||
1294 |
ASSERT3U(level * epbs, <, 64); |
|
1295 |
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); |
|
1544 | 1296 |
if (level >= nlevels || |
789 | 1297 |
(blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { |
1298 |
/* the buffer has no parent yet */ |
|
1299 |
*parentp = NULL; |
|
1300 |
*bpp = NULL; |
|
1301 |
return (ENOENT); |
|
1302 |
} else if (level < nlevels-1) { |
|
1303 |
/* this block is referenced from an indirect block */ |
|
1304 |
int err = dbuf_hold_impl(dn, level+1, |
|
1305 |
blkid >> epbs, fail_sparse, NULL, parentp); |
|
1306 |
if (err) |
|
1307 |
return (err); |
|
1544 | 1308 |
err = dbuf_read(*parentp, NULL, |
1309 |
(DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); |
|
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1310 |
if (err) { |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1311 |
dbuf_rele(*parentp, NULL); |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1312 |
*parentp = NULL; |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1313 |
return (err); |
1544 | 1314 |
} |
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1315 |
*bpp = ((blkptr_t *)(*parentp)->db.db_data) + |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1316 |
(blkid & ((1ULL << epbs) - 1)); |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1317 |
return (0); |
789 | 1318 |
} else { |
1319 |
/* the block is referenced from the dnode */ |
|
1320 |
ASSERT3U(level, ==, nlevels-1); |
|
1321 |
ASSERT(dn->dn_phys->dn_nblkptr == 0 || |
|
1322 |
blkid < dn->dn_phys->dn_nblkptr); |
|
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1323 |
if (dn->dn_dbuf) { |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1324 |
dbuf_add_ref(dn->dn_dbuf, NULL); |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1325 |
*parentp = dn->dn_dbuf; |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1326 |
} |
789 | 1327 |
*bpp = &dn->dn_phys->dn_blkptr[blkid]; |
1328 |
return (0); |
|
1329 |
} |
|
1330 |
} |
|
1331 |
||
1332 |
static dmu_buf_impl_t * |
|
1333 |
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, |
|
1334 |
dmu_buf_impl_t *parent, blkptr_t *blkptr) |
|
1335 |
{ |
|
1336 |
objset_impl_t *os = dn->dn_objset; |
|
1337 |
dmu_buf_impl_t *db, *odb; |
|
1338 |
||
1339 |
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); |
|
1340 |
ASSERT(dn->dn_type != DMU_OT_NONE); |
|
1341 |
||
1342 |
db = kmem_cache_alloc(dbuf_cache, KM_SLEEP); |
|
1343 |
||
1344 |
db->db_objset = os; |
|
1345 |
db->db.db_object = dn->dn_object; |
|
1346 |
db->db_level = level; |
|
1347 |
db->db_blkid = blkid; |
|
1544 | 1348 |
db->db_dirtied = 0; |
1349 |
db->db_dirtycnt = 0; |
|
1350 |
db->db_dnode = dn; |
|
1351 |
db->db_parent = parent; |
|
1352 |
db->db_blkptr = blkptr; |
|
789 | 1353 |
|
1544 | 1354 |
bzero(&db->db_d, sizeof (db->db_d)); |
1355 |
||
1356 |
if (blkid == DB_BONUS_BLKID) { |
|
1357 |
ASSERT3P(parent, ==, dn->dn_dbuf); |
|
789 | 1358 |
db->db.db_size = dn->dn_bonuslen; |
1359 |
db->db.db_offset = DB_BONUS_BLKID; |
|
1544 | 1360 |
db->db_state = DB_UNCACHED; |
1361 |
/* the bonus dbuf is not placed in the hash table */ |
|
1362 |
return (db); |
|
789 | 1363 |
} else { |
1364 |
int blocksize = |
|
1365 |
db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz; |
|
1366 |
db->db.db_size = blocksize; |
|
1367 |
db->db.db_offset = db->db_blkid * blocksize; |
|
1368 |
} |
|
1369 |
||
1370 |
/* |
|
1371 |
* Hold the dn_dbufs_mtx while we get the new dbuf |
|
1372 |
* in the hash table *and* added to the dbufs list. |
|
1373 |
* This prevents a possible deadlock with someone |
|
1374 |
* trying to look up this dbuf before its added to the |
|
1375 |
* dn_dbufs list. |
|
1376 |
*/ |
|
1377 |
mutex_enter(&dn->dn_dbufs_mtx); |
|
1544 | 1378 |
db->db_state = DB_EVICTING; |
789 | 1379 |
if ((odb = dbuf_hash_insert(db)) != NULL) { |
1380 |
/* someone else inserted it first */ |
|
1381 |
kmem_cache_free(dbuf_cache, db); |
|
1382 |
mutex_exit(&dn->dn_dbufs_mtx); |
|
1383 |
return (odb); |
|
1384 |
} |
|
1385 |
list_insert_head(&dn->dn_dbufs, db); |
|
1544 | 1386 |
db->db_state = DB_UNCACHED; |
789 | 1387 |
mutex_exit(&dn->dn_dbufs_mtx); |
1388 |
||
1389 |
if (parent && parent != dn->dn_dbuf) |
|
1390 |
dbuf_add_ref(parent, db); |
|
1391 |
||
1544 | 1392 |
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || |
1393 |
refcount_count(&dn->dn_holds) > 0); |
|
789 | 1394 |
(void) refcount_add(&dn->dn_holds, db); |
1395 |
||
1396 |
dprintf_dbuf(db, "db=%p\n", db); |
|
1397 |
||
1398 |
return (db); |
|
1399 |
} |
|
1400 |
||
1401 |
static int |
|
1544 | 1402 |
dbuf_do_evict(void *private) |
789 | 1403 |
{ |
1544 | 1404 |
arc_buf_t *buf = private; |
1405 |
dmu_buf_impl_t *db = buf->b_private; |
|
789 | 1406 |
|
1544 | 1407 |
if (!MUTEX_HELD(&db->db_mtx)) |
1408 |
mutex_enter(&db->db_mtx); |
|
789 | 1409 |
|
1544 | 1410 |
ASSERT(refcount_is_zero(&db->db_holds)); |
789 | 1411 |
|
1544 | 1412 |
if (db->db_state != DB_EVICTING) { |
1413 |
ASSERT(db->db_state == DB_CACHED); |
|
1414 |
DBUF_VERIFY(db); |
|
1415 |
db->db_buf = NULL; |
|
1416 |
dbuf_evict(db); |
|
1417 |
} else { |
|
1418 |
mutex_exit(&db->db_mtx); |
|
1419 |
dbuf_destroy(db); |
|
789 | 1420 |
} |
1544 | 1421 |
return (0); |
789 | 1422 |
} |
1423 |
||
1424 |
static void |
|
1425 |
dbuf_destroy(dmu_buf_impl_t *db) |
|
1426 |
{ |
|
1427 |
ASSERT(refcount_is_zero(&db->db_holds)); |
|
1428 |
||
1544 | 1429 |
if (db->db_blkid != DB_BONUS_BLKID) { |
1430 |
dnode_t *dn = db->db_dnode; |
|
1431 |
||
1432 |
/* |
|
1433 |
* If this dbuf is still on the dn_dbufs list, |
|
1434 |
* remove it from that list. |
|
1435 |
*/ |
|
1436 |
if (list_link_active(&db->db_link)) { |
|
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1437 |
mutex_enter(&dn->dn_dbufs_mtx); |
1544 | 1438 |
list_remove(&dn->dn_dbufs, db); |
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1439 |
mutex_exit(&dn->dn_dbufs_mtx); |
1544 | 1440 |
|
1441 |
dnode_rele(dn, db); |
|
1442 |
} |
|
1443 |
dbuf_hash_remove(db); |
|
1444 |
} |
|
1445 |
db->db_parent = NULL; |
|
1446 |
db->db_dnode = NULL; |
|
1447 |
db->db_buf = NULL; |
|
1448 |
||
789 | 1449 |
ASSERT(db->db.db_data == NULL); |
1450 |
ASSERT(db->db_hash_next == NULL); |
|
1451 |
ASSERT(db->db_blkptr == NULL); |
|
1452 |
ASSERT(db->db_data_pending == NULL); |
|
1453 |
||
1454 |
kmem_cache_free(dbuf_cache, db); |
|
1455 |
} |
|
1456 |
||
1457 |
void |
|
1458 |
dbuf_prefetch(dnode_t *dn, uint64_t blkid) |
|
1459 |
{ |
|
2391 | 1460 |
dmu_buf_impl_t *db = NULL; |
789 | 1461 |
blkptr_t *bp = NULL; |
1462 |
||
1463 |
ASSERT(blkid != DB_BONUS_BLKID); |
|
1464 |
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); |
|
1465 |
||
1466 |
if (dnode_block_freed(dn, blkid)) |
|
1467 |
return; |
|
1468 |
||
1469 |
/* dbuf_find() returns with db_mtx held */ |
|
1470 |
if (db = dbuf_find(dn, 0, blkid)) { |
|
2391 | 1471 |
if (refcount_count(&db->db_holds) > 0) { |
1472 |
/* |
|
1473 |
* This dbuf is active. We assume that it is |
|
1474 |
* already CACHED, or else about to be either |
|
1475 |
* read or filled. |
|
1476 |
*/ |
|
1477 |
mutex_exit(&db->db_mtx); |
|
1478 |
return; |
|
1479 |
} |
|
789 | 1480 |
mutex_exit(&db->db_mtx); |
1481 |
} |
|
1482 |
||
2391 | 1483 |
if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) { |
789 | 1484 |
if (bp && !BP_IS_HOLE(bp)) { |
2391 | 1485 |
uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH; |
1544 | 1486 |
zbookmark_t zb; |
1487 |
zb.zb_objset = dn->dn_objset->os_dsl_dataset ? |
|
1488 |
dn->dn_objset->os_dsl_dataset->ds_object : 0; |
|
1489 |
zb.zb_object = dn->dn_object; |
|
1490 |
zb.zb_level = 0; |
|
1491 |
zb.zb_blkid = blkid; |
|
1492 |
||
789 | 1493 |
(void) arc_read(NULL, dn->dn_objset->os_spa, bp, |
1494 |
dmu_ot[dn->dn_type].ot_byteswap, |
|
1495 |
NULL, NULL, ZIO_PRIORITY_ASYNC_READ, |
|
1496 |
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, |
|
2391 | 1497 |
&aflags, &zb); |
789 | 1498 |
} |
2391 | 1499 |
if (db) |
1500 |
dbuf_rele(db, NULL); |
|
789 | 1501 |
} |
1502 |
} |
|
1503 |
||
1504 |
/* |
|
1505 |
* Returns with db_holds incremented, and db_mtx not held. |
|
1506 |
* Note: dn_struct_rwlock must be held. |
|
1507 |
*/ |
|
1508 |
int |
|
1509 |
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse, |
|
1510 |
void *tag, dmu_buf_impl_t **dbp) |
|
1511 |
{ |
|
1512 |
dmu_buf_impl_t *db, *parent = NULL; |
|
1513 |
||
1544 | 1514 |
ASSERT(blkid != DB_BONUS_BLKID); |
789 | 1515 |
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); |
1516 |
ASSERT3U(dn->dn_nlevels, >, level); |
|
1517 |
||
1518 |
*dbp = NULL; |
|
1544 | 1519 |
top: |
789 | 1520 |
/* dbuf_find() returns with db_mtx held */ |
1521 |
db = dbuf_find(dn, level, blkid); |
|
1522 |
||
1523 |
if (db == NULL) { |
|
1524 |
blkptr_t *bp = NULL; |
|
1525 |
int err; |
|
1526 |
||
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1527 |
ASSERT3P(parent, ==, NULL); |
789 | 1528 |
err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); |
1529 |
if (fail_sparse) { |
|
1530 |
if (err == 0 && bp && BP_IS_HOLE(bp)) |
|
1531 |
err = ENOENT; |
|
1532 |
if (err) { |
|
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1533 |
if (parent) |
1544 | 1534 |
dbuf_rele(parent, NULL); |
789 | 1535 |
return (err); |
1536 |
} |
|
1537 |
} |
|
1544 | 1538 |
if (err && err != ENOENT) |
1539 |
return (err); |
|
789 | 1540 |
db = dbuf_create(dn, level, blkid, parent, bp); |
1541 |
} |
|
1542 |
||
1544 | 1543 |
if (db->db_buf && refcount_is_zero(&db->db_holds)) { |
1544 |
arc_buf_add_ref(db->db_buf, db); |
|
1545 |
if (db->db_buf->b_data == NULL) { |
|
1546 |
dbuf_clear(db); |
|
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1547 |
if (parent) { |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1548 |
dbuf_rele(parent, NULL); |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1549 |
parent = NULL; |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1550 |
} |
1544 | 1551 |
goto top; |
1552 |
} |
|
1553 |
ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); |
|
1554 |
} |
|
1555 |
||
1556 |
ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); |
|
1557 |
||
789 | 1558 |
/* |
1559 |
* If this buffer is currently syncing out, and we are |
|
1560 |
* are still referencing it from db_data, we need to make |
|
1561 |
* a copy of it in case we decide we want to dirty it |
|
1562 |
* again in this txg. |
|
1563 |
*/ |
|
1564 |
if (db->db_level == 0 && db->db_state == DB_CACHED && |
|
1544 | 1565 |
dn->dn_object != DMU_META_DNODE_OBJECT && |
789 | 1566 |
db->db_data_pending == db->db_buf) { |
1567 |
int size = (db->db_blkid == DB_BONUS_BLKID) ? |
|
1568 |
DN_MAX_BONUSLEN : db->db.db_size; |
|
1569 |
||
1570 |
dbuf_set_data(db, arc_buf_alloc(db->db_dnode->dn_objset->os_spa, |
|
1571 |
size, db)); |
|
1572 |
bcopy(db->db_data_pending->b_data, db->db.db_data, |
|
1573 |
db->db.db_size); |
|
1574 |
} |
|
1575 |
||
1544 | 1576 |
(void) refcount_add(&db->db_holds, tag); |
789 | 1577 |
dbuf_update_data(db); |
873
adefbfa5f42d
6347448 non ZFS_DEBUG kernels shouldn't call empty verify functions
ek110237
parents:
789
diff
changeset
|
1578 |
DBUF_VERIFY(db); |
789 | 1579 |
mutex_exit(&db->db_mtx); |
1580 |
||
1581 |
/* NOTE: we can't rele the parent until after we drop the db_mtx */ |
|
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
1582 |
if (parent) |
1544 | 1583 |
dbuf_rele(parent, NULL); |
789 | 1584 |
|
1585 |
ASSERT3P(db->db_dnode, ==, dn); |
|
1586 |
ASSERT3U(db->db_blkid, ==, blkid); |
|
1587 |
ASSERT3U(db->db_level, ==, level); |
|
1588 |
*dbp = db; |
|
1589 |
||
1590 |
return (0); |
|
1591 |
} |
|
1592 |
||
1593 |
dmu_buf_impl_t * |
|
1544 | 1594 |
dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) |
789 | 1595 |
{ |
1596 |
dmu_buf_impl_t *db; |
|
1544 | 1597 |
int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db); |
1598 |
return (err ? NULL : db); |
|
789 | 1599 |
} |
1600 |
||
1601 |
dmu_buf_impl_t * |
|
1602 |
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) |
|
1603 |
{ |
|
1604 |
dmu_buf_impl_t *db; |
|
1544 | 1605 |
int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db); |
1606 |
return (err ? NULL : db); |
|
789 | 1607 |
} |
1608 |
||
1609 |
dmu_buf_impl_t * |
|
1544 | 1610 |
dbuf_create_bonus(dnode_t *dn) |
789 | 1611 |
{ |
1544 | 1612 |
dmu_buf_impl_t *db = dn->dn_bonus; |
1613 |
||
1614 |
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); |
|
1615 |
||
1616 |
ASSERT(dn->dn_bonus == NULL); |
|
1617 |
db = dbuf_create(dn, 0, DB_BONUS_BLKID, dn->dn_dbuf, NULL); |
|
789 | 1618 |
return (db); |
1619 |
} |
|
1620 |
||
1544 | 1621 |
#pragma weak dmu_buf_add_ref = dbuf_add_ref |
789 | 1622 |
void |
1623 |
dbuf_add_ref(dmu_buf_impl_t *db, void *tag) |
|
1624 |
{ |
|
1544 | 1625 |
int64_t holds = refcount_add(&db->db_holds, tag); |
1626 |
ASSERT(holds > 1); |
|
789 | 1627 |
} |
1628 |
||
1544 | 1629 |
#pragma weak dmu_buf_rele = dbuf_rele
/*
 * Release a hold (acquired under "tag") on this dbuf.  When the last
 * hold is dropped, dispose of the buffer: a bonus buffer releases its
 * dnode hold; a buffer with no ARC data, or with anonymous (released)
 * ARC data, is evicted; otherwise we just drop our ARC reference and
 * leave the dbuf cached.
 */
void
dbuf_rele(dmu_buf_impl_t *db, void *tag)
{
	int64_t holds;

	mutex_enter(&db->db_mtx);
	DBUF_VERIFY(db);

	holds = refcount_remove(&db->db_holds, tag);
	ASSERT(holds >= 0);

	/*
	 * If only the dirty holds remain on a level-0 buffer that asked
	 * for immediate eviction, evict the user state now.
	 */
	if (holds == db->db_dirtycnt &&
	    db->db_level == 0 && db->db_d.db_immediate_evict)
		dbuf_evict_user(db);

	if (holds == 0) {
		if (db->db_blkid == DB_BONUS_BLKID) {
			/* Bonus buffers piggyback on the dnode's hold. */
			mutex_exit(&db->db_mtx);
			dnode_rele(db->db_dnode, db);
		} else if (db->db_buf == NULL) {
			/*
			 * This is a special case: we never associated this
			 * dbuf with any data allocated from the ARC.
			 */
			ASSERT3U(db->db_state, ==, DB_UNCACHED);
			dbuf_evict(db);
		} else if (arc_released(db->db_buf)) {
			arc_buf_t *buf = db->db_buf;
			/*
			 * This dbuf has anonymous data associated with it.
			 */
			dbuf_set_data(db, NULL);
			VERIFY(arc_buf_remove_ref(buf, db) == 1);
			dbuf_evict(db);
		} else {
			/*
			 * Drop our ARC reference; the data stays cached
			 * in the ARC and the dbuf stays instantiated.
			 */
			VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0);
			mutex_exit(&db->db_mtx);
		}
	} else {
		mutex_exit(&db->db_mtx);
	}
}
|
1672 |
||
1673 |
#pragma weak dmu_buf_refcount = dbuf_refcount |
|
1674 |
uint64_t |
|
1675 |
dbuf_refcount(dmu_buf_impl_t *db) |
|
1676 |
{ |
|
1677 |
return (refcount_count(&db->db_holds)); |
|
1678 |
} |
|
1679 |
||
1680 |
void * |
|
1681 |
dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr, |
|
1682 |
dmu_buf_evict_func_t *evict_func) |
|
1683 |
{ |
|
1684 |
return (dmu_buf_update_user(db_fake, NULL, user_ptr, |
|
1685 |
user_data_ptr_ptr, evict_func)); |
|
1686 |
} |
|
1687 |
||
1688 |
void * |
|
1689 |
dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr, |
|
1690 |
dmu_buf_evict_func_t *evict_func) |
|
1691 |
{ |
|
1692 |
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; |
|
1693 |
||
1694 |
db->db_d.db_immediate_evict = TRUE; |
|
1695 |
return (dmu_buf_update_user(db_fake, NULL, user_ptr, |
|
1696 |
user_data_ptr_ptr, evict_func)); |
|
1697 |
} |
|
1698 |
||
1699 |
/*
 * Compare-and-swap the dbuf's user state under db_mtx: the new
 * user_ptr/user_data_ptr_ptr/evict_func triple is installed only if
 * the current user pointer equals old_user_ptr.  Returns old_user_ptr
 * on success, or the current (different) user pointer on failure, so
 * the caller can tell the two outcomes apart.
 */
void *
dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
    void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(db->db_level == 0);

	/* A user pointer and its evict callback come and go together. */
	ASSERT((user_ptr == NULL) == (evict_func == NULL));

	mutex_enter(&db->db_mtx);

	if (db->db_d.db_user_ptr == old_user_ptr) {
		db->db_d.db_user_ptr = user_ptr;
		db->db_d.db_user_data_ptr_ptr = user_data_ptr_ptr;
		db->db_d.db_evict_func = evict_func;

		dbuf_update_data(db);
	} else {
		/* Swap refused: report who currently owns the dbuf. */
		old_user_ptr = db->db_d.db_user_ptr;
	}

	mutex_exit(&db->db_mtx);
	return (old_user_ptr);
}
|
1723 |
||
1724 |
void * |
|
1725 |
dmu_buf_get_user(dmu_buf_t *db_fake) |
|
1726 |
{ |
|
1727 |
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; |
|
1728 |
ASSERT(!refcount_is_zero(&db->db_holds)); |
|
1729 |
||
1730 |
return (db->db_d.db_user_ptr); |
|
1731 |
} |
|
1732 |
||
1733 |
/*
 * Write out this dbuf's dirty data for the syncing txg.  Handles the
 * bonus-buffer case (copy into the dnode), bails out early on writes
 * that turned out to be unnecessary (freed buffers, unmodified or
 * past-EOF indirects), applies any dmu_sync() override block pointer,
 * and otherwise issues the ARC write with the appropriate checksum /
 * compression / replication settings.  Every early-out path releases
 * the dirty hold (tagged with the txg) here; on the arc_write() path
 * dbuf_write_done() releases it instead.
 */
void
dbuf_sync(dmu_buf_impl_t *db, zio_t *zio, dmu_tx_t *tx)
{
	arc_buf_t **data;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn = db->db_dnode;
	objset_impl_t *os = dn->dn_objset;
	int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	int checksum, compress;
	zbookmark_t zb;
	int blksz;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, we must be dirtied.  But we
	 * might have been freed after the dirty.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != db->db_d.db_data_old[txg&TXG_MASK]);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	DBUF_VERIFY(db);

	/*
	 * Don't need a lock on db_dirty (dn_mtx), because it can't
	 * be modified yet.
	 */

	if (db->db_blkid == DB_BONUS_BLKID) {
		void **datap = &db->db_d.db_data_old[txg&TXG_MASK];
		/*
		 * Simply copy the bonus data into the dnode.  It will
		 * be written out when the dnode is synced (and it will
		 * be synced, since it must have been dirty for dbuf_sync
		 * to be called).
		 */
		/*
		 * Use dn_phys->dn_bonuslen since db.db_size is the length
		 * of the bonus buffer in the open transaction rather than
		 * the syncing transaction.
		 */
		ASSERT(*datap != NULL);
		ASSERT3U(db->db_level, ==, 0);
		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
		if (*datap != db->db.db_data)
			zio_buf_free(*datap, DN_MAX_BONUSLEN);
		db->db_d.db_data_old[txg&TXG_MASK] = NULL;
		db->db_data_pending = NULL;
		if (db->db_dirtied == txg)
			db->db_dirtied = 0;
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		mutex_exit(&db->db_mtx);
		dbuf_rele(db, (void *)(uintptr_t)txg);
		return;
	}

	if (db->db_level == 0) {
		data = (arc_buf_t **)&db->db_d.db_data_old[txg&TXG_MASK];
		blksz = arc_buf_size(*data);

		/*
		 * This buffer is in the middle of an immediate write.
		 * Wait for the synchronous IO to complete.
		 */
		while (db->db_d.db_overridden_by[txg&TXG_MASK] == IN_DMU_SYNC) {
			ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
			cv_wait(&db->db_changed, &db->db_mtx);
			ASSERT(db->db_d.db_overridden_by[txg&TXG_MASK]);
		}
		/*
		 * If this buffer is currently "in use" (i.e., there are
		 * active holds and db_data still references it), then make
		 * a copy before we start the write so that any modifications
		 * from the open txg will not leak into this write.
		 *
		 * NOTE: this copy does not need to be made for objects only
		 * modified in the syncing context (e.g. DNONE_DNODE blocks)
		 * or if there is no actual write involved (bonus blocks).
		 */
		if (dn->dn_object != DMU_META_DNODE_OBJECT &&
		    db->db_d.db_overridden_by[txg&TXG_MASK] == NULL) {
			if (refcount_count(&db->db_holds) > 1 &&
			    *data == db->db_buf) {
				*data = arc_buf_alloc(os->os_spa, blksz, db);
				bcopy(db->db.db_data, (*data)->b_data, blksz);
			}
			db->db_data_pending = *data;
		} else if (dn->dn_object == DMU_META_DNODE_OBJECT) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			arc_release(db->db_buf, db);
		}
	} else {
		data = &db->db_buf;
		if (*data == NULL) {
			/*
			 * This can happen if we dirty and then free
			 * the level-0 data blocks in the same txg. So
			 * this indirect remains unchanged.
			 */
			if (db->db_dirtied == txg)
				db->db_dirtied = 0;
			ASSERT(db->db_dirtycnt > 0);
			db->db_dirtycnt -= 1;
			mutex_exit(&db->db_mtx);
			dbuf_rele(db, (void *)(uintptr_t)txg);
			return;
		}
		blksz = db->db.db_size;
		ASSERT3U(blksz, ==, 1<<dn->dn_phys->dn_indblkshift);
	}

	ASSERT(*data != NULL);

	if (db->db_level > 0 && !arc_released(db->db_buf)) {
		/*
		 * This indirect buffer was marked dirty, but
		 * never modified (if it had been modified, then
		 * we would have released the buffer).  There is
		 * no reason to write anything.
		 */
		db->db_data_pending = NULL;
		if (db->db_dirtied == txg)
			db->db_dirtied = 0;
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		mutex_exit(&db->db_mtx);
		dbuf_rele(db, (void *)(uintptr_t)txg);
		return;
	} else if (db->db_blkptr == NULL &&
	    db->db_level == dn->dn_phys->dn_nlevels-1 &&
	    db->db_blkid < dn->dn_phys->dn_nblkptr) {
		/*
		 * This buffer was allocated at a time when there was
		 * no available blkptrs from the dnode, or it was
		 * inappropriate to hook it in (i.e., nlevels mis-match).
		 */
		ASSERT(db->db_blkptr == NULL);
		ASSERT(db->db_parent == NULL);
		db->db_parent = dn->dn_dbuf;
		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
		DBUF_VERIFY(db);
		mutex_exit(&db->db_mtx);
	} else if (db->db_blkptr == NULL) {
		dmu_buf_impl_t *parent = db->db_parent;

		mutex_exit(&db->db_mtx);
		ASSERT(dn->dn_phys->dn_nlevels > 1);
		if (parent == NULL) {
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			(void) dbuf_hold_impl(dn, db->db_level+1,
			    db->db_blkid >> epbs, FALSE, FTAG, &parent);
			rw_exit(&dn->dn_struct_rwlock);
			/* Keep a long-lived hold tagged with db itself. */
			dbuf_add_ref(parent, db);
			db->db_parent = parent;
			dbuf_rele(parent, FTAG);
		}
		(void) dbuf_read(parent, NULL, DB_RF_MUST_SUCCEED);
	} else {
		mutex_exit(&db->db_mtx);
	}

	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || db->db_parent != NULL);

	if (db->db_level > 0 &&
	    db->db_blkid > dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)) {
		/*
		 * Don't write indirect blocks past EOF.
		 * We get these when we truncate a file *after* dirtying
		 * blocks in the truncate range (we undirty the level 0
		 * blocks in dbuf_free_range(), but not the indirects).
		 */
#ifdef ZFS_DEBUG
		/*
		 * Verify that this indirect block is empty.
		 */
		blkptr_t *bplist;
		int i;

		mutex_enter(&db->db_mtx);
		bplist = db->db.db_data;
		for (i = 0; i < (1 << epbs); i++) {
			if (!BP_IS_HOLE(&bplist[i])) {
				panic("data past EOF: "
				    "db=%p level=%d id=%llu i=%d\n",
				    db, db->db_level,
				    (u_longlong_t)db->db_blkid, i);
			}
		}
		mutex_exit(&db->db_mtx);
#endif
		ASSERT(db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr));
		mutex_enter(&db->db_mtx);
		db->db_dirtycnt -= 1;
		mutex_exit(&db->db_mtx);
		dbuf_rele(db, (void *)(uintptr_t)txg);
		return;
	}

	if (db->db_parent != dn->dn_dbuf) {
		dmu_buf_impl_t *parent = db->db_parent;

		mutex_enter(&db->db_mtx);
		ASSERT(db->db_level == parent->db_level-1);
		ASSERT(list_link_active(&parent->db_dirty_node[txg&TXG_MASK]));
		/*
		 * We may have read this indirect block after we dirtied it,
		 * so never released it from the cache.
		 */
		arc_release(parent->db_buf, db->db_parent);

		/* Point at our slot within the parent indirect block. */
		db->db_blkptr = (blkptr_t *)parent->db.db_data +
		    (db->db_blkid & ((1ULL << epbs) - 1));
		DBUF_VERIFY(db);
		mutex_exit(&db->db_mtx);
#ifdef ZFS_DEBUG
	} else {
		/*
		 * We don't need to dnode_setdirty(dn) because if we got
		 * here then the parent is already dirty.
		 */
		ASSERT(db->db_level == dn->dn_phys->dn_nlevels-1);
		ASSERT3P(db->db_blkptr, ==,
		    &dn->dn_phys->dn_blkptr[db->db_blkid]);
#endif
	}
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	if (db->db_level == 0 &&
	    db->db_d.db_overridden_by[txg&TXG_MASK] != NULL) {
		/*
		 * dmu_sync() already wrote this block: install the
		 * override block pointer instead of issuing a new write.
		 */
		arc_buf_t **old =
		    (arc_buf_t **)&db->db_d.db_data_old[txg&TXG_MASK];
		blkptr_t **bpp = &db->db_d.db_overridden_by[txg&TXG_MASK];
		int old_size = bp_get_dasize(os->os_spa, db->db_blkptr);
		int new_size = bp_get_dasize(os->os_spa, *bpp);

		ASSERT(db->db_blkid != DB_BONUS_BLKID);

		dnode_diduse_space(dn, new_size-old_size);
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid)
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		mutex_exit(&dn->dn_mtx);

		dsl_dataset_block_born(os->os_dsl_dataset, *bpp, tx);
		if (!BP_IS_HOLE(db->db_blkptr))
			dsl_dataset_block_kill(os->os_dsl_dataset,
			    db->db_blkptr, os->os_synctx);

		mutex_enter(&db->db_mtx);
		*db->db_blkptr = **bpp;
		kmem_free(*bpp, sizeof (blkptr_t));
		*bpp = NULL;

		if (*old != db->db_buf)
			VERIFY(arc_buf_remove_ref(*old, db) == 1);
		else if (!BP_IS_HOLE(db->db_blkptr))
			arc_set_callback(db->db_buf, dbuf_do_evict, db);
		else
			ASSERT(arc_released(db->db_buf));
		*old = NULL;
		db->db_data_pending = NULL;

		cv_broadcast(&db->db_changed);

		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		mutex_exit(&db->db_mtx);
		dbuf_rele(db, (void *)(uintptr_t)txg);
		return;
	}

	if (db->db_level > 0) {
		/*
		 * XXX -- we should design a compression algorithm
		 * that specializes in arrays of bps.
		 */
		checksum = ZIO_CHECKSUM_FLETCHER_4;
		compress = ZIO_COMPRESS_LZJB;
	} else {
		/*
		 * Allow dnode settings to override objset settings,
		 * except for metadata checksums.
		 */
		if (dmu_ot[dn->dn_type].ot_metadata) {
			checksum = os->os_md_checksum;
			compress = zio_compress_select(dn->dn_compress,
			    os->os_md_compress);
		} else {
			checksum = zio_checksum_select(dn->dn_checksum,
			    os->os_checksum);
			compress = zio_compress_select(dn->dn_compress,
			    os->os_compress);
		}
	}
#ifdef ZFS_DEBUG
	if (db->db_parent) {
		ASSERT(list_link_active(
		    &db->db_parent->db_dirty_node[txg&TXG_MASK]));
		ASSERT(db->db_parent == dn->dn_dbuf ||
		    db->db_parent->db_level > 0);
		if (dn->dn_object == DMU_META_DNODE_OBJECT || db->db_level > 0)
			ASSERT(*data == db->db_buf);
	}
#endif
	ASSERT3U(db->db_blkptr->blk_birth, <=, tx->tx_txg);
	zb.zb_objset = os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = db->db.db_object;
	zb.zb_level = db->db_level;
	zb.zb_blkid = db->db_blkid;

	(void) arc_write(zio, os->os_spa, checksum, compress,
	    dmu_get_replication_level(os->os_spa, &zb, dn->dn_type), txg,
	    db->db_blkptr, *data, dbuf_write_done, db,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, ARC_NOWAIT, &zb);
	/*
	 * We can't access db after arc_write, since it could finish
	 * and be freed, and we have no locks on it.
	 */
}
2068 |
||
2069 |
/*
 * Argument bundle for the dbuf_do_born()/dbuf_do_kill() taskq callbacks:
 * the objset the block belongs to, and a copy (not a pointer, since the
 * zio may be gone by the time the taskq runs) of the block pointer.
 */
struct dbuf_arg {
	objset_impl_t *os;
	blkptr_t bp;
};
|
2073 |
||
2074 |
static void |
|
2075 |
dbuf_do_born(void *arg) |
|
2076 |
{ |
|
2077 |
struct dbuf_arg *da = arg; |
|
2078 |
dsl_dataset_block_born(da->os->os_dsl_dataset, |
|
2079 |
&da->bp, da->os->os_synctx); |
|
2080 |
kmem_free(da, sizeof (struct dbuf_arg)); |
|
2081 |
} |
|
2082 |
||
2083 |
static void |
|
2084 |
dbuf_do_kill(void *arg) |
|
2085 |
{ |
|
2086 |
struct dbuf_arg *da = arg; |
|
2087 |
dsl_dataset_block_kill(da->os->os_dsl_dataset, |
|
2088 |
&da->bp, da->os->os_synctx); |
|
2089 |
kmem_free(da, sizeof (struct dbuf_arg)); |
|
2090 |
} |
|
2091 |
||
2092 |
/* ARGSUSED */
/*
 * ARC write completion callback for the arc_write() issued by
 * dbuf_sync().  Updates space accounting, detaches the no-longer-pending
 * buffer, maintains dn_maxblkid and the block pointer's fill count /
 * type / level, dispatches block-born (and, if rewritten, block-kill)
 * accounting to the dbuf taskq, and finally drops the dirty hold taken
 * for this txg.
 */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn = db->db_dnode;
	objset_impl_t *os = dn->dn_objset;
	uint64_t txg = zio->io_txg;
	uint64_t fill = 0;
	int i;
	int old_size, new_size;

	ASSERT3U(zio->io_error, ==, 0);

	dprintf_dbuf_bp(db, &zio->io_bp_orig, "bp_orig: %s", "");

	old_size = bp_get_dasize(os->os_spa, &zio->io_bp_orig);
	new_size = bp_get_dasize(os->os_spa, zio->io_bp);

	dnode_diduse_space(dn, new_size-old_size);

	mutex_enter(&db->db_mtx);

	/* dmu_sync() overrides never reach here; see dbuf_sync(). */
	ASSERT(db->db_d.db_overridden_by[txg&TXG_MASK] == NULL);

	if (db->db_dirtied == txg)
		db->db_dirtied = 0;

	if (db->db_level == 0) {
		arc_buf_t **old =
		    (arc_buf_t **)&db->db_d.db_data_old[txg&TXG_MASK];

		ASSERT(db->db_blkid != DB_BONUS_BLKID);

		/*
		 * Release the copy made for this write, or hand the live
		 * buffer's eviction to the ARC now that it's on disk.
		 */
		if (*old != db->db_buf)
			VERIFY(arc_buf_remove_ref(*old, db) == 1);
		else if (!BP_IS_HOLE(db->db_blkptr))
			arc_set_callback(db->db_buf, dbuf_do_evict, db);
		else
			ASSERT(arc_released(db->db_buf));
		*old = NULL;
		db->db_data_pending = NULL;

		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
		    !BP_IS_HOLE(db->db_blkptr))
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		mutex_exit(&dn->dn_mtx);

		/* Count the allocated entries behind this block. */
		if (dn->dn_type == DMU_OT_DNODE) {
			dnode_phys_t *dnp = db->db.db_data;
			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
			    i--, dnp++) {
				if (dnp->dn_type != DMU_OT_NONE)
					fill++;
			}
		} else {
			if (!BP_IS_HOLE(db->db_blkptr))
				fill = 1;
		}
	} else {
		blkptr_t *bp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			int epbs =
			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
			ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, db->db.db_size);
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
			ASSERT3U(dn->dn_phys->dn_maxblkid
			    >> (db->db_level * epbs), >=, db->db_blkid);
			arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
		/* An indirect's fill is the sum of its children's fills. */
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			ASSERT3U(BP_GET_LSIZE(bp), ==,
			    db->db_level == 1 ? dn->dn_datablksz :
			    (1<<dn->dn_phys->dn_indblkshift));
			fill += bp->blk_fill;
		}
	}

	if (!BP_IS_HOLE(db->db_blkptr)) {
		db->db_blkptr->blk_fill = fill;
		BP_SET_TYPE(db->db_blkptr, dn->dn_type);
		BP_SET_LEVEL(db->db_blkptr, db->db_level);
	} else {
		ASSERT3U(fill, ==, 0);
		ASSERT3U(db->db_blkptr->blk_fill, ==, 0);
	}

	dprintf_dbuf_bp(db, db->db_blkptr,
	    "wrote %llu bytes to blkptr:", zio->io_size);

	ASSERT(db->db_parent == NULL ||
	    list_link_active(&db->db_parent->db_dirty_node[txg&TXG_MASK]));
	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	mutex_exit(&db->db_mtx);

	/* We must do this after we've set the bp's type and level */
	if (!DVA_EQUAL(BP_IDENTITY(zio->io_bp),
	    BP_IDENTITY(&zio->io_bp_orig))) {
		struct dbuf_arg *da;
		da = kmem_alloc(sizeof (struct dbuf_arg), KM_SLEEP);
		da->os = os;
		da->bp = *zio->io_bp;
		(void) taskq_dispatch(dbuf_tq, dbuf_do_born, da, 0);
		if (!BP_IS_HOLE(&zio->io_bp_orig)) {
			da = kmem_alloc(sizeof (struct dbuf_arg), KM_SLEEP);
			da->os = os;
			da->bp = zio->io_bp_orig;
			(void) taskq_dispatch(dbuf_tq, dbuf_do_kill, da, 0);
		}
	}

	dbuf_rele(db, (void *)(uintptr_t)txg);
}