usr/src/uts/common/fs/zfs/dmu_send.c
changeset 13512 060607df0c9d
parent 13509 04570f5cbeca
child 13524 f0e12b33f77c
--- a/usr/src/uts/common/fs/zfs/dmu_send.c	Tue Nov 08 16:22:36 2011 -0500
+++ b/usr/src/uts/common/fs/zfs/dmu_send.c	Tue Nov 08 17:01:06 2011 -0500
@@ -23,7 +23,6 @@
  */
 /*
  * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
- * Copyright (c) 2011 by Delphix. All rights reserved.
  */
 
 #include <sys/dmu.h>
@@ -48,9 +47,6 @@
 #include <sys/ddt.h>
 #include <sys/zfs_onexit.h>
 
-/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
-int zfs_send_corrupt_data = B_FALSE;
-
 static char *dmu_recv_tag = "dmu_recv_tag";
 
 /*
@@ -372,20 +368,8 @@
 
 		if (dsl_read(NULL, spa, bp, pbuf,
 		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
-		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0) {
-			if (zfs_send_corrupt_data) {
-				/* Send a block filled with 0x"zfs badd bloc" */
-				abuf = arc_buf_alloc(spa, blksz, &abuf,
-				    ARC_BUFC_DATA);
-				uint64_t *ptr;
-				for (ptr = abuf->b_data;
-				    (char *)ptr < (char *)abuf->b_data + blksz;
-				    ptr++)
-					*ptr = 0x2f5baddb10c;
-			} else {
-				return (EIO);
-			}
-		}
+		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
+			return (EIO);
 
 		err = dump_data(ba, type, zb->zb_object, zb->zb_blkid * blksz,
 		    blksz, bp, abuf->b_data);
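
The two hunks above revert the zfs_send_corrupt_data behavior: with the tunable gone, a failed read of a send block once again aborts the stream with EIO instead of substituting a block filled with the 0x2f5baddb10c ("zfs badd bloc") marker. Below is a minimal userland sketch of the logic being removed, with a plain calloc() buffer and a local flag standing in for the ARC buffer and the kernel tunable; the names and allocation here are illustrative, not the illumos code itself.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>

static int send_corrupt_data = 0;	/* stand-in for the zfs_send_corrupt_data tunable */

static int
fetch_block(uint64_t *buf, size_t blksz, int read_failed)
{
	if (!read_failed)
		return (0);
	if (!send_corrupt_data)
		return (EIO);		/* post-backout behavior: fail the send */
	/* Pre-backout behavior: fill the block with the marker pattern. */
	for (size_t i = 0; i < blksz / sizeof (uint64_t); i++)
		buf[i] = 0x2f5baddb10cULL;
	return (0);
}

int
main(void)
{
	size_t blksz = 128 * 1024;
	uint64_t *buf = calloc(1, blksz);

	if (buf == NULL)
		return (1);
	send_corrupt_data = 1;
	if (fetch_block(buf, blksz, 1) == 0)
		printf("first word: 0x%llx\n", (unsigned long long)buf[0]);
	free(buf);
	return (0);
}

With the flag set, the "send" proceeds and the marker pattern ends up in the output where the unreadable data would have been; with it clear, the caller sees EIO, which is the only behavior left after this changeset.
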
@@ -514,86 +498,6 @@
 	return (0);
 }
 
-int
-dmu_send_estimate(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
-    uint64_t *sizep)
-{
-	dsl_dataset_t *ds = tosnap->os_dsl_dataset;
-	dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
-	dsl_pool_t *dp = ds->ds_dir->dd_pool;
-	int err;
-	uint64_t size;
-
-	/* tosnap must be a snapshot */
-	if (ds->ds_phys->ds_next_snap_obj == 0)
-		return (EINVAL);
-
-	/* fromsnap must be an earlier snapshot from the same fs as tosnap */
-	if (fromds && (ds->ds_dir != fromds->ds_dir ||
-	    fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
-		return (EXDEV);
-
-	if (fromorigin) {
-		if (fromsnap)
-			return (EINVAL);
-
-		if (dsl_dir_is_clone(ds->ds_dir)) {
-			rw_enter(&dp->dp_config_rwlock, RW_READER);
-			err = dsl_dataset_hold_obj(dp,
-			    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
-			rw_exit(&dp->dp_config_rwlock);
-			if (err)
-				return (err);
-		} else {
-			fromorigin = B_FALSE;
-		}
-	}
-
-	/* Get uncompressed size estimate of changed data. */
-	if (fromds == NULL) {
-		size = ds->ds_phys->ds_uncompressed_bytes;
-	} else {
-		uint64_t used, comp;
-		err = dsl_dataset_space_written(fromds, ds,
-		    &used, &comp, &size);
-		if (fromorigin)
-			dsl_dataset_rele(fromds, FTAG);
-		if (err)
-			return (err);
-	}
-
-	/*
-	 * Assume that space (both on-disk and in-stream) is dominated by
-	 * data.  We will adjust for indirect blocks and the copies property,
-	 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
-	 */
-
-	/*
-	 * Subtract out approximate space used by indirect blocks.
-	 * Assume most space is used by data blocks (non-indirect, non-dnode).
-	 * Assume all blocks are recordsize.  Assume ditto blocks and
-	 * internal fragmentation counter out compression.
-	 *
-	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
-	 * block, which we observe in practice.
-	 */
-	uint64_t recordsize;
-	rw_enter(&dp->dp_config_rwlock, RW_READER);
-	err = dsl_prop_get_ds(ds, "recordsize",
-	    sizeof (recordsize), 1, &recordsize, NULL);
-	rw_exit(&dp->dp_config_rwlock);
-	if (err)
-		return (err);
-	size -= size / recordsize * sizeof (blkptr_t);
-
-	/* Add in the space for the record associated with each block. */
-	size += size / recordsize * sizeof (dmu_replay_record_t);
-
-	*sizep = size;
-
-	return (0);
-}
-
 struct recvbeginsyncarg {
 	const char *tofs;
 	const char *tosnap;
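
The bulk of the last hunk drops dmu_send_estimate(), whose core is the arithmetic described in the removed comment block: treat the stream as dominated by data blocks, subtract roughly one block pointer of indirect overhead per recordsize block, and add one replay record per block. The following is a self-contained sketch of just that adjustment, using 128 bytes for sizeof (blkptr_t) and an assumed placeholder of 312 bytes for sizeof (dmu_replay_record_t), since the real types live in kernel headers; it is an illustration of the estimate, not the removed function.

#include <stdio.h>
#include <stdint.h>

#define	SIZEOF_BLKPTR	128	/* sizeof (blkptr_t): one 128-byte block pointer */
#define	SIZEOF_DRR	312	/* assumed placeholder for sizeof (dmu_replay_record_t) */

static uint64_t
estimate_stream_size(uint64_t changed_bytes, uint64_t recordsize)
{
	uint64_t size = changed_bytes;

	/* Subtract approximate space used by indirect blocks. */
	size -= size / recordsize * SIZEOF_BLKPTR;
	/* Add the space for the record associated with each block. */
	size += size / recordsize * SIZEOF_DRR;
	return (size);
}

int
main(void)
{
	/* e.g. 1 GiB of changed data at the default 128K recordsize */
	uint64_t est = estimate_stream_size(1ULL << 30, 128 * 1024);

	printf("estimated stream size: %llu bytes\n",
	    (unsigned long long)est);
	return (0);
}

With these constants the example prints an estimate slightly above 1 GiB, reflecting that the per-block record overhead outweighs the indirect-block savings for the default recordsize; the exact figure depends on the placeholder record size.
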