PSARC 2009/497 zfs checksum ereport payload additions
author Jonathan Adams <Jonathan.Adams@Sun.COM>
Tue, 22 Sep 2009 17:11:54 -0700
changeset 10614 4f397871da47
parent 10613 97ab7c18ce44
child 10615 4bb212e117c7
PSARC 2009/497 zfs checksum ereport payload additions 6867188 zfs checksum ereports could be more informative
usr/src/uts/common/fs/zfs/sys/zio.h
usr/src/uts/common/fs/zfs/sys/zio_checksum.h
usr/src/uts/common/fs/zfs/vdev_disk.c
usr/src/uts/common/fs/zfs/vdev_mirror.c
usr/src/uts/common/fs/zfs/vdev_raidz.c
usr/src/uts/common/fs/zfs/zfs_fm.c
usr/src/uts/common/fs/zfs/zio.c
usr/src/uts/common/fs/zfs/zio_checksum.c
usr/src/uts/common/sys/fm/fs/zfs.h
--- a/usr/src/uts/common/fs/zfs/sys/zio.h	Tue Sep 22 17:11:45 2009 -0700
+++ b/usr/src/uts/common/fs/zfs/sys/zio.h	Tue Sep 22 17:11:54 2009 -0700
@@ -243,6 +243,39 @@
 	uint8_t			zp_ndvas;
 } zio_prop_t;
 
+typedef struct zio_cksum_report zio_cksum_report_t;
+
+typedef void zio_cksum_finish_f(zio_cksum_report_t *rep,
+    const void *good_data);
+typedef void zio_cksum_free_f(void *cbdata, size_t size);
+
+struct zio_bad_cksum;				/* defined in zio_checksum.h */
+
+struct zio_cksum_report {
+	struct zio_cksum_report *zcr_next;
+	nvlist_t		*zcr_ereport;
+	nvlist_t		*zcr_detector;
+
+	void			*zcr_cbdata;
+	size_t			zcr_cbinfo;	/* passed to zcr_free() */
+	uint64_t		zcr_length;
+	zio_cksum_finish_f	*zcr_finish;
+	zio_cksum_free_f	*zcr_free;
+
+	/* internal use only */
+	struct zio_bad_cksum	*zcr_ckinfo;	/* information from failure */
+};
+
+typedef void zio_vsd_cksum_report_f(zio_t *zio, zio_cksum_report_t *zcr,
+    void *arg);
+
+zio_vsd_cksum_report_f	zio_vsd_default_cksum_report;
+
+typedef struct zio_vsd_ops {
+	zio_done_func_t		*vsd_free;
+	zio_vsd_cksum_report_f	*vsd_cksum_report;
+} zio_vsd_ops_t;
+
 typedef struct zio_gang_node {
 	zio_gbh_phys_t		*gn_gbh;
 	struct zio_gang_node	*gn_child[SPA_GBH_NBLKPTRS];
@@ -313,7 +346,8 @@
 	/* Stuff for the vdev stack */
 	vdev_t		*io_vd;
 	void		*io_vsd;
-	zio_done_func_t	*io_vsd_free;
+	const zio_vsd_ops_t *io_vsd_ops;
+
 	uint64_t	io_offset;
 	uint64_t	io_deadline;
 	avl_node_t	io_offset_node;
@@ -339,6 +373,7 @@
 	kcondvar_t	io_cv;
 
 	/* FMA state */
+	zio_cksum_report_t *io_cksum_report;
 	uint64_t	io_ena;
 };
 
@@ -447,6 +482,22 @@
 extern int zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error);
 extern int zio_handle_label_injection(zio_t *zio, int error);
 
+/*
+ * Checksum ereport functions
+ */
+extern void zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd, struct zio *zio,
+    uint64_t offset, uint64_t length, void *arg, struct zio_bad_cksum *info);
+extern void zfs_ereport_finish_checksum(zio_cksum_report_t *report,
+    const void *good_data, const void *bad_data, boolean_t drop_if_identical);
+
+extern void zfs_ereport_send_interim_checksum(zio_cksum_report_t *report);
+extern void zfs_ereport_free_checksum(zio_cksum_report_t *report);
+
+/* If we have the good data in hand, this function can be used */
+extern void zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
+    struct zio *zio, uint64_t offset, uint64_t length,
+    const void *good_data, const void *bad_data, struct zio_bad_cksum *info);
+
 #ifdef	__cplusplus
 }
 #endif
--- a/usr/src/uts/common/fs/zfs/sys/zio_checksum.h	Tue Sep 22 17:11:45 2009 -0700
+++ b/usr/src/uts/common/fs/zfs/sys/zio_checksum.h	Tue Sep 22 17:11:54 2009 -0700
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -47,6 +47,15 @@
 	char		*ci_name;	/* descriptive name */
 } zio_checksum_info_t;
 
+typedef struct zio_bad_cksum {
+	zio_cksum_t		zbc_expected;
+	zio_cksum_t		zbc_actual;
+	const char		*zbc_checksum_name;
+	uint8_t			zbc_byteswapped;
+	uint8_t			zbc_injected;
+	uint8_t			zbc_has_cksum;	/* expected/actual valid */
+} zio_bad_cksum_t;
+
 extern zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS];
 
 /*
@@ -64,7 +73,7 @@
 
 extern void zio_checksum_compute(zio_t *zio, enum zio_checksum checksum,
     void *data, uint64_t size);
-extern int zio_checksum_error(zio_t *zio);
+extern int zio_checksum_error(zio_t *zio, zio_bad_cksum_t *out);
 
 #ifdef	__cplusplus
 }
--- a/usr/src/uts/common/fs/zfs/vdev_disk.c	Tue Sep 22 17:11:45 2009 -0700
+++ b/usr/src/uts/common/fs/zfs/vdev_disk.c	Tue Sep 22 17:11:54 2009 -0700
@@ -318,6 +318,11 @@
 	kmem_free(zio->io_vsd, sizeof (struct dk_callback));
 }
 
+static const zio_vsd_ops_t vdev_disk_vsd_ops = {
+	vdev_disk_ioctl_free,
+	zio_vsd_default_cksum_report
+};
+
 static void
 vdev_disk_ioctl_done(void *zio_arg, int error)
 {
@@ -358,7 +363,7 @@
 			}
 
 			zio->io_vsd = dkc = kmem_alloc(sizeof (*dkc), KM_SLEEP);
-			zio->io_vsd_free = vdev_disk_ioctl_free;
+			zio->io_vsd_ops = &vdev_disk_vsd_ops;
 
 			dkc->dkc_callback = vdev_disk_ioctl_done;
 			dkc->dkc_flag = FLUSH_VOLATILE;
--- a/usr/src/uts/common/fs/zfs/vdev_mirror.c	Tue Sep 22 17:11:45 2009 -0700
+++ b/usr/src/uts/common/fs/zfs/vdev_mirror.c	Tue Sep 22 17:11:54 2009 -0700
@@ -60,6 +60,11 @@
 	kmem_free(mm, offsetof(mirror_map_t, mm_child[mm->mm_children]));
 }
 
+static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
+	vdev_mirror_map_free,
+	zio_vsd_default_cksum_report
+};
+
 static mirror_map_t *
 vdev_mirror_map_alloc(zio_t *zio)
 {
@@ -117,7 +122,7 @@
 	}
 
 	zio->io_vsd = mm;
-	zio->io_vsd_free = vdev_mirror_map_free;
+	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
 	return (mm);
 }
 
--- a/usr/src/uts/common/fs/zfs/vdev_raidz.c	Tue Sep 22 17:11:45 2009 -0700
+++ b/usr/src/uts/common/fs/zfs/vdev_raidz.c	Tue Sep 22 17:11:54 2009 -0700
@@ -103,6 +103,7 @@
 	uint64_t rc_offset;		/* device offset */
 	uint64_t rc_size;		/* I/O size */
 	void *rc_data;			/* I/O data */
+	void *rc_gdata;			/* used to store the "good" version */
 	int rc_error;			/* I/O error for this device */
 	uint8_t rc_tried;		/* Did we attempt this I/O column? */
 	uint8_t rc_skipped;		/* Did we skip this I/O column? */
@@ -118,6 +119,10 @@
 	uint64_t rm_firstdatacol;	/* First data column/parity count */
 	uint64_t rm_nskip;		/* Skipped sectors for padding */
 	uint64_t rm_skipstart;	/* Column index of padding start */
+	void *rm_datacopy;		/* rm_asize-buffer of copied data */
+	uintptr_t rm_reports;		/* # of referencing checksum reports */
+	uint8_t	rm_freed;		/* map no longer has referencing ZIO */
+	uint8_t	rm_ecksuminjected;	/* checksum error was injected */
 	raidz_col_t rm_col[1];		/* Flexible array of I/O columns */
 } raidz_map_t;
 
@@ -227,6 +233,8 @@
 	0x74, 0xd6, 0xf4, 0xea, 0xa8, 0x50, 0x58, 0xaf,
 };
 
+static void vdev_raidz_generate_parity(raidz_map_t *rm);
+
 /*
  * Multiply a given number by 2 raised to the given power.
  */
@@ -247,17 +255,184 @@
 }
 
 static void
-vdev_raidz_map_free(zio_t *zio)
+vdev_raidz_map_free(raidz_map_t *rm)
 {
-	raidz_map_t *rm = zio->io_vsd;
 	int c;
+	size_t size = rm->rm_asize;	/* will hold data-size after the loop */
 
-	for (c = 0; c < rm->rm_firstdatacol; c++)
+	for (c = 0; c < rm->rm_firstdatacol; c++) {
+		size -= rm->rm_col[c].rc_size;
+
 		zio_buf_free(rm->rm_col[c].rc_data, rm->rm_col[c].rc_size);
 
+		if (rm->rm_col[c].rc_gdata != NULL)
+			zio_buf_free(rm->rm_col[c].rc_gdata,
+			    rm->rm_col[c].rc_size);
+	}
+
+	if (rm->rm_datacopy != NULL)
+		zio_buf_free(rm->rm_datacopy, size);
+
 	kmem_free(rm, offsetof(raidz_map_t, rm_col[rm->rm_scols]));
 }
 
+static void
+vdev_raidz_map_free_vsd(zio_t *zio)
+{
+	raidz_map_t *rm = zio->io_vsd;
+
+	ASSERT3U(rm->rm_freed, ==, 0);
+	rm->rm_freed = 1;
+
+	if (rm->rm_reports == 0)
+		vdev_raidz_map_free(rm);
+}
+
+/*ARGSUSED*/
+static void
+vdev_raidz_cksum_free(void *arg, size_t ignored)
+{
+	raidz_map_t *rm = arg;
+
+	ASSERT3U(rm->rm_reports, >, 0);
+	ASSERT3U(rm->rm_freed, !=, 0);
+
+	if (--rm->rm_reports == 0)
+		vdev_raidz_map_free(rm);
+}
+
+static void
+vdev_raidz_cksum_finish(zio_cksum_report_t *zcr, const void *good_data)
+{
+	raidz_map_t *rm = zcr->zcr_cbdata;
+	size_t c = zcr->zcr_cbinfo;
+	size_t x;
+
+	const char *good = NULL;
+	const char *bad = rm->rm_col[c].rc_data;
+
+	if (good_data == NULL) {
+		zfs_ereport_finish_checksum(zcr, NULL, NULL, B_FALSE);
+		return;
+	}
+
+	if (c < rm->rm_firstdatacol) {
+		/*
+		 * The first time through, calculate the parity blocks for
+		 * the good data (this relies on the fact that the good
+		 * data never changes for a given logical ZIO)
+		 */
+		if (rm->rm_col[0].rc_gdata == NULL) {
+			char *bad_parity[VDEV_RAIDZ_MAXPARITY];
+			char *buf;
+
+			/*
+			 * Set up the rm_col[]s to generate the parity for
+			 * good_data, first saving the parity bufs and
+			 * replacing them with buffers to hold the result.
+			 */
+			for (x = 0; x < rm->rm_firstdatacol; x++) {
+				bad_parity[x] = rm->rm_col[x].rc_data;
+				rm->rm_col[x].rc_data = rm->rm_col[x].rc_gdata =
+				    zio_buf_alloc(rm->rm_col[x].rc_size);
+			}
+
+			/* fill in the data columns from good_data */
+			buf = (char *)good_data;
+			for (; x < rm->rm_cols; x++) {
+				rm->rm_col[x].rc_data = buf;
+				buf += rm->rm_col[x].rc_size;
+			}
+
+			/*
+			 * Construct the parity from the good data.
+			 */
+			vdev_raidz_generate_parity(rm);
+
+			/* restore everything back to its original state */
+			for (x = 0; x < rm->rm_firstdatacol; x++)
+				rm->rm_col[x].rc_data = bad_parity[x];
+
+			buf = rm->rm_datacopy;
+			for (x = rm->rm_firstdatacol; x < rm->rm_cols; x++) {
+				rm->rm_col[x].rc_data = buf;
+				buf += rm->rm_col[x].rc_size;
+			}
+		}
+
+		ASSERT3P(rm->rm_col[c].rc_gdata, !=, NULL);
+		good = rm->rm_col[c].rc_gdata;
+	} else {
+		/* adjust good_data to point at the start of our column */
+		good = good_data;
+
+		for (x = rm->rm_firstdatacol; x < c; x++)
+			good += rm->rm_col[x].rc_size;
+	}
+
+	/* we drop the ereport if it ends up that the data was good */
+	zfs_ereport_finish_checksum(zcr, good, bad, B_TRUE);
+}
+
+/*
+ * Invoked indirectly by zfs_ereport_start_checksum(), called
+ * below when our read operation fails completely.  The main point
+ * is to keep a copy of everything we read from disk, so that at
+ * vdev_raidz_cksum_finish() time we can compare it with the good data.
+ */
+static void
+vdev_raidz_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *arg)
+{
+	size_t c = (size_t)(uintptr_t)arg;
+	caddr_t buf;
+
+	raidz_map_t *rm = zio->io_vsd;
+	size_t size;
+
+	/* set up the report and bump the refcount  */
+	zcr->zcr_cbdata = rm;
+	zcr->zcr_cbinfo = c;
+	zcr->zcr_finish = vdev_raidz_cksum_finish;
+	zcr->zcr_free = vdev_raidz_cksum_free;
+
+	rm->rm_reports++;
+	ASSERT3U(rm->rm_reports, >, 0);
+
+	if (rm->rm_reports != 1)
+		return;
+
+	/*
+	 * It's the first time we're called, so we need to copy the data
+	 * aside; there's no guarantee that our zio's buffer won't be
+	 * re-used for something else.
+	 *
+	 * Our parity data is already in separate buffers, so there's no need
+	 * to copy them.
+	 */
+	ASSERT3P(rm->rm_datacopy, ==, NULL);
+
+	/* rm_asize includes the parity blocks; subtract them out */
+	size = rm->rm_asize;
+	for (c = 0; c < rm->rm_firstdatacol; c++)
+		size -= rm->rm_col[c].rc_size;
+
+	buf = rm->rm_datacopy = zio_buf_alloc(size);
+	for (; c < rm->rm_cols; c++) {
+		raidz_col_t *col = &rm->rm_col[c];
+
+		bcopy(col->rc_data, buf, col->rc_size);
+		col->rc_data = buf;
+
+		buf += col->rc_size;
+	}
+	ASSERT3P(buf - (caddr_t)rm->rm_datacopy, ==, size);
+}
+
+static const zio_vsd_ops_t vdev_raidz_vsd_ops = {
+	vdev_raidz_map_free_vsd,
+	vdev_raidz_cksum_report
+};
+
 static raidz_map_t *
 vdev_raidz_map_alloc(zio_t *zio, uint64_t unit_shift, uint64_t dcols,
     uint64_t nparity)
@@ -293,6 +468,10 @@
 	rm->rm_missingdata = 0;
 	rm->rm_missingparity = 0;
 	rm->rm_firstdatacol = nparity;
+	rm->rm_datacopy = NULL;
+	rm->rm_reports = 0;
+	rm->rm_freed = 0;
+	rm->rm_ecksuminjected = 0;
 
 	asize = 0;
 
@@ -306,6 +485,7 @@
 		rm->rm_col[c].rc_devidx = col;
 		rm->rm_col[c].rc_offset = coff;
 		rm->rm_col[c].rc_data = NULL;
+		rm->rm_col[c].rc_gdata = NULL;
 		rm->rm_col[c].rc_error = 0;
 		rm->rm_col[c].rc_tried = 0;
 		rm->rm_col[c].rc_skipped = 0;
@@ -371,7 +551,7 @@
 	}
 
 	zio->io_vsd = rm;
-	zio->io_vsd_free = vdev_raidz_map_free;
+	zio->io_vsd_ops = &vdev_raidz_vsd_ops;
 	return (rm);
 }
 
@@ -1431,19 +1611,42 @@
  * Report a checksum error for a child of a RAID-Z device.
  */
 static void
-raidz_checksum_error(zio_t *zio, raidz_col_t *rc)
+raidz_checksum_error(zio_t *zio, raidz_col_t *rc, void *bad_data)
 {
 	vdev_t *vd = zio->io_vd->vdev_child[rc->rc_devidx];
 
 	if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
+		zio_bad_cksum_t zbc;
+		raidz_map_t *rm = zio->io_vsd;
+
 		mutex_enter(&vd->vdev_stat_lock);
 		vd->vdev_stat.vs_checksum_errors++;
 		mutex_exit(&vd->vdev_stat_lock);
+
+		zbc.zbc_has_cksum = 0;
+		zbc.zbc_injected = rm->rm_ecksuminjected;
+
+		zfs_ereport_post_checksum(zio->io_spa, vd, zio,
+		    rc->rc_offset, rc->rc_size, rc->rc_data, bad_data,
+		    &zbc);
 	}
+}
 
-	if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE))
-		zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM,
-		    zio->io_spa, vd, zio, rc->rc_offset, rc->rc_size);
+/*
+ * We keep track of whether or not there were any injected errors, so that
+ * any ereports we generate can note it.
+ */
+static int
+raidz_checksum_verify(zio_t *zio)
+{
+	zio_bad_cksum_t zbc;
+	raidz_map_t *rm = zio->io_vsd;
+
+	int ret = zio_checksum_error(zio, &zbc);
+	if (ret != 0 && zbc.zbc_injected != 0)
+		rm->rm_ecksuminjected = 1;
+
+	return (ret);
 }
 
 /*
@@ -1474,7 +1677,7 @@
 		if (!rc->rc_tried || rc->rc_error != 0)
 			continue;
 		if (bcmp(orig[c], rc->rc_data, rc->rc_size) != 0) {
-			raidz_checksum_error(zio, rc);
+			raidz_checksum_error(zio, rc, orig[c]);
 			rc->rc_error = ECKSUM;
 			ret++;
 		}
@@ -1589,19 +1792,16 @@
 			 * success.
 			 */
 			code = vdev_raidz_reconstruct(rm, tgts, n);
-			if (zio_checksum_error(zio) == 0) {
+			if (raidz_checksum_verify(zio) == 0) {
 				atomic_inc_64(&raidz_corrected[code]);
 
 				for (i = 0; i < n; i++) {
 					c = tgts[i];
 					rc = &rm->rm_col[c];
 					ASSERT(rc->rc_error == 0);
-					if (rc->rc_tried) {
-						if (bcmp(orig[i], rc->rc_data,
-						    rc->rc_size) == 0)
-							continue;
-						raidz_checksum_error(zio, rc);
-					}
+					if (rc->rc_tried)
+						raidz_checksum_error(zio, rc,
+						    orig[i]);
 					rc->rc_error = ECKSUM;
 				}
 
@@ -1738,7 +1938,7 @@
 	 */
 	if (total_errors <= rm->rm_firstdatacol - parity_untried) {
 		if (data_errors == 0) {
-			if (zio_checksum_error(zio) == 0) {
+			if (raidz_checksum_verify(zio) == 0) {
 				/*
 				 * If we read parity information (unnecessarily
 				 * as it happens since no reconstruction was
@@ -1784,7 +1984,7 @@
 
 			code = vdev_raidz_reconstruct(rm, tgts, n);
 
-			if (zio_checksum_error(zio) == 0) {
+			if (raidz_checksum_verify(zio) == 0) {
 				atomic_inc_64(&raidz_corrected[code]);
 
 				/*
@@ -1853,18 +2053,11 @@
 	 * reconstruction over all possible combinations. If that fails,
 	 * we're cooked.
 	 */
-	if (total_errors >= rm->rm_firstdatacol) {
+	if (total_errors > rm->rm_firstdatacol) {
 		zio->io_error = vdev_raidz_worst_error(rm);
-		/*
-		 * If there were exactly as many device errors as parity
-		 * columns, yet we couldn't reconstruct the data, then at
-		 * least one device must have returned bad data silently.
-		 */
-		if (total_errors == rm->rm_firstdatacol)
-			zio->io_error = zio_worst_error(zio->io_error, ECKSUM);
 
-	} else if ((code = vdev_raidz_combrec(zio, total_errors,
-	    data_errors)) != 0) {
+	} else if (total_errors < rm->rm_firstdatacol &&
+	    (code = vdev_raidz_combrec(zio, total_errors, data_errors)) != 0) {
 		/*
 		 * If we didn't use all the available parity for the
 		 * combinatorial reconstruction, verify that the remaining
@@ -1874,17 +2067,30 @@
 			(void) raidz_parity_verify(zio, rm);
 	} else {
 		/*
-		 * All combinations failed to checksum. Generate checksum
-		 * ereports for all children.
+		 * We're here because either:
+		 *
+		 *	total_errors == rm_firstdatacol, or
+		 *	vdev_raidz_combrec() failed
+		 *
+		 * In either case, there is enough bad data to prevent
+		 * reconstruction.
+		 *
+		 * Start checksum ereports for all children which haven't
+		 * failed.
 		 */
 		zio->io_error = ECKSUM;
 
-		if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
-			for (c = 0; c < rm->rm_cols; c++) {
-				rc = &rm->rm_col[c];
-				zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM,
+		for (c = 0; c < rm->rm_cols; c++) {
+			rc = &rm->rm_col[c];
+			if (rc->rc_error == 0) {
+				zio_bad_cksum_t zbc;
+				zbc.zbc_has_cksum = 0;
+				zbc.zbc_injected = rm->rm_ecksuminjected;
+
+				zfs_ereport_start_checksum(
 				    zio->io_spa, vd->vdev_child[rc->rc_devidx],
-				    zio, rc->rc_offset, rc->rc_size);
+				    zio, rc->rc_offset, rc->rc_size,
+				    (void *)(uintptr_t)c, &zbc);
 			}
 		}
 	}
--- a/usr/src/uts/common/fs/zfs/zfs_fm.c	Tue Sep 22 17:11:45 2009 -0700
+++ b/usr/src/uts/common/fs/zfs/zfs_fm.c	Tue Sep 22 17:11:54 2009 -0700
@@ -28,6 +28,7 @@
 #include <sys/vdev.h>
 #include <sys/vdev_impl.h>
 #include <sys/zio.h>
+#include <sys/zio_checksum.h>
 
 #include <sys/fm/fs/zfs.h>
 #include <sys/fm/protocol.h>
@@ -87,13 +88,23 @@
  * this pointer is set to NULL, and no ereport will be generated (since it
  * doesn't actually correspond to any particular device or piece of data,
  * and the caller will always retry without caching or queueing anyway).
+ *
+ * For checksum errors, we want to include more information about the actual
+ * error which occurs.  Accordingly, we build an ereport when the error is
+ * noticed, but instead of sending it in immediately, we hang it off of the
+ * io_cksum_report field of the logical IO.  When the logical IO completes
+ * (successfully or not), zfs_ereport_finish_checksum() is called with the
+ * good and bad versions of the buffer (if available), and we annotate the
+ * ereport with information about the differences.
  */
-void
-zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
+#ifdef _KERNEL
+static void
+zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
+    const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
     uint64_t stateoroffset, uint64_t size)
 {
-#ifdef _KERNEL
 	nvlist_t *ereport, *detector;
+
 	uint64_t ena;
 	char class[64];
 
@@ -331,6 +342,336 @@
 	}
 	mutex_exit(&spa->spa_errlist_lock);
 
+	*ereport_out = ereport;
+	*detector_out = detector;
+}
+
+/* if it's <= 128 bytes, save the corruption directly */
+#define	ZFM_MAX_INLINE		(128 / sizeof (uint64_t))
+
+#define	MAX_RANGES		16
+
+typedef struct zfs_ecksum_info {
+	/* histograms of set and cleared bits by bit number in a 64-bit word */
+	uint16_t zei_histogram_set[sizeof (uint64_t) * NBBY];
+	uint16_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];
+
+	/* inline arrays of bits set and cleared. */
+	uint64_t zei_bits_set[ZFM_MAX_INLINE];
+	uint64_t zei_bits_cleared[ZFM_MAX_INLINE];
+
+	/*
+	 * for each range, the number of bits set and cleared.  The Hamming
+	 * distance between the good and bad buffers is the sum of them all.
+	 */
+	uint32_t zei_range_sets[MAX_RANGES];
+	uint32_t zei_range_clears[MAX_RANGES];
+
+	struct zei_ranges {
+		uint32_t	zr_start;
+		uint32_t	zr_end;
+	} zei_ranges[MAX_RANGES];
+
+	size_t	zei_range_count;
+	uint32_t zei_mingap;
+	uint32_t zei_allowed_mingap;
+
+} zfs_ecksum_info_t;
+
+static void
+update_histogram(uint64_t value_arg, uint16_t *hist, uint32_t *count)
+{
+	size_t i;
+	size_t bits = 0;
+	uint64_t value = BE_64(value_arg);
+
+	/* We store the bits in big-endian (largest-first) order */
+	for (i = 0; i < 64; i++) {
+		if (value & (1ull << i)) {
+			hist[63 - i]++;
+			++bits;
+		}
+	}
+	/* update the count of bits changed */
+	*count += bits;
+}
+
+/*
+ * We've now filled up the range array, and need to increase "mingap" and
+ * shrink the range list accordingly.  zei_mingap is always the smallest
+ * distance between array entries, so we set the new_allowed_gap to be
+ * one greater than that.  We then go through the list, joining together
+ * any ranges which are closer than the new_allowed_gap.
+ *
+ * By construction, there will be at least one.  We also update zei_mingap
+ * to the new smallest gap, to prepare for our next invocation.
+ */
+static void
+shrink_ranges(zfs_ecksum_info_t *eip)
+{
+	uint32_t mingap = UINT32_MAX;
+	uint32_t new_allowed_gap = eip->zei_mingap + 1;
+
+	size_t idx, output;
+	size_t max = eip->zei_range_count;
+
+	struct zei_ranges *r = eip->zei_ranges;
+
+	ASSERT3U(eip->zei_range_count, >, 0);
+	ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);
+
+	output = idx = 0;
+	while (idx < max - 1) {
+		uint32_t start = r[idx].zr_start;
+		uint32_t end = r[idx].zr_end;
+
+		while (idx < max - 1) {
+			idx++;
+
+			uint32_t nstart = r[idx].zr_start;
+			uint32_t nend = r[idx].zr_end;
+
+			uint32_t gap = nstart - end;
+			if (gap < new_allowed_gap) {
+				end = nend;
+				continue;
+			}
+			if (gap < mingap)
+				mingap = gap;
+			break;
+		}
+		r[output].zr_start = start;
+		r[output].zr_end = end;
+		output++;
+	}
+	ASSERT3U(output, <, eip->zei_range_count);
+	eip->zei_range_count = output;
+	eip->zei_mingap = mingap;
+	eip->zei_allowed_mingap = new_allowed_gap;
+}
+
+static void
+add_range(zfs_ecksum_info_t *eip, int start, int end)
+{
+	struct zei_ranges *r = eip->zei_ranges;
+	size_t count = eip->zei_range_count;
+
+	if (count >= MAX_RANGES) {
+		shrink_ranges(eip);
+		count = eip->zei_range_count;
+	}
+	if (count == 0) {
+		eip->zei_mingap = UINT32_MAX;
+		eip->zei_allowed_mingap = 1;
+	} else {
+		int gap = start - r[count - 1].zr_end;
+
+		if (gap < eip->zei_allowed_mingap) {
+			r[count - 1].zr_end = end;
+			return;
+		}
+		if (gap < eip->zei_mingap)
+			eip->zei_mingap = gap;
+	}
+	r[count].zr_start = start;
+	r[count].zr_end = end;
+	eip->zei_range_count++;
+}
+
+static size_t
+range_total_size(zfs_ecksum_info_t *eip)
+{
+	struct zei_ranges *r = eip->zei_ranges;
+	size_t count = eip->zei_range_count;
+	size_t result = 0;
+	size_t idx;
+
+	for (idx = 0; idx < count; idx++)
+		result += (r[idx].zr_end - r[idx].zr_start);
+
+	return (result);
+}
+
+static zfs_ecksum_info_t *
+annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
+    const uint8_t *goodbuf, const uint8_t *badbuf, size_t size,
+    boolean_t drop_if_identical)
+{
+	const uint64_t *good = (const uint64_t *)goodbuf;
+	const uint64_t *bad = (const uint64_t *)badbuf;
+
+	uint64_t allset = 0;
+	uint64_t allcleared = 0;
+
+	size_t nui64s = size / sizeof (uint64_t);
+
+	size_t inline_size;
+	int no_inline = 0;
+	size_t idx;
+	size_t range;
+
+	size_t offset = 0;
+	ssize_t start = -1;
+
+	zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_SLEEP);
+
+	/* don't do any annotation for injected checksum errors */
+	if (info != NULL && info->zbc_injected)
+		return (eip);
+
+	if (info != NULL && info->zbc_has_cksum) {
+		fm_payload_set(ereport,
+		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
+		    DATA_TYPE_UINT64_ARRAY,
+		    sizeof (info->zbc_expected) / sizeof (uint64_t),
+		    (uint64_t *)&info->zbc_expected,
+		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
+		    DATA_TYPE_UINT64_ARRAY,
+		    sizeof (info->zbc_actual) / sizeof (uint64_t),
+		    (uint64_t *)&info->zbc_actual,
+		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
+		    DATA_TYPE_STRING,
+		    info->zbc_checksum_name,
+		    NULL);
+
+		if (info->zbc_byteswapped) {
+			fm_payload_set(ereport,
+			    FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
+			    DATA_TYPE_BOOLEAN, 1,
+			    NULL);
+		}
+	}
+
+	if (badbuf == NULL || goodbuf == NULL)
+		return (eip);
+
+	ASSERT3U(nui64s, <=, UINT16_MAX);
+	ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
+	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
+	ASSERT3U(size, <=, UINT32_MAX);
+
+	/* build up the range list by comparing the two buffers. */
+	for (idx = 0; idx < nui64s; idx++) {
+		if (good[idx] == bad[idx]) {
+			if (start == -1)
+				continue;
+
+			add_range(eip, start, idx);
+			start = -1;
+		} else {
+			if (start != -1)
+				continue;
+
+			start = idx;
+		}
+	}
+	if (start != -1)
+		add_range(eip, start, idx);
+
+	/* See if it will fit in our inline buffers */
+	inline_size = range_total_size(eip);
+	if (inline_size > ZFM_MAX_INLINE)
+		no_inline = 1;
+
+	/*
+	 * If there is no change and we want to drop if the buffers are
+	 * identical, do so.
+	 */
+	if (inline_size == 0 && drop_if_identical) {
+		kmem_free(eip, sizeof (*eip));
+		return (NULL);
+	}
+
+	/*
+	 * Now walk through the ranges, filling in the details of the
+	 * differences.  Also convert our uint64_t-array offsets to byte
+	 * offsets.
+	 */
+	for (range = 0; range < eip->zei_range_count; range++) {
+		size_t start = eip->zei_ranges[range].zr_start;
+		size_t end = eip->zei_ranges[range].zr_end;
+
+		for (idx = start; idx < end; idx++) {
+			uint64_t set, cleared;
+
+			// bits set in bad, but not in good
+			set = ((~good[idx]) & bad[idx]);
+			// bits set in good, but not in bad
+			cleared = (good[idx] & (~bad[idx]));
+
+			allset |= set;
+			allcleared |= cleared;
+
+			if (!no_inline) {
+				ASSERT3U(offset, <, inline_size);
+				eip->zei_bits_set[offset] = set;
+				eip->zei_bits_cleared[offset] = cleared;
+				offset++;
+			}
+
+			update_histogram(set, eip->zei_histogram_set,
+			    &eip->zei_range_sets[range]);
+			update_histogram(cleared, eip->zei_histogram_cleared,
+			    &eip->zei_range_clears[range]);
+		}
+
+		/* convert to byte offsets */
+		eip->zei_ranges[range].zr_start	*= sizeof (uint64_t);
+		eip->zei_ranges[range].zr_end	*= sizeof (uint64_t);
+	}
+	eip->zei_allowed_mingap	*= sizeof (uint64_t);
+	inline_size		*= sizeof (uint64_t);
+
+	/* fill in ereport */
+	fm_payload_set(ereport,
+	    FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
+	    DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
+	    (uint32_t *)eip->zei_ranges,
+	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
+	    DATA_TYPE_UINT32, eip->zei_allowed_mingap,
+	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
+	    DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
+	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
+	    DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
+	    NULL);
+
+	if (!no_inline) {
+		fm_payload_set(ereport,
+		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
+		    DATA_TYPE_UINT8_ARRAY,
+		    inline_size, (uint8_t *)eip->zei_bits_set,
+		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
+		    DATA_TYPE_UINT8_ARRAY,
+		    inline_size, (uint8_t *)eip->zei_bits_cleared,
+		    NULL);
+	} else {
+		fm_payload_set(ereport,
+		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
+		    DATA_TYPE_UINT16_ARRAY,
+		    NBBY * sizeof (uint64_t), eip->zei_histogram_set,
+		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
+		    DATA_TYPE_UINT16_ARRAY,
+		    NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
+		    NULL);
+	}
+	return (eip);
+}
+#endif
+
+void
+zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
+    uint64_t stateoroffset, uint64_t size)
+{
+#ifdef _KERNEL
+	nvlist_t *ereport = NULL;
+	nvlist_t *detector = NULL;
+
+	zfs_ereport_start(&ereport, &detector,
+	    subclass, spa, vd, zio, stateoroffset, size);
+
+	if (ereport == NULL)
+		return;
+
 	fm_ereport_post(ereport, EVCH_SLEEP);
 
 	fm_nvlist_destroy(ereport, FM_NVA_FREE);
@@ -338,6 +679,121 @@
 #endif
 }
 
+void
+zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
+    struct zio *zio, uint64_t offset, uint64_t length, void *arg,
+    zio_bad_cksum_t *info)
+{
+	zio_cksum_report_t *report = kmem_zalloc(sizeof (*report), KM_SLEEP);
+
+	if (zio->io_vsd != NULL)
+		zio->io_vsd_ops->vsd_cksum_report(zio, report, arg);
+	else
+		zio_vsd_default_cksum_report(zio, report, arg);
+
+	/* copy the checksum failure information if it was provided */
+	if (info != NULL) {
+		report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP);
+		bcopy(info, report->zcr_ckinfo, sizeof (*info));
+	}
+
+	report->zcr_length = length;
+
+#ifdef _KERNEL
+	zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector,
+	    FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);
+
+	if (report->zcr_ereport == NULL) {
+		report->zcr_free(report->zcr_cbdata, report->zcr_cbinfo);
+		kmem_free(report, sizeof (*report));
+		return;
+	}
+#endif
+
+	mutex_enter(&spa->spa_errlist_lock);
+	report->zcr_next = zio->io_logical->io_cksum_report;
+	zio->io_logical->io_cksum_report = report;
+	mutex_exit(&spa->spa_errlist_lock);
+}
+
+void
+zfs_ereport_finish_checksum(zio_cksum_report_t *report,
+    const void *good_data, const void *bad_data, boolean_t drop_if_identical)
+{
+#ifdef _KERNEL
+	zfs_ecksum_info_t *info = NULL;
+	info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
+	    good_data, bad_data, report->zcr_length, drop_if_identical);
+
+	if (info != NULL)
+		fm_ereport_post(report->zcr_ereport, EVCH_SLEEP);
+
+	fm_nvlist_destroy(report->zcr_ereport, FM_NVA_FREE);
+	fm_nvlist_destroy(report->zcr_detector, FM_NVA_FREE);
+	report->zcr_ereport = report->zcr_detector = NULL;
+
+	if (info != NULL)
+		kmem_free(info, sizeof (*info));
+#endif
+}
+
+void
+zfs_ereport_free_checksum(zio_cksum_report_t *rpt)
+{
+#ifdef _KERNEL
+	if (rpt->zcr_ereport != NULL) {
+		fm_nvlist_destroy(rpt->zcr_ereport,
+		    FM_NVA_FREE);
+		fm_nvlist_destroy(rpt->zcr_detector,
+		    FM_NVA_FREE);
+	}
+#endif
+	rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo);
+
+	if (rpt->zcr_ckinfo != NULL)
+		kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo));
+
+	kmem_free(rpt, sizeof (*rpt));
+}
+
+void
+zfs_ereport_send_interim_checksum(zio_cksum_report_t *report)
+{
+#ifdef _KERNEL
+	fm_ereport_post(report->zcr_ereport, EVCH_SLEEP);
+#endif
+}
+
+void
+zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
+    struct zio *zio, uint64_t offset, uint64_t length,
+    const void *good_data, const void *bad_data, zio_bad_cksum_t *zbc)
+{
+#ifdef _KERNEL
+	nvlist_t *ereport = NULL;
+	nvlist_t *detector = NULL;
+	zfs_ecksum_info_t *info;
+
+	zfs_ereport_start(&ereport, &detector,
+	    FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);
+
+	if (ereport == NULL)
+		return;
+
+	info = annotate_ecksum(ereport, zbc, good_data, bad_data, length,
+	    B_FALSE);
+
+	if (info != NULL)
+		fm_ereport_post(ereport, EVCH_SLEEP);
+
+	fm_nvlist_destroy(ereport, FM_NVA_FREE);
+	fm_nvlist_destroy(detector, FM_NVA_FREE);
+
+	if (info != NULL)
+		kmem_free(info, sizeof (*info));
+#endif
+}
+
 static void
 zfs_post_common(spa_t *spa, vdev_t *vd, const char *name)
 {
--- a/usr/src/uts/common/fs/zfs/zio.c	Tue Sep 22 17:11:45 2009 -0700
+++ b/usr/src/uts/common/fs/zfs/zio.c	Tue Sep 22 17:11:54 2009 -0700
@@ -1879,6 +1879,32 @@
 	return (ZIO_PIPELINE_CONTINUE);
 }
 
+/*
+ * For non-raidz ZIOs, we can just copy aside the bad data read from the
+ * disk, and use that to finish the checksum ereport later.
+ */
+static void
+zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
+    const void *good_buf)
+{
+	/* no processing needed */
+	zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
+}
+
+/*ARGSUSED*/
+void
+zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored)
+{
+	void *buf = zio_buf_alloc(zio->io_size);
+
+	bcopy(zio->io_data, buf, zio->io_size);
+
+	zcr->zcr_cbinfo = zio->io_size;
+	zcr->zcr_cbdata = buf;
+	zcr->zcr_finish = zio_vsd_default_cksum_finish;
+	zcr->zcr_free = zio_buf_free;
+}
+
 static int
 zio_vdev_io_assess(zio_t *zio)
 {
@@ -1891,7 +1917,7 @@
 		spa_config_exit(zio->io_spa, SCL_ZIO, zio);
 
 	if (zio->io_vsd != NULL) {
-		zio->io_vsd_free(zio);
+		zio->io_vsd_ops->vsd_free(zio);
 		zio->io_vsd = NULL;
 	}
 
@@ -2001,6 +2027,8 @@
 static int
 zio_checksum_verify(zio_t *zio)
 {
+	zio_bad_cksum_t info;
+
 	blkptr_t *bp = zio->io_bp;
 	int error;
 
@@ -2015,11 +2043,12 @@
 		ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
 	}
 
-	if ((error = zio_checksum_error(zio)) != 0) {
+	if ((error = zio_checksum_error(zio, &info)) != 0) {
 		zio->io_error = error;
 		if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
-			zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM,
-			    zio->io_spa, zio->io_vd, zio, 0, 0);
+			zfs_ereport_start_checksum(zio->io_spa,
+			    zio->io_vd, zio, zio->io_offset,
+			    zio->io_size, NULL, &info);
 		}
 	}
 
@@ -2201,6 +2230,14 @@
 
 		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
 			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
+
+		/*
+		 * Here is a possibly good place to attempt to do
+		 * either combinatorial reconstruction or error correction
+		 * based on checksums.  It also might be a good place
+		 * to send out preliminary ereports before we suspend
+		 * processing.
+		 */
 	}
 
 	/*
@@ -2297,6 +2334,20 @@
 	ASSERT(zio->io_reexecute == 0);
 	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
 
+	/* Report any checksum errors, since the IO is complete */
+	while (zio->io_cksum_report != NULL) {
+		zio_cksum_report_t *rpt = zio->io_cksum_report;
+
+		zio->io_cksum_report = rpt->zcr_next;
+		rpt->zcr_next = NULL;
+
+		/* pass in our data buffer, as the good data, only if the I/O succeeded. */
+		rpt->zcr_finish(rpt,
+		    (zio->io_error == 0) ? zio->io_data : NULL);
+
+		zfs_ereport_free_checksum(rpt);
+	}
+
 	/*
 	 * It is the responsibility of the done callback to ensure that this
 	 * particular zio is no longer discoverable for adoption, and as
--- a/usr/src/uts/common/fs/zfs/zio_checksum.c	Tue Sep 22 17:11:45 2009 -0700
+++ b/usr/src/uts/common/fs/zfs/zio_checksum.c	Tue Sep 22 17:11:54 2009 -0700
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -151,16 +151,17 @@
 }
 
 int
-zio_checksum_error(zio_t *zio)
+zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
 {
 	blkptr_t *bp = zio->io_bp;
 	uint_t checksum = (bp == NULL ? zio->io_prop.zp_checksum :
 	    (BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));
 	int byteswap;
-	void *data = zio->io_data;
+	int error;
 	uint64_t size = (bp == NULL ? zio->io_size :
 	    (BP_IS_GANG(bp) ? SPA_GANGBLOCKSIZE : BP_GET_PSIZE(bp)));
 	uint64_t offset = zio->io_offset;
+	void *data = zio->io_data;
 	zio_block_tail_t *zbt = (zio_block_tail_t *)((char *)data + size) - 1;
 	zio_checksum_info_t *ci = &zio_checksum_table[checksum];
 	zio_cksum_t actual_cksum, expected_cksum, verifier;
@@ -196,11 +197,22 @@
 		ci->ci_func[byteswap](data, size, &actual_cksum);
 	}
 
+	info->zbc_expected = expected_cksum;
+	info->zbc_actual = actual_cksum;
+	info->zbc_checksum_name = ci->ci_name;
+	info->zbc_byteswapped = byteswap;
+	info->zbc_injected = 0;
+	info->zbc_has_cksum = 1;
+
 	if (!ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
 		return (ECKSUM);
 
-	if (zio_injection_enabled && !zio->io_error)
-		return (zio_handle_fault_injection(zio, ECKSUM));
+	if (zio_injection_enabled && !zio->io_error &&
+	    (error = zio_handle_fault_injection(zio, ECKSUM)) != 0) {
+
+		info->zbc_injected = 1;
+		return (error);
+	}
 
 	return (0);
 }
--- a/usr/src/uts/common/sys/fm/fs/zfs.h	Tue Sep 22 17:11:45 2009 -0700
+++ b/usr/src/uts/common/sys/fm/fs/zfs.h	Tue Sep 22 17:11:54 2009 -0700
@@ -68,6 +68,18 @@
 #define	FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET	"zio_offset"
 #define	FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE		"zio_size"
 #define	FM_EREPORT_PAYLOAD_ZFS_PREV_STATE	"prev_state"
+#define	FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED	"cksum_expected"
+#define	FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL	"cksum_actual"
+#define	FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO	"cksum_algorithm"
+#define	FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP	"cksum_byteswap"
+#define	FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES "bad_ranges"
+#define	FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP "bad_ranges_min_gap"
+#define	FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS	"bad_range_sets"
+#define	FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS	"bad_range_clears"
+#define	FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS	"bad_set_bits"
+#define	FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS	"bad_cleared_bits"
+#define	FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM "bad_set_histogram"
+#define	FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM "bad_cleared_histogram"
 
 #define	FM_EREPORT_FAILMODE_WAIT		"wait"
 #define	FM_EREPORT_FAILMODE_CONTINUE		"continue"