usr/src/uts/common/fs/zfs/zil.c
changeset 10879 420834d9266b
parent 10800 469478b180d9
child 10921 8aac17999e4d
@@ -367,11 +367,11 @@
 			zio_free_blk(zilog->zl_spa, &blk, txg);
 			BP_ZERO(&blk);
 		}
 
 		error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
-		    NULL, txg, zilog->zl_logbias != ZFS_LOGBIAS_LATENCY);
+		    NULL, txg, zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
 
 		if (error == 0)
 			zil_init_log_chain(zilog, &blk);
 	}
 
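The last argument of zio_alloc_blk() at this call reads as a use-the-slog flag (the USE_SLOG() macro introduced below is passed in the same position), so the old `!=` test was inverted: latency-biased datasets were steered away from the separate log device and throughput-biased ones onto it. A minimal sketch of how the two expressions disagree, assuming the usual enum ordering (the real values live in sys/fs/zfs.h):

#include <stdio.h>

/* Assumed values for illustration; see sys/fs/zfs.h for the real ones. */
enum { ZFS_LOGBIAS_LATENCY = 0, ZFS_LOGBIAS_THROUGHPUT = 1 };

int
main(void)
{
	int biases[] = { ZFS_LOGBIAS_LATENCY, ZFS_LOGBIAS_THROUGHPUT };

	for (int i = 0; i < 2; i++) {
		int zl_logbias = biases[i];

		printf("logbias=%d  old use_slog=%d  fixed use_slog=%d\n",
		    zl_logbias,
		    zl_logbias != ZFS_LOGBIAS_LATENCY,	/* inverted */
		    zl_logbias == ZFS_LOGBIAS_LATENCY);	/* corrected */
	}
	return (0);
}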
@@ -754,10 +754,20 @@
 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
 	}
 }
 
 /*
+ * Use the slog as long as the logbias is 'latency' and the current commit size
+ * is less than the limit or the total list size is less than 2X the limit.
+ * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
+ */
+uint64_t zil_slog_limit = 1024 * 1024;
+#define	USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
+	(((zilog)->zl_cur_used < zil_slog_limit) || \
+	((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
+
+/*
  * Start a log block write and advance to the next log block.
  * Calls are serialized.
  */
 static lwb_t *
 zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
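The new tunable and macro encode the slog policy: a latency-biased dataset uses the slog only while the current commit is under zil_slog_limit (1 MB by default) or the queued itx backlog is under twice that limit. A self-contained sketch of the policy; the struct here is a hypothetical stand-in carrying just the three fields the macro reads (the real zilog_t lives elsewhere in the ZIL headers):

#include <stdio.h>
#include <stdint.h>

enum { ZFS_LOGBIAS_LATENCY = 0, ZFS_LOGBIAS_THROUGHPUT = 1 };

/* Hypothetical stand-in: only the fields USE_SLOG() reads. */
typedef struct zilog {
	int zl_logbias;
	uint64_t zl_cur_used;
	uint64_t zl_itx_list_sz;
} zilog_t;

uint64_t zil_slog_limit = 1024 * 1024;

#define	USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
	(((zilog)->zl_cur_used < zil_slog_limit) || \
	((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))

int
main(void)
{
	zilog_t small_commit = { ZFS_LOGBIAS_LATENCY, 4096, 8192 };
	zilog_t big_backlog = { ZFS_LOGBIAS_LATENCY, 8 << 20, 8 << 20 };
	zilog_t throughput = { ZFS_LOGBIAS_THROUGHPUT, 4096, 8192 };

	printf("latency, small commit -> slog: %d\n", USE_SLOG(&small_commit));
	printf("latency, big backlog  -> slog: %d\n", USE_SLOG(&big_backlog));
	printf("throughput, any size  -> slog: %d\n", USE_SLOG(&throughput));
	return (0);
}

As the comment notes, setting zil_slog_limit to UINT64_MAX disables the size checks entirely.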
@@ -795,11 +805,11 @@
 		zil_blksz = ZIL_MAX_BLKSZ;
 
 	BP_ZERO(bp);
 	/* pass the old blkptr in order to spread log blocks across devs */
 	error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg,
-	    zilog->zl_logbias != ZFS_LOGBIAS_LATENCY);
+	    USE_SLOG(zilog));
 	if (error) {
 		dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);
 
 		/*
 		 * We dirty the dataset to ensure that zil_sync() will
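At this second allocation site the same inverted test is replaced by USE_SLOG(), so throughput-biased datasets still allocate from the main pool, while latency-biased ones now fall back to the main pool once the commit or backlog outgrows zil_slog_limit. The comment about passing the old blkptr suggests the allocator uses the previous block as a placement hint; a purely hypothetical sketch of that idea, using simple round-robin (the real policy lives in the ZIO/metaslab allocation code):

#include <stdio.h>

/*
 * Hypothetical illustration only: a "previous block" hint lets each
 * new log block land on a different device than the last one did.
 */
#define	NDEVS	4

static int
next_dev(int prev_dev)
{
	/* Start from the device after the previous block's device. */
	return ((prev_dev + 1) % NDEVS);
}

int
main(void)
{
	int dev = 0;

	for (int blk = 0; blk < 6; blk++) {
		dev = next_dev(dev);
		printf("log block %d -> dev %d\n", blk, dev);
	}
	return (0);
}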
@@ -1040,11 +1050,11 @@
 	mutex_enter(&zilog->zl_lock);
 	itx = list_head(&zilog->zl_itx_list);
 	if ((itx != NULL) &&
 	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
 		(void) taskq_dispatch(zilog->zl_clean_taskq,
-		    (task_func_t *)zil_itx_clean, zilog, TQ_SLEEP);
+		    (task_func_t *)zil_itx_clean, zilog, TQ_NOSLEEP);
 	}
 	mutex_exit(&zilog->zl_lock);
 }
 
 static void
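taskq_dispatch() is called here with zl_lock held, so switching from TQ_SLEEP to TQ_NOSLEEP presumably avoids sleeping for taskq resources under the mutex; the (void) cast shows a failed dispatch is tolerated, since the itx list can be cleaned on a later pass. A minimal sketch of that pattern, with POSIX threads standing in for the kernel taskq (hypothetical names throughout):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Stand-in for taskq_dispatch(tq, fn, arg, TQ_NOSLEEP): it may fail
 * (return 0) instead of blocking when no worker is available.
 */
static int
dispatch_nosleep(void *(*fn)(void *), void *arg)
{
	pthread_t tid;

	return (pthread_create(&tid, NULL, fn, arg) == 0);
}

static void *
clean_list(void *arg)
{
	(void) arg;
	printf("cleaning itx list\n");
	return (NULL);
}

int
main(void)
{
	pthread_mutex_lock(&list_lock);
	/* Never block while holding the lock; tolerate failure. */
	(void) dispatch_nosleep(clean_list, NULL);
	pthread_mutex_unlock(&list_lock);

	pthread_exit(NULL);	/* let the worker finish before exit */
}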