usr/src/uts/common/os/kmem.c
changeset 12093 fa0c0f5bf466
parent 11178 d671c0911ce4
child 12681 2d92cdff89ce
comparison: 12092:d8c71dc8ec0d vs. 12093:fa0c0f5bf466
@@ -17,12 +17,11 @@
  * information: Portions Copyright [yyyy] [name of copyright owner]
  *
  * CDDL HEADER END
  */
 /*
- * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
  */
 
 /*
  * Kernel memory allocator, as described in the following two papers and a
  * statement about the consolidator:
@@ -1131,10 +1130,11 @@
 static kmem_cache_t	*kmem_move_cache;
 static taskq_t		*kmem_move_taskq;
 
 static void kmem_cache_scan(kmem_cache_t *);
 static void kmem_cache_defrag(kmem_cache_t *);
+static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
 
 
 kmem_log_header_t	*kmem_transaction_log;
 kmem_log_header_t	*kmem_content_log;
 kmem_log_header_t	*kmem_failure_log;
@@ -1652,55 +1652,32 @@
 	}
 	vmem_free(vmp, slab, cp->cache_slabsize);
 }
 
 static void *
-kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp)
+kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
 {
 	kmem_bufctl_t *bcp, **hash_bucket;
 	void *buf;
+	boolean_t new_slab = (sp->slab_refcnt == 0);
 
 	ASSERT(MUTEX_HELD(&cp->cache_lock));
 	/*
 	 * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
 	 * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
-	 * slab is newly created (sp->slab_refcnt == 0).
+	 * slab is newly created.
 	 */
-	ASSERT((sp->slab_refcnt == 0) || (KMEM_SLAB_IS_PARTIAL(sp) &&
+	ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
 	    (sp == avl_first(&cp->cache_partial_slabs))));
 	ASSERT(sp->slab_cache == cp);
 
 	cp->cache_slab_alloc++;
 	cp->cache_bufslab--;
 	sp->slab_refcnt++;
 
 	bcp = sp->slab_head;
-	if ((sp->slab_head = bcp->bc_next) == NULL) {
-		ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
-		if (sp->slab_refcnt == 1) {
-			ASSERT(sp->slab_chunks == 1);
-		} else {
-			ASSERT(sp->slab_chunks > 1); /* the slab was partial */
-			avl_remove(&cp->cache_partial_slabs, sp);
-			sp->slab_later_count = 0; /* clear history */
-			sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
-			sp->slab_stuck_offset = (uint32_t)-1;
-		}
-		list_insert_head(&cp->cache_complete_slabs, sp);
-		cp->cache_complete_slab_count++;
-	} else {
-		ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
-		if (sp->slab_refcnt == 1) {
-			avl_add(&cp->cache_partial_slabs, sp);
-		} else {
-			/*
-			 * The slab is now more allocated than it was, so the
-			 * order remains unchanged.
-			 */
-			ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
-		}
-	}
+	sp->slab_head = bcp->bc_next;
 
 	if (cp->cache_flags & KMF_HASH) {
 		/*
 		 * Add buffer to allocated-address hash table.
 		 */
@@ -1714,10 +1691,49 @@
 	} else {
 		buf = KMEM_BUF(cp, bcp);
 	}
 
 	ASSERT(KMEM_SLAB_MEMBER(sp, buf));
+
+	if (sp->slab_head == NULL) {
+		ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
+		if (new_slab) {
+			ASSERT(sp->slab_chunks == 1);
+		} else {
+			ASSERT(sp->slab_chunks > 1); /* the slab was partial */
+			avl_remove(&cp->cache_partial_slabs, sp);
+			sp->slab_later_count = 0; /* clear history */
+			sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
+			sp->slab_stuck_offset = (uint32_t)-1;
+		}
+		list_insert_head(&cp->cache_complete_slabs, sp);
+		cp->cache_complete_slab_count++;
+		return (buf);
+	}
+
+	ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
+	/*
+	 * Peek to see if the magazine layer is enabled before
+	 * we prefill.  We're not holding the cpu cache lock,
+	 * so the peek could be wrong, but there's no harm in it.
+	 */
+	if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
+	    (KMEM_CPU_CACHE(cp)->cc_magsize != 0))  {
+		kmem_slab_prefill(cp, sp);
+		return (buf);
+	}
+
+	if (new_slab) {
+		avl_add(&cp->cache_partial_slabs, sp);
+		return (buf);
+	}
+
+	/*
+	 * The slab is now more allocated than it was, so the
+	 * order remains unchanged.
+	 */
+	ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
 	return (buf);
 }
 
 /*
  * Allocate a raw (unconstructed) buffer from cp's slab layer.
@@ -1747,11 +1763,11 @@
 		if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
 			cp->cache_bufmax = cp->cache_buftotal;
 		cp->cache_bufslab += sp->slab_chunks;
 	}
 
-	buf = kmem_slab_alloc_impl(cp, sp);
+	buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
 	ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
 	    (cp->cache_complete_slab_count +
 	    avl_numnodes(&cp->cache_partial_slabs) +
 	    (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
 	mutex_exit(&cp->cache_lock);
@@ -2625,18 +2641,83 @@
 
 	kmem_slab_free(cp, buf);
 }
 
 /*
+ * Used when there's no room to free a buffer to the per-CPU cache.
+ * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the
+ * caller should try freeing to the per-CPU cache again.
+ * Note that we don't directly install the magazine in the cpu cache,
+ * since its state may have changed wildly while the lock was dropped.
+ */
+static int
+kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
+{
+	kmem_magazine_t *emp;
+	kmem_magtype_t *mtp;
+
+	ASSERT(MUTEX_HELD(&ccp->cc_lock));
+	ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
+	    ((uint_t)ccp->cc_rounds == -1)) &&
+	    ((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
+	    ((uint_t)ccp->cc_prounds == -1)));
+
+	emp = kmem_depot_alloc(cp, &cp->cache_empty);
+	if (emp != NULL) {
+		if (ccp->cc_ploaded != NULL)
+			kmem_depot_free(cp, &cp->cache_full,
+			    ccp->cc_ploaded);
+		kmem_cpu_reload(ccp, emp, 0);
+		return (1);
+	}
+	/*
+	 * There are no empty magazines in the depot,
+	 * so try to allocate a new one.  We must drop all locks
+	 * across kmem_cache_alloc() because lower layers may
+	 * attempt to allocate from this cache.
+	 */
+	mtp = cp->cache_magtype;
+	mutex_exit(&ccp->cc_lock);
+	emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
+	mutex_enter(&ccp->cc_lock);
+
+	if (emp != NULL) {
+		/*
+		 * We successfully allocated an empty magazine.
+		 * However, we had to drop ccp->cc_lock to do it,
+		 * so the cache's magazine size may have changed.
+		 * If so, free the magazine and try again.
+		 */
+		if (ccp->cc_magsize != mtp->mt_magsize) {
+			mutex_exit(&ccp->cc_lock);
+			kmem_cache_free(mtp->mt_cache, emp);
+			mutex_enter(&ccp->cc_lock);
+			return (1);
+		}
+
+		/*
+		 * We got a magazine of the right size.  Add it to
+		 * the depot and try the whole dance again.
+		 */
+		kmem_depot_free(cp, &cp->cache_empty, emp);
+		return (1);
+	}
+
+	/*
+	 * We couldn't allocate an empty magazine,
+	 * so fall through to the slab layer.
+	 */
+	return (0);
+}
+
+/*
  * Free a constructed object to cache cp.
  */
 void
 kmem_cache_free(kmem_cache_t *cp, void *buf)
 {
 	kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
-	kmem_magazine_t *emp;
-	kmem_magtype_t *mtp;
 
 	/*
 	 * The client must not free either of the buffers passed to the move
 	 * callback function.
 	 */
@@ -2658,10 +2739,13 @@
 				return;
 		}
 	}
 
 	mutex_enter(&ccp->cc_lock);
+	/*
+	 * Any changes to this logic should be reflected in kmem_slab_prefill()
+	 */
 	for (;;) {
 		/*
 		 * If there's a slot available in the current CPU's
 		 * loaded magazine, just put the object there and return.
 		 */
@@ -2685,68 +2769,114 @@
 		 * If the magazine layer is disabled, break out now.
 		 */
 		if (ccp->cc_magsize == 0)
 			break;
 
+		if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
+			/*
+			 * We couldn't free our constructed object to the
+			 * magazine layer, so apply its destructor and free it
+			 * to the slab layer.
+			 */
+			break;
+		}
+	}
+	mutex_exit(&ccp->cc_lock);
+	kmem_slab_free_constructed(cp, buf, B_TRUE);
+}
+
+static void
+kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
+{
+	kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
+	int cache_flags = cp->cache_flags;
+
+	kmem_bufctl_t *next, *head;
+	size_t nbufs;
+
+	/*
+	 * Completely allocate the newly created slab and put the pre-allocated
+	 * buffers in magazines. Any of the buffers that cannot be put in
+	 * magazines must be returned to the slab.
+	 */
+	ASSERT(MUTEX_HELD(&cp->cache_lock));
+	ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
+	ASSERT(cp->cache_constructor == NULL);
+	ASSERT(sp->slab_cache == cp);
+	ASSERT(sp->slab_refcnt == 1);
+	ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
+	ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
+
+	head = sp->slab_head;
+	nbufs = (sp->slab_chunks - sp->slab_refcnt);
+	sp->slab_head = NULL;
+	sp->slab_refcnt += nbufs;
+	cp->cache_bufslab -= nbufs;
+	cp->cache_slab_alloc += nbufs;
+	list_insert_head(&cp->cache_complete_slabs, sp);
+	cp->cache_complete_slab_count++;
+	mutex_exit(&cp->cache_lock);
+	mutex_enter(&ccp->cc_lock);
+
+	while (head != NULL) {
+		void *buf = KMEM_BUF(cp, head);
 		/*
-		 * Try to get an empty magazine from the depot.
+		 * If there's a slot available in the current CPU's
+		 * loaded magazine, just put the object there and
+		 * continue.
 		 */
-		emp = kmem_depot_alloc(cp, &cp->cache_empty);
-		if (emp != NULL) {
-			if (ccp->cc_ploaded != NULL)
-				kmem_depot_free(cp, &cp->cache_full,
-				    ccp->cc_ploaded);
-			kmem_cpu_reload(ccp, emp, 0);
+		if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
+			ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
+			    buf;
+			ccp->cc_free++;
+			nbufs--;
+			head = head->bc_next;
 			continue;
 		}
 
 		/*
-		 * There are no empty magazines in the depot,
-		 * so try to allocate a new one.  We must drop all locks
-		 * across kmem_cache_alloc() because lower layers may
-		 * attempt to allocate from this cache.
+		 * The loaded magazine is full.  If the previously
+		 * loaded magazine was empty, exchange them and try
+		 * again.
 		 */
-		mtp = cp->cache_magtype;
-		mutex_exit(&ccp->cc_lock);
-		emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
-		mutex_enter(&ccp->cc_lock);
-
-		if (emp != NULL) {
-			/*
-			 * We successfully allocated an empty magazine.
-			 * However, we had to drop ccp->cc_lock to do it,
-			 * so the cache's magazine size may have changed.
-			 * If so, free the magazine and try again.
-			 */
-			if (ccp->cc_magsize != mtp->mt_magsize) {
-				mutex_exit(&ccp->cc_lock);
-				kmem_cache_free(mtp->mt_cache, emp);
-				mutex_enter(&ccp->cc_lock);
-				continue;
-			}
-
-			/*
-			 * We got a magazine of the right size.  Add it to
-			 * the depot and try the whole dance again.
-			 */
-			kmem_depot_free(cp, &cp->cache_empty, emp);
+		if (ccp->cc_prounds == 0) {
+			kmem_cpu_reload(ccp, ccp->cc_ploaded,
+			    ccp->cc_prounds);
 			continue;
 		}
 
 		/*
-		 * We couldn't allocate an empty magazine,
-		 * so fall through to the slab layer.
+		 * If the magazine layer is disabled, break out now.
 		 */
-		break;
+
+		if (ccp->cc_magsize == 0) {
+			break;
+		}
+
+		if (!kmem_cpucache_magazine_alloc(ccp, cp))
+			break;
 	}
 	mutex_exit(&ccp->cc_lock);
-
-	/*
-	 * We couldn't free our constructed object to the magazine layer,
-	 * so apply its destructor and free it to the slab layer.
-	 */
-	kmem_slab_free_constructed(cp, buf, B_TRUE);
+	if (nbufs != 0) {
+		ASSERT(head != NULL);
+
+		/*
+		 * If there was a failure, return remaining objects to
+		 * the slab
+		 */
+		while (head != NULL) {
+			ASSERT(nbufs != 0);
+			next = head->bc_next;
+			head->bc_next = NULL;
+			kmem_slab_free(cp, KMEM_BUF(cp, head));
+			head = next;
+			nbufs--;
+		}
+	}
+	ASSERT(head == NULL);
+	ASSERT(nbufs == 0);
+	mutex_enter(&cp->cache_lock);
 }
 
 void *
 kmem_zalloc(size_t size, int kmflag)
 {
@@ -3665,10 +3795,13 @@
 		cp->cache_flags &= ~KMF_DEBUG;
 
 	if (cflags & KMC_NOTOUCH)
 		cp->cache_flags &= ~KMF_TOUCH;
 
+	if (cflags & KMC_PREFILL)
+		cp->cache_flags |= KMF_PREFILL;
+
 	if (cflags & KMC_NOHASH)
 		cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
 
 	if (cflags & KMC_NOMAGAZINE)
 		cp->cache_flags |= KMF_NOMAGAZINE;
@@ -3776,10 +3909,21 @@
 		cp->cache_flags |= KMF_HASH;
 	}
 
 	cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
 	cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
+
+	/*
+	 * Disallowing prefill when either the DEBUG or HASH flag is set or when
+	 * there is a constructor avoids some tricky issues with debug setup
+	 * that may be revisited later. We cannot allow prefill in a
+	 * metadata cache because of potential recursion.
+	 */
+	if (vmp == kmem_msb_arena ||
+	    cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
+	    cp->cache_constructor != NULL)
+		cp->cache_flags &= ~KMF_PREFILL;
 
 	if (cp->cache_flags & KMF_HASH) {
 		ASSERT(!(cflags & KMC_NOHASH));
 		cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
 		    kmem_bufctl_audit_cache : kmem_bufctl_cache;
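For context, a minimal sketch of how a client could opt in to the new behavior; the names foo_cache, foo_t, foo_init() and foo_alloc() are hypothetical and not part of this change. Per the check above, kmem_cache_create() silently clears KMF_PREFILL for caches that are hashed, carry debug buftags, have a constructor, or are backed by kmem_msb_arena, so prefill only takes effect for plain, constructor-less caches with the magazine layer enabled:

	/* Sketch only: a small, constructor-less cache that requests prefill. */
	static kmem_cache_t *foo_cache;		/* hypothetical cache */

	void
	foo_init(void)
	{
		foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
		    NULL, NULL, NULL, NULL, NULL, KMC_PREFILL);
	}

	void *
	foo_alloc(void)
	{
		/*
		 * When this allocation has to create a new slab,
		 * kmem_slab_prefill() pushes the slab's remaining buffers
		 * into the per-CPU magazines instead of leaving the slab
		 * partial.
		 */
		return (kmem_cache_alloc(foo_cache, KM_SLEEP));
	}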
@@ -4871,11 +5015,12 @@
 		KMEM_STAT_ADD(kmem_move_stats.kms_already_pending);
 		kmem_cache_free(kmem_move_cache, callback);
 		return (B_TRUE);
 	}
 
-	to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs));
+	to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
+	    B_FALSE);
 	callback->kmm_to_buf = to_buf;
 	avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
 
 	mutex_exit(&cp->cache_lock);
 