6887924 PP_ISKAS needs to be defined in terms of VN_ISKAS for vnodes
author Sean McEnroe <Sean.McEnroe@Sun.COM>
date Mon, 23 Nov 2009 22:42:23 -0800
changeset 11185 f0c31008e395
parent 11184 6cb17428ddec
child 11186 8e156cb73582
6887924 PP_ISKAS needs to be defined in terms of VN_ISKAS for vnodes
6871169 kphysm_add_memory_dynamic panics testing vmstress + DR
6877160 guest domain panic at platsvc:mdeg_notify_client+10c
6874763 memseg_alloc_meta() incorrectly maps page_t pages
6873569 multiple calls to memlist_read_lock() can cause deadlock hazard
6886354 DR failure with "memory span duplication" error
6886782 panic after pagefault in seg_kpm after LDom add-mem on primary
6887644 domain hang/deadlock during ldom mem DR when trying to grab a write lock
usr/src/cmd/mdb/common/modules/genunix/genunix.c
usr/src/cmd/mdb/common/modules/genunix/memory.c
usr/src/uts/common/avs/ns/sdbc/sd_bio.c
usr/src/uts/common/cpr/cpr_dump.c
usr/src/uts/common/fs/autofs/auto_vfsops.c
usr/src/uts/common/fs/zfs/sys/zfs_vfsops.h
usr/src/uts/common/fs/zfs/zfs_ioctl.c
usr/src/uts/common/fs/zfs/zfs_vfsops.c
usr/src/uts/common/io/ib/clients/rds/rds_ioctl.c
usr/src/uts/common/io/ib/mgt/ibcm/ibcm_arp.c
usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_subr.c
usr/src/uts/common/os/dumpsubr.c
usr/src/uts/common/os/kstat_fr.c
usr/src/uts/common/os/mem_cage.c
usr/src/uts/common/os/mem_config.c
usr/src/uts/common/os/space.c
usr/src/uts/common/rpc/rpcib.c
usr/src/uts/common/sys/fs/lofs_node.h
usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_proto.h
usr/src/uts/common/sys/vnode.h
usr/src/uts/common/vm/page.h
usr/src/uts/common/vm/page_lock.c
usr/src/uts/common/vm/page_retire.c
usr/src/uts/common/vm/seg_kmem.c
usr/src/uts/common/vm/seg_kmem.h
usr/src/uts/common/vm/seg_vn.c
usr/src/uts/common/vm/vm_page.c
usr/src/uts/common/vm/vm_pagelist.c
usr/src/uts/sun4/os/memlist.c
usr/src/uts/sun4/os/startup.c
usr/src/uts/sun4u/os/cpr_impl.c
usr/src/uts/sun4v/io/dr_mem.c
usr/src/uts/sun4v/io/drctl.c
usr/src/uts/sun4v/io/vlds.c
usr/src/uts/sun4v/os/memseg.c
usr/src/uts/sun4v/promif/promif_emul.c
usr/src/uts/sun4v/sys/drctl.h
usr/src/uts/sun4v/vm/mach_kpm.c
--- a/usr/src/cmd/mdb/common/modules/genunix/genunix.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/cmd/mdb/common/modules/genunix/genunix.c	Mon Nov 23 22:42:23 2009 -0800
@@ -2211,7 +2211,7 @@
 static int
 kmastat_cache(uintptr_t addr, const kmem_cache_t *cp, kmastat_args_t *kap)
 {
-	kmastat_vmem_t **kvp = kap->ka_kvpp;
+	kmastat_vmem_t **kvpp = kap->ka_kvpp;
 	kmastat_vmem_t *kv;
 	datafmt_t *dfp = kmemfmt;
 	int magsize;
@@ -2234,15 +2234,15 @@
 	(void) mdb_pwalk("kmem_cpu_cache", cpu_avail, &avail, addr);
 	(void) mdb_pwalk("kmem_slab_partial", slab_avail, &avail, addr);
 
-	for (kv = *kvp; kv != NULL; kv = kv->kv_next) {
+	for (kv = *kvpp; kv != NULL; kv = kv->kv_next) {
 		if (kv->kv_addr == (uintptr_t)cp->cache_arena)
 			goto out;
 	}
 
 	kv = mdb_zalloc(sizeof (kmastat_vmem_t), UM_SLEEP | UM_GC);
-	kv->kv_next = *kvp;
+	kv->kv_next = *kvpp;
 	kv->kv_addr = (uintptr_t)cp->cache_arena;
-	*kvp = kv;
+	*kvpp = kv;
 out:
 	kv->kv_meminuse += meminuse;
 	kv->kv_alloc += alloc;
--- a/usr/src/cmd/mdb/common/modules/genunix/memory.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/cmd/mdb/common/modules/genunix/memory.c	Mon Nov 23 22:42:23 2009 -0800
@@ -29,6 +29,7 @@
 #include <sys/thread.h>
 #include <sys/swap.h>
 #include <sys/memlist.h>
+#include <sys/vnode.h>
 #if defined(__i386) || defined(__amd64)
 #include <sys/balloon_impl.h>
 #endif
@@ -490,6 +491,7 @@
 	memstat_t stats;
 	GElf_Sym sym;
 	vn_htable_t ht;
+	struct vnode *kvps;
 	uintptr_t vn_size = 0;
 #if defined(__i386) || defined(__amd64)
 	bln_stats_t bln_stats;
@@ -533,25 +535,19 @@
 		return (DCMD_ERR);
 	}
 
-	/* read kernel vnode pointer */
-	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "kvp",
+	/* read kernel vnode array pointer */
+	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "kvps",
 	    (GElf_Sym *)&sym) == -1) {
-		mdb_warn("unable to read kvp");
+		mdb_warn("unable to read kvps");
 		return (DCMD_ERR);
 	}
-
-	stats.ms_kvp = (struct vnode *)(uintptr_t)sym.st_value;
+	kvps = (struct vnode *)(uintptr_t)sym.st_value;
+	stats.ms_kvp = &kvps[KV_KVP];
 
 	/*
-	 * Read the zio vnode pointer.  It may not exist on all kernels, so it
-	 * it isn't found, it's not a fatal error.
+	 * Read the zio vnode pointer.
 	 */
-	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "zvp",
-	    (GElf_Sym *)&sym) == -1) {
-		stats.ms_zvp = NULL;
-	} else {
-		stats.ms_zvp = (struct vnode *)(uintptr_t)sym.st_value;
-	}
+	stats.ms_zvp = &kvps[KV_ZVP];
 
 	/*
 	 * If physmem != total_pages, then the administrator has limited the
--- a/usr/src/uts/common/avs/ns/sdbc/sd_bio.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/avs/ns/sdbc/sd_bio.c	Mon Nov 23 22:42:23 2009 -0800
@@ -41,6 +41,7 @@
 
 #include <sys/sdt.h>		/* dtrace is S10 or later */
 
+#include <vm/seg_kmem.h>
 #include "sd_bcache.h"
 #include "sd_trace.h"
 #include "sd_io.h"
@@ -54,12 +55,6 @@
 extern uintptr_t kobj_getsymvalue(char *, int);	/* DDI violation */
 #endif
 
-/*
- * Shouldn't really use an extern here but no .h file provides this
- * so we have no choice (other than not using it)
- */
-extern struct vnode kvp;		/* the vnode for seg_kmem memory */
-
 #define	DO_PAGE_LIST	sdbc_do_page	/* enable pagelist code */
 
 int sdbc_do_page = 0;
--- a/usr/src/uts/common/cpr/cpr_dump.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/cpr/cpr_dump.c	Mon Nov 23 22:42:23 2009 -0800
@@ -19,12 +19,10 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 /*
  * Fill in and write out the cpr state file
  *	1. Allocate and write headers, ELF and cpr dump header
@@ -811,15 +809,8 @@
 	page0 = pp = page_first();
 
 	do {
-#if defined(__sparc)
-		extern struct vnode prom_ppages;
-		if (pp->p_vnode == NULL || PP_ISKAS(pp) ||
-		    pp->p_vnode == &prom_ppages ||
-		    PP_ISFREE(pp) && PP_ISAGED(pp))
-#else
 		if (pp->p_vnode == NULL || PP_ISKAS(pp) ||
 		    PP_ISFREE(pp) && PP_ISAGED(pp))
-#endif /* __sparc */
 			continue;
 
 		pfn = page_pptonum(pp);
@@ -835,7 +826,7 @@
 	    dcnt, tcnt);
 	CPR_DEBUG(CPR_DEBUG7, "cpr_count_upages: %ld pages, 0x%lx bytes\n",
 	    dcnt, mmu_ptob(dcnt));
-
+	page0 = NULL; /* for Lint */
 	return (dcnt);
 }
 
--- a/usr/src/uts/common/fs/autofs/auto_vfsops.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/fs/autofs/auto_vfsops.c	Mon Nov 23 22:42:23 2009 -0800
@@ -19,12 +19,10 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #include <sys/param.h>
 #include <sys/errno.h>
 #include <sys/proc.h>
@@ -360,7 +358,7 @@
 	char datalen = uap->datalen;
 	dev_t autofs_dev;
 	char strbuff[MAXPATHLEN + 1];
-	vnode_t *kvp;
+	vnode_t *kkvp;
 	struct autofs_globals *fngp;
 	zone_t *zone = curproc->p_zone;
 
@@ -622,15 +620,15 @@
 	 * happens when the daemon gets restarted?
 	 */
 	if ((error = lookupname("/dev/ticotsord", UIO_SYSSPACE, FOLLOW,
-	    NULLVPP, &kvp)) != 0) {
+	    NULLVPP, &kkvp)) != 0) {
 		cmn_err(CE_WARN, "autofs: lookupname: %d", error);
 		goto errout;
 	}
 
-	fnip->fi_knconf.knc_rdev = kvp->v_rdev;
+	fnip->fi_knconf.knc_rdev = kkvp->v_rdev;
 	fnip->fi_knconf.knc_protofmly = NC_LOOPBACK;
 	fnip->fi_knconf.knc_semantics = NC_TPI_COTS_ORD;
-	VN_RELE(kvp);
+	VN_RELE(kkvp);
 
 	/*
 	 * Make the root vnode
@@ -701,7 +699,7 @@
 
 	fnip = vfstofni(vfsp);
 	AUTOFS_DPRINT((4, "auto_unmount vfsp %p fnip %p\n", (void *)vfsp,
-			(void *)fnip));
+	    (void *)fnip));
 
 	if (secpolicy_fs_unmount(cr, vfsp) != 0)
 		return (EPERM);
--- a/usr/src/uts/common/fs/zfs/sys/zfs_vfsops.h	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/fs/zfs/sys/zfs_vfsops.h	Mon Nov 23 22:42:23 2009 -0800
@@ -143,7 +143,7 @@
 extern boolean_t zfs_usergroup_overquota(zfsvfs_t *zfsvfs,
     boolean_t isgroup, uint64_t fuid);
 extern int zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers);
-extern int zfsvfs_create(const char *name, zfsvfs_t **zvp);
+extern int zfsvfs_create(const char *name, zfsvfs_t **zfvp);
 extern void zfsvfs_free(zfsvfs_t *zfsvfs);
 extern int zfs_check_global_label(const char *dsname, const char *hexsl);
 
--- a/usr/src/uts/common/fs/zfs/zfs_ioctl.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/fs/zfs/zfs_ioctl.c	Mon Nov 23 22:42:23 2009 -0800
@@ -954,7 +954,7 @@
 }
 
 static int
-getzfsvfs(const char *dsname, zfsvfs_t **zvp)
+getzfsvfs(const char *dsname, zfsvfs_t **zfvp)
 {
 	objset_t *os;
 	int error;
@@ -968,9 +968,9 @@
 	}
 
 	mutex_enter(&os->os_user_ptr_lock);
-	*zvp = dmu_objset_get_user(os);
-	if (*zvp) {
-		VFS_HOLD((*zvp)->z_vfs);
+	*zfvp = dmu_objset_get_user(os);
+	if (*zfvp) {
+		VFS_HOLD((*zfvp)->z_vfs);
 	} else {
 		error = ESRCH;
 	}
@@ -984,21 +984,21 @@
  * case its z_vfs will be NULL, and it will be opened as the owner.
  */
 static int
-zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zvp)
+zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp)
 {
 	int error = 0;
 
-	if (getzfsvfs(name, zvp) != 0)
-		error = zfsvfs_create(name, zvp);
+	if (getzfsvfs(name, zfvp) != 0)
+		error = zfsvfs_create(name, zfvp);
 	if (error == 0) {
-		rrw_enter(&(*zvp)->z_teardown_lock, RW_READER, tag);
-		if ((*zvp)->z_unmounted) {
+		rrw_enter(&(*zfvp)->z_teardown_lock, RW_READER, tag);
+		if ((*zfvp)->z_unmounted) {
 			/*
 			 * XXX we could probably try again, since the unmounting
 			 * thread should be just about to disassociate the
 			 * objset from the zfsvfs.
 			 */
-			rrw_exit(&(*zvp)->z_teardown_lock, tag);
+			rrw_exit(&(*zfvp)->z_teardown_lock, tag);
 			return (EBUSY);
 		}
 	}
--- a/usr/src/uts/common/fs/zfs/zfs_vfsops.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/fs/zfs/zfs_vfsops.c	Mon Nov 23 22:42:23 2009 -0800
@@ -816,7 +816,7 @@
 }
 
 int
-zfsvfs_create(const char *osname, zfsvfs_t **zvp)
+zfsvfs_create(const char *osname, zfsvfs_t **zfvp)
 {
 	objset_t *os;
 	zfsvfs_t *zfsvfs;
@@ -923,12 +923,12 @@
 	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
 		mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
 
-	*zvp = zfsvfs;
+	*zfvp = zfsvfs;
 	return (0);
 
 out:
 	dmu_objset_disown(os, zfsvfs);
-	*zvp = NULL;
+	*zfvp = NULL;
 	kmem_free(zfsvfs, sizeof (zfsvfs_t));
 	return (error);
 }
--- a/usr/src/uts/common/io/ib/clients/rds/rds_ioctl.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/io/ib/clients/rds/rds_ioctl.c	Mon Nov 23 22:42:23 2009 -0800
@@ -49,18 +49,18 @@
 int
 rds_do_ip_ioctl(int cmd, int len, void *arg)
 {
-	vnode_t	*kvp, *vp;
+	vnode_t	*kkvp, *vp;
 	TIUSER	*tiptr;
 	struct	strioctl iocb;
 	k_sigset_t smask;
 	int	err = 0;
 
-	if (lookupname("/dev/udp", UIO_SYSSPACE, FOLLOW, NULLVPP, &kvp) == 0) {
-		if (t_kopen((file_t *)NULL, kvp->v_rdev, FREAD|FWRITE,
+	if (lookupname("/dev/udp", UIO_SYSSPACE, FOLLOW, NULLVPP, &kkvp) == 0) {
+		if (t_kopen((file_t *)NULL, kkvp->v_rdev, FREAD|FWRITE,
 		    &tiptr, CRED()) == 0) {
 			vp = tiptr->fp->f_vnode;
 		} else {
-			VN_RELE(kvp);
+			VN_RELE(kkvp);
 			return (EPROTO);
 		}
 	} else {
@@ -75,7 +75,7 @@
 	err = kstr_ioctl(vp, I_STR, (intptr_t)&iocb);
 	sigunintr(&smask);
 	(void) t_kclose(tiptr, 0);
-	VN_RELE(kvp);
+	VN_RELE(kkvp);
 	return (err);
 }
 
--- a/usr/src/uts/common/io/ib/mgt/ibcm/ibcm_arp.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/io/ib/mgt/ibcm/ibcm_arp.c	Mon Nov 23 22:42:23 2009 -0800
@@ -284,16 +284,16 @@
 static int
 ibcm_do_ip_ioctl(int cmd, int len, void *arg)
 {
-	vnode_t *kvp;
+	vnode_t *kkvp;
 	TIUSER  *tiptr;
 	struct  strioctl iocb;
 	int	err = 0;
 
-	if (lookupname("/dev/udp", UIO_SYSSPACE, FOLLOW, NULLVPP, &kvp) != 0)
+	if (lookupname("/dev/udp", UIO_SYSSPACE, FOLLOW, NULLVPP, &kkvp) != 0)
 		return (EPROTO);
 
-	if (t_kopen(NULL, kvp->v_rdev, FREAD|FWRITE, &tiptr, CRED()) != 0) {
-		VN_RELE(kvp);
+	if (t_kopen(NULL, kkvp->v_rdev, FREAD|FWRITE, &tiptr, CRED()) != 0) {
+		VN_RELE(kkvp);
 		return (EPROTO);
 	}
 
@@ -303,7 +303,7 @@
 	iocb.ic_dp = (caddr_t)arg;
 	err = kstr_ioctl(tiptr->fp->f_vnode, I_STR, (intptr_t)&iocb);
 	(void) t_kclose(tiptr, 0);
-	VN_RELE(kvp);
+	VN_RELE(kkvp);
 	return (err);
 }
 
--- a/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_subr.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/io/scsi/adapters/pmcs/pmcs_subr.c	Mon Nov 23 22:42:23 2009 -0800
@@ -6601,12 +6601,12 @@
  * acch: ddi_acc_handle_t to use for the mapping
  * dmah: ddi_dma_handle_t to use
  * length: Amount of memory for mapping
- * kvp: Pointer filled in with kernel virtual address on successful return
+ * kvap: Pointer filled in with kernel virtual address on successful return
  * dma_addr: Pointer filled in with DMA address on successful return
  */
 boolean_t
 pmcs_dma_setup(pmcs_hw_t *pwp, ddi_dma_attr_t *dma_attr, ddi_acc_handle_t *acch,
-    ddi_dma_handle_t *dmah, size_t length, caddr_t *kvp, uint64_t *dma_addr)
+    ddi_dma_handle_t *dmah, size_t length, caddr_t *kvap, uint64_t *dma_addr)
 {
 	dev_info_t		*dip = pwp->dip;
 	ddi_dma_cookie_t	cookie;
@@ -6632,7 +6632,7 @@
 	}
 
 	if (ddi_dma_mem_alloc(*dmah, length, &mattr, ddma_flag, DDI_DMA_SLEEP,
-	    NULL, kvp, &real_length, acch) != DDI_SUCCESS) {
+	    NULL, kvap, &real_length, acch) != DDI_SUCCESS) {
 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
 		    "Failed to allocate DMA mem");
 		ddi_dma_free_handle(dmah);
@@ -6640,7 +6640,7 @@
 		return (B_FALSE);
 	}
 
-	if (ddi_dma_addr_bind_handle(*dmah, NULL, *kvp, real_length,
+	if (ddi_dma_addr_bind_handle(*dmah, NULL, *kvap, real_length,
 	    ddabh_flag, DDI_DMA_SLEEP, NULL, &cookie, &cookie_cnt)
 	    != DDI_DMA_MAPPED) {
 		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "Failed to bind DMA");
--- a/usr/src/uts/common/os/dumpsubr.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/os/dumpsubr.c	Mon Nov 23 22:42:23 2009 -0800
@@ -817,13 +817,9 @@
 dump_pfn_check(pfn_t pfn)
 {
 	page_t *pp = page_numtopp_nolock(pfn);
-#if defined(__sparc)
-	extern struct vnode prom_ppages;
-#endif
-
 	if (pp == NULL || pp->p_pagenum != pfn ||
 #if defined(__sparc)
-	    pp->p_vnode == &prom_ppages ||
+	    pp->p_vnode == &promvp ||
 #else
 	    PP_ISBOOTPAGES(pp) ||
 #endif
--- a/usr/src/uts/common/os/kstat_fr.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/os/kstat_fr.c	Mon Nov 23 22:42:23 2009 -0800
@@ -876,8 +876,6 @@
 extern caddr_t	econtig;
 #endif	/* __sparc */
 
-extern struct vnode kvp;
-
 /* ARGSUSED */
 static int
 system_pages_kstat_update(kstat_t *ksp, int rw)
--- a/usr/src/uts/common/os/mem_cage.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/os/mem_cage.c	Mon Nov 23 22:42:23 2009 -0800
@@ -911,7 +911,6 @@
 	page_t *pp;
 	kstat_t *ksp;
 
-	extern struct vnode kvp;
 	extern void page_list_noreloc_startup(page_t *);
 
 	ASSERT(!kcage_on);
@@ -1618,10 +1617,8 @@
 	int result;
 
 #if defined(__sparc)
-	extern struct vnode prom_ppages;
-	ASSERT(pp->p_vnode != &prom_ppages);
+	ASSERT(pp->p_vnode != &promvp);
 #endif /* __sparc */
-
 	ASSERT(!PP_ISFREE(pp));
 	ASSERT(PAGE_EXCL(pp));
 
--- a/usr/src/uts/common/os/mem_config.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/os/mem_config.c	Mon Nov 23 22:42:23 2009 -0800
@@ -63,6 +63,7 @@
 extern void mem_node_del(pfn_t, pfn_t);
 
 extern uint_t page_ctrs_adjust(int);
+void page_ctrs_cleanup(void);
 static void kphysm_setup_post_add(pgcnt_t);
 static int kphysm_setup_pre_del(pgcnt_t);
 static void kphysm_setup_post_del(pgcnt_t, int);
@@ -101,7 +102,7 @@
 extern void memseg_remap_meta(struct memseg *);
 static int memseg_is_dynamic(struct memseg *);
 static int memseg_includes_meta(struct memseg *);
-static pfn_t memseg_get_start(struct memseg *);
+pfn_t memseg_get_start(struct memseg *);
 static void memseg_cpu_vm_flush(void);
 
 int meta_alloc_enable;
@@ -354,6 +355,9 @@
 
 		mem_node_del_range(pt_base, pnum);
 
+		/* cleanup the page counters */
+		page_ctrs_cleanup();
+
 		hat_unload(kas.a_hat, (caddr_t)pp, ptob(metapgs),
 		    HAT_UNLOAD_UNMAP|HAT_UNLOAD_UNLOCK);
 
@@ -2384,6 +2388,8 @@
 		mem_node_del_range(mdsp->mds_base,
 		    mdsp->mds_base + mdsp->mds_npgs - 1);
 	}
+	/* cleanup the page counters */
+	page_ctrs_cleanup();
 
 	comp_code = KPHYSM_OK;
 
--- a/usr/src/uts/common/os/space.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/os/space.c	Mon Nov 23 22:42:23 2009 -0800
@@ -101,6 +101,11 @@
 #include <sys/bootconf.h>
 
 /*
+ * Data for segkmem pages that should be resident
+ */
+struct vnode kvps[KV_MAX];
+
+/*
  * Data from swapgeneric.c that must be resident.
  */
 struct vnode *rootvp;		/* vnode of the root device */
--- a/usr/src/uts/common/rpc/rpcib.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/rpc/rpcib.c	Mon Nov 23 22:42:23 2009 -0800
@@ -5170,18 +5170,18 @@
 static int
 rpcib_do_ip_ioctl(int cmd, int len, void *arg)
 {
-	vnode_t *kvp, *vp;
+	vnode_t *kkvp, *vp;
 	TIUSER  *tiptr;
 	struct  strioctl iocb;
 	k_sigset_t smask;
 	int	err = 0;
 
-	if (lookupname("/dev/udp", UIO_SYSSPACE, FOLLOW, NULLVPP, &kvp) == 0) {
-		if (t_kopen(NULL, kvp->v_rdev, FREAD|FWRITE,
+	if (lookupname("/dev/udp", UIO_SYSSPACE, FOLLOW, NULLVPP, &kkvp) == 0) {
+		if (t_kopen(NULL, kkvp->v_rdev, FREAD|FWRITE,
 		    &tiptr, CRED()) == 0) {
 			vp = tiptr->fp->f_vnode;
 		} else {
-			VN_RELE(kvp);
+			VN_RELE(kkvp);
 			return (EPROTO);
 		}
 	} else {
@@ -5196,7 +5196,7 @@
 	err = kstr_ioctl(vp, I_STR, (intptr_t)&iocb);
 	sigunintr(&smask);
 	(void) t_kclose(tiptr, 0);
-	VN_RELE(kvp);
+	VN_RELE(kkvp);
 	return (err);
 }
 
--- a/usr/src/uts/common/sys/fs/lofs_node.h	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/sys/fs/lofs_node.h	Mon Nov 23 22:42:23 2009 -0800
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -31,8 +30,6 @@
 #ifndef _SYS_FS_LOFS_NODE_H
 #define	_SYS_FS_LOFS_NODE_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #include <sys/fs/lofs_info.h>
 
 #ifdef	__cplusplus
@@ -72,8 +69,6 @@
 #ifdef _KERNEL
 extern vnode_t *makelonode(vnode_t *, struct loinfo *, int);
 extern void freelonode(lnode_t *);
-
-extern struct vnode kvp;
 #endif /* _KERNEL */
 
 #ifdef	__cplusplus
--- a/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_proto.h	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/sys/scsi/adapters/pmcs/pmcs_proto.h	Mon Nov 23 22:42:23 2009 -0800
@@ -291,7 +291,7 @@
 void pmcs_sata_work(pmcs_hw_t *);
 boolean_t pmcs_dma_setup(pmcs_hw_t *pwp, ddi_dma_attr_t *dma_attr,
     ddi_acc_handle_t *acch, ddi_dma_handle_t *dmah, size_t length,
-    caddr_t *kvp, uint64_t *dma_addr);
+    caddr_t *kvap, uint64_t *dma_addr);
 void pmcs_fm_ereport(pmcs_hw_t *pwp, char *detail);
 int pmcs_check_dma_handle(ddi_dma_handle_t handle);
 int pmcs_check_acc_handle(ddi_acc_handle_t handle);
--- a/usr/src/uts/common/sys/vnode.h	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/sys/vnode.h	Mon Nov 23 22:42:23 2009 -0800
@@ -1318,10 +1318,22 @@
 	((VP1) && (VP2) && (vn_getops(VP1) == vn_getops(VP2)) ? \
 	VOP_CMP(VP1, VP2, NULL) : 0))
 
-extern struct vnode kvp;
-extern struct vnode zvp;
+/*
+ * Some well-known global vnodes used by the VM system to name pages.
+ */
+extern struct vnode kvps[];
 
-#define	VN_ISKAS(vp)		((vp) == &kvp || (vp) == &zvp)
+typedef enum {
+	KV_KVP,		/* vnode for all segkmem pages */
+	KV_ZVP,		/* vnode for all ZFS pages */
+#if defined(__sparc)
+	KV_MPVP,	/* vnode for all page_t meta-pages */
+	KV_PROMVP,	/* vnode for all PROM pages */
+#endif	/* __sparc */
+	KV_MAX		/* total number of vnodes in kvps[] */
+} kvps_index_t;
+
+#define	VN_ISKAS(vp)	((vp) >= &kvps[0] && (vp) < &kvps[KV_MAX])
 
 #endif	/* _KERNEL */
 
@@ -1367,7 +1379,6 @@
  * be necessary to ensure the page was freed.
  */
 #define	VN_DISPOSE(pp, flag, dn, cr)	{ \
-	extern struct vnode kvp; \
 	if ((pp)->p_vnode != NULL && !VN_ISKAS((pp)->p_vnode)) \
 		VOP_DISPOSE((pp)->p_vnode, (pp), (flag), (dn), (cr), NULL); \
 	else if ((flag) == B_FREE) \
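
VN_ISKAS() is now a pointer-range check over kvps[], so every vnode the VM system uses to name kernel pages (kvp, zvp and, on sparc, the page_t metadata and PROM vnodes) is covered by one predicate. A minimal sketch of the test, using only names declared in the hunk above (the helper itself is hypothetical, not changeset code):

	#include <sys/vnode.h>

	static int
	is_kernel_naming_vnode(struct vnode *vp)
	{
		/* true only for &kvps[KV_KVP] .. &kvps[KV_MAX - 1] */
		return (vp >= &kvps[0] && vp < &kvps[KV_MAX]);
	}

Because PP_ISKAS(pp) becomes VN_ISKAS((pp)->p_vnode) (see the vm/page.h hunk below), pages hashed onto the sparc PROM vnode are now recognized as kernel pages automatically, which is why several of the prom_ppages special cases elsewhere in this patch could be deleted.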
--- a/usr/src/uts/common/vm/page.h	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/vm/page.h	Mon Nov 23 22:42:23 2009 -0800
@@ -898,8 +898,7 @@
 #define	PP_ISAGED(pp)		(((pp)->p_state & P_FREE) && \
 					((pp)->p_vnode == NULL))
 #define	PP_ISNORELOC(pp)	((pp)->p_state & P_NORELOC)
-#define	PP_ISKAS(pp)		(((pp)->p_vnode == &kvp) || \
-					    ((pp)->p_vnode == &zvp))
+#define	PP_ISKAS(pp)		(VN_ISKAS((pp)->p_vnode))
 #define	PP_ISNORELOCKERNEL(pp)	(PP_ISNORELOC(pp) && PP_ISKAS(pp))
 #define	PP_ISMIGRATE(pp)	((pp)->p_state & P_MIGRATE)
 #define	PP_ISSWAP(pp)		((pp)->p_state & P_SWAP)
@@ -1177,6 +1176,7 @@
 void page_unlock_capture(page_t *pp);
 int page_capture_unretire_pp(page_t *);
 
+extern int memsegs_trylock(int);
 extern void memsegs_lock(int);
 extern void memsegs_unlock(int);
 extern int memsegs_lock_held(void);
--- a/usr/src/uts/common/vm/page_lock.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/vm/page_lock.c	Mon Nov 23 22:42:23 2009 -0800
@@ -19,11 +19,10 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
 
 /*
  * VM - page locking primitives
@@ -33,7 +32,6 @@
 #include <sys/vtrace.h>
 #include <sys/debug.h>
 #include <sys/cmn_err.h>
-#include <sys/vnode.h>
 #include <sys/bitmap.h>
 #include <sys/lockstat.h>
 #include <sys/sysmacros.h>
@@ -41,6 +39,7 @@
 #include <vm/page.h>
 #include <vm/seg_enum.h>
 #include <vm/vm_dep.h>
+#include <vm/seg_kmem.h>
 
 /*
  * This global mutex is for logical page locking.
@@ -145,8 +144,6 @@
 	    ((uintptr_t)(vp) >> 12)) \
 	    & (VPH_TABLE_SIZE - 1))
 
-extern	struct vnode	kvp;
-
 /*
  * Two slots after VPH_TABLE_SIZE are reserved in vph_mutex for kernel vnodes.
  * The lock for kvp is VPH_TABLE_SIZE + 0, and the lock for zvp is
@@ -1025,6 +1022,12 @@
  */
 static krwlock_t memlists_lock;
 
+int
+memsegs_trylock(int writer)
+{
+	return (rw_tryenter(&memsegslock, writer ? RW_WRITER : RW_READER));
+}
+
 void
 memsegs_lock(int writer)
 {
--- a/usr/src/uts/common/vm/page_retire.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/vm/page_retire.c	Mon Nov 23 22:42:23 2009 -0800
@@ -142,6 +142,7 @@
 #include <vm/vm_dep.h>
 #include <vm/as.h>
 #include <vm/hat.h>
+#include <vm/seg_kmem.h>
 
 /*
  * vnode for all pages which are retired from the VM system;
@@ -273,8 +274,6 @@
  */
 static int pr_enable = 0;
 
-extern struct vnode kvp;
-
 #ifdef	DEBUG
 struct page_retire_debug {
 	int prd_dup1;
--- a/usr/src/uts/common/vm/seg_kmem.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/vm/seg_kmem.c	Mon Nov 23 22:42:23 2009 -0800
@@ -111,8 +111,6 @@
 vmem_t *heap32_arena;		/* 32-bit kernel heap arena */
 vmem_t *heaptext_arena;		/* heaptext arena */
 struct as kas;			/* kernel address space */
-struct vnode kvp;		/* vnode for all segkmem pages */
-struct vnode zvp;		/* vnode for zfs pages */
 int segkmem_reloc;		/* enable/disable relocatable segkmem pages */
 vmem_t *static_arena;		/* arena for caches to import static memory */
 vmem_t *static_alloc_arena;	/* arena for allocating static memory */
--- a/usr/src/uts/common/vm/seg_kmem.h	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/vm/seg_kmem.h	Mon Nov 23 22:42:23 2009 -0800
@@ -19,14 +19,13 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #ifndef _VM_SEG_KMEM_H
 #define	_VM_SEG_KMEM_H
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
 
 #ifdef	__cplusplus
 extern "C" {
@@ -59,13 +58,21 @@
 extern vmem_t *heap32_arena;	/* 32-bit kernel heap arena */
 extern vmem_t *heaptext_arena;	/* kernel text arena, from heap */
 extern struct as kas;		/* kernel address space */
-extern struct vnode kvp;	/* vnode for all segkmem pages */
-extern struct vnode zvp;	/* vnode for all segkmem pages for zfs */
 extern int segkmem_reloc;	/* enable/disable segkmem relocatable pages */
 extern vmem_t *static_arena;	/* arena for caches to import static memory */
 extern vmem_t *static_alloc_arena;	/* arena for allocating static memory */
 extern vmem_t *zio_arena;	/* arena for zio caches */
 extern vmem_t *zio_alloc_arena;	/* arena for zio caches */
+extern struct vnode kvps[];
+/*
+ * segkmem page vnodes
+ */
+#define	kvp		(kvps[KV_KVP])
+#define	zvp		(kvps[KV_ZVP])
+#if defined(__sparc)
+#define	mpvp		(kvps[KV_MPVP])
+#define	promvp		(kvps[KV_PROMVP])
+#endif	/* __sparc */
 
 extern int segkmem_create(struct seg *);
 extern page_t *segkmem_page_create(void *, size_t, int, void *);
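
kvp and zvp survive as macros over kvps[] slots, so existing `&kvp` and `&zvp` call sites keep compiling unchanged; only the stray `extern struct vnode kvp;` declarations in drivers and headers have to go, which accounts for most of the small deletions in this patch (sd_bio.c, kstat_fr.c, lofs_node.h, mem_cage.c, page_lock.c, page_retire.c, seg_vn.c). Illustrative only, assuming the macros above:

	/* After this change these name the same objects: */
	ASSERT(&kvp == &kvps[KV_KVP]);
	ASSERT(&zvp == &kvps[KV_ZVP]);
	ASSERT(VN_ISKAS(&kvp) && VN_ISKAS(&zvp));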
--- a/usr/src/uts/common/vm/seg_vn.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/vm/seg_vn.c	Mon Nov 23 22:42:23 2009 -0800
@@ -566,7 +566,6 @@
 			    !IS_P2ALIGNED(seg->s_size, pgsz)) {
 				a->szc = 0;
 			} else if (a->vp != NULL) {
-				extern struct vnode kvp;
 				if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
 					/*
 					 * paranoid check.
@@ -6017,7 +6016,6 @@
 	pgcnt_t pgcnt = page_get_pagecnt(szc);
 	int err;
 	u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
-	extern struct vnode kvp;
 
 	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 	ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
--- a/usr/src/uts/common/vm/vm_page.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/vm/vm_page.c	Mon Nov 23 22:42:23 2009 -0800
@@ -4365,10 +4365,6 @@
 	pgcnt_t nbusypages;
 	int retry = 0;
 	const int MAXRETRIES = 4;
-#if defined(__sparc)
-	extern struct vnode prom_ppages;
-#endif /* __sparc */
-
 top:
 	/*
 	 * Flush dirty pages and destroy the clean ones.
@@ -4385,12 +4381,7 @@
 		 * skip the page if it has no vnode or the page associated
 		 * with the kernel vnode or prom allocated kernel mem.
 		 */
-#if defined(__sparc)
-		if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp) ||
-		    vp == &prom_ppages)
-#else /* x86 doesn't have prom or prom_ppage */
 		if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp))
-#endif /* __sparc */
 			continue;
 
 		/*
@@ -6726,14 +6717,10 @@
 int
 page_capture_pre_checks(page_t *pp, uint_t flags)
 {
-#if defined(__sparc)
-	extern struct vnode prom_ppages;
-#endif /* __sparc */
-
 	ASSERT(pp != NULL);
 
 #if defined(__sparc)
-	if (pp->p_vnode == &prom_ppages) {
+	if (pp->p_vnode == &promvp) {
 		return (EPERM);
 	}
 
--- a/usr/src/uts/common/vm/vm_pagelist.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/common/vm/vm_pagelist.c	Mon Nov 23 22:42:23 2009 -0800
@@ -1117,13 +1117,12 @@
 		/* update shared hpm_counters in other mnodes */
 		if (interleaved_mnodes) {
 			for (i = 0; i < max_mem_nodes; i++) {
-				if (i == mnode)
+				if ((i == mnode) ||
+				    (mem_node_config[i].exists == 0))
 					continue;
 				ASSERT(
 				    PAGE_COUNTERS_COUNTERS(i, r) == old_ctr ||
 				    PAGE_COUNTERS_COUNTERS(i, r) == NULL);
-				if (mem_node_config[i].exists == 0)
-					continue;
 				PAGE_COUNTERS_COUNTERS(i, r) = new_ctr;
 				PAGE_COUNTERS_ENTRIES(i, r) = pcsz;
 				PAGE_COUNTERS_BASE(i, r) = newbase;
@@ -1277,6 +1276,33 @@
 	return (rc);
 }
 
+/*
+ * Cleanup the hpm_counters field in the page counters
+ * array.
+ */
+void
+page_ctrs_cleanup(void)
+{
+	int r;	/* region size */
+	int i;	/* mnode index */
+
+	/*
+	 * Get the page counters write lock while we are
+	 * setting the page hpm_counters field to NULL
+	 * for non-existent mnodes.
+	 */
+	for (i = 0; i < max_mem_nodes; i++) {
+		PAGE_CTRS_WRITE_LOCK(i);
+		if (mem_node_config[i].exists) {
+			PAGE_CTRS_WRITE_UNLOCK(i);
+			continue;
+		}
+		for (r = 1; r < mmu_page_sizes; r++) {
+			PAGE_COUNTERS_COUNTERS(i, r) = NULL;
+		}
+		PAGE_CTRS_WRITE_UNLOCK(i);
+	}
+}
 
 #ifdef DEBUG
 
@@ -3449,7 +3475,17 @@
 			pfnhi = pfnlo + (slotlen * szcpgcnt) - 1;
 	}
 
-	memsegs_lock(0);
+	/*
+	 * This routine can be called recursively, so we shouldn't
+	 * acquire a reader lock if a write request is pending. This
+	 * could lead to a deadlock with the DR thread.
+	 *
+	 * Returning NULL informs the caller that we could not get
+	 * a contig page with the required characteristics.
+	 */
+
+	if (!memsegs_trylock(0))
+		return (NULL);
 
 	/*
 	 * loop through memsegs to look for contig page candidates
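
The memsegs_trylock() conversion above is what the comment is guarding against: page_geti_contig_pages() can be entered recursively, and blocking for a reader lock while a DR thread has a write request pending can deadlock, so the allocation path now backs off and reports failure instead. The caller-side pattern, in outline (a sketch, not further changeset code):

	if (!memsegs_trylock(0)) {
		/* a writer (DR) holds or wants the lock; report no page */
		return (NULL);
	}
	/* ... scan the memsegs for a contiguous candidate ... */
	memsegs_unlock(0);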
--- a/usr/src/uts/sun4/os/memlist.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/sun4/os/memlist.c	Mon Nov 23 22:42:23 2009 -0800
@@ -19,12 +19,10 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 #include <sys/types.h>
 #include <sys/param.h>
 #include <sys/sysmacros.h>
@@ -177,9 +175,6 @@
 }
 
 
-
-struct vnode prom_ppages;
-
 static void
 more_pages(uint64_t base, uint64_t len)
 {
@@ -226,7 +221,7 @@
 			 * are page numbers (gack) for >32 bit
 			 * physical memory machines.
 			 */
-			(void) page_hashin(pp, &prom_ppages,
+			(void) page_hashin(pp, &promvp,
 			    (offset_t)pfnum, NULL);
 
 			if (kcage_on) {
--- a/usr/src/uts/sun4/os/startup.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/sun4/os/startup.c	Mon Nov 23 22:42:23 2009 -0800
@@ -2561,8 +2561,6 @@
 	return (NULL);
 }
 
-extern struct vnode prom_ppages;
-
 /*
  * Put page allocated by OBP on prom_ppages
  */
@@ -2604,7 +2602,7 @@
 			add_physmem_cb(pp, base);
 			if (page_trylock(pp, SE_EXCL) == 0)
 				cmn_err(CE_PANIC, "prom page locked");
-			(void) page_hashin(pp, &prom_ppages,
+			(void) page_hashin(pp, &promvp,
 			    (offset_t)base, NULL);
 			(void) page_pp_lock(pp, 0, 1);
 			pp++, base++, num--;
@@ -2700,7 +2698,7 @@
 		 * unhash and unlock it
 		 */
 		while (rpp < lpp) {
-			ASSERT(PAGE_EXCL(rpp) && rpp->p_vnode == &prom_ppages);
+			ASSERT(PAGE_EXCL(rpp) && rpp->p_vnode == &promvp);
 			ASSERT(PP_ISNORELOC(rpp));
 			PP_CLRNORELOC(rpp);
 			page_pp_unlock(rpp, 0, 1);
--- a/usr/src/uts/sun4u/os/cpr_impl.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/sun4u/os/cpr_impl.c	Mon Nov 23 22:42:23 2009 -0800
@@ -19,12 +19,10 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident	"%Z%%M%	%I%	%E% SMI"
-
 /*
  * Platform specific implementation code
  */
@@ -1505,7 +1503,6 @@
 static int
 i_cpr_find_ppages(void)
 {
-	extern struct vnode prom_ppages;
 	struct page *pp;
 	struct memlist *pmem;
 	pgcnt_t npages, pcnt, scnt, vcnt;
@@ -1539,15 +1536,15 @@
 	scnt = cpr_count_seg_pages(mapflag, cpr_clrbit);
 
 	/*
-	 * set bits for phys pages referenced by the prom_ppages vnode;
+	 * set bits for phys pages referenced by the promvp vnode;
 	 * these pages are mostly comprised of forthdebug words
 	 */
 	vcnt = 0;
-	for (pp = prom_ppages.v_pages; pp; ) {
+	for (pp = promvp.v_pages; pp; ) {
 		if (cpr_setbit(pp->p_offset, mapflag) == 0)
 			vcnt++;
 		pp = pp->p_vpnext;
-		if (pp == prom_ppages.v_pages)
+		if (pp == promvp.v_pages)
 			break;
 	}
 
--- a/usr/src/uts/sun4v/io/dr_mem.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/sun4v/io/dr_mem.c	Mon Nov 23 22:42:23 2009 -0800
@@ -203,10 +203,6 @@
 static int mem_add(pfn_t, pgcnt_t);
 static int mem_del(pfn_t, pgcnt_t);
 
-static size_t rsvaddsz;
-extern void i_dr_mem_init(uint64_t *);
-extern void i_dr_mem_fini();
-extern void i_dr_mem_update();
 extern int kphysm_add_memory_dynamic(pfn_t, pgcnt_t);
 
 int
@@ -261,8 +257,6 @@
 		return (rv);
 	}
 
-	i_dr_mem_init(&rsvaddsz);
-
 	return (0);
 }
 
@@ -271,8 +265,6 @@
 {
 	int rv;
 
-	i_dr_mem_fini();
-
 	if ((rv = ds_cap_fini(&dr_mem_cap)) != 0) {
 		cmn_err(CE_NOTE, "dr_mem: ds_cap_fini failed: %d", rv);
 	}
@@ -720,6 +712,8 @@
 	dr_mem_hdr_t	*rp;
 	dr_mem_query_t	*stat;
 
+	drctl_block();
+
 	/* the incoming array of req_mblks to configure */
 	req_mblks = DR_MEM_CMD_MBLKS(req);
 
@@ -764,6 +758,8 @@
 	*resp = rp;
 	*resp_len = rlen;
 
+	drctl_unblock();
+
 	return (0);
 }
 
@@ -832,7 +828,7 @@
 dr_mem_configure(dr_mem_blk_t *mbp, int *status)
 {
 	int rv;
-	uint64_t addr, size, addsz;
+	uint64_t addr, size;
 
 	rv = 0;
 	addr = mbp->addr;
@@ -854,64 +850,10 @@
 			*status = DR_MEM_STAT_UNCONFIGURED;
 			rv = DR_MEM_RES_FAILURE;
 		}
-	} else if (rsvaddsz) {
-		addr += size;
-
-		/*
-		 * Add up to the first <rsvaddsz> portion of mblock
-		 * first since that portion has reserved meta pages.
-		 * This will likely guarantee an additional amount of
-		 * free pages from which we may have to allocate the
-		 * rest of the meta pages.
-		 *
-		 * Break up the request in descending order (if needed)
-		 * in order to ensure that cage grows from the high end
-		 * of the original request.
-		 */
-		for (addsz = MIN(size, rsvaddsz); addsz > 0; addsz = size) {
-			ASSERT(addr >= mbp->addr);
-			DR_DBG_MEM("addsz=0x%lx  size=0x%lx\n", addsz, size);
-			if (rv = mem_add(btop(addr - addsz), btop(addsz))) {
-				DR_DBG_MEM("failed to configure span"
-				    " 0x%lx.0x%lx (%d)\n", addr, addsz, rv);
-				break;
-			} else {
-				size -= addsz;
-				addr -= addsz;
-			}
-		}
-
-		/*
-		 * Mark the mblock configured if any span
-		 * in that mblock was successfully added.
-		 *
-		 * In case of partial success:
-		 *
-		 *	rv != DR_MEM_RES_OK
-		 *	status == DR_MEM_STAT_CONFIGURED
-		 *
-		 * mark span actually configured.
-		 */
-		if (size == mbp->size && rv != KPHYSM_ESPAN) {
-			*status = DR_MEM_STAT_UNCONFIGURED;
-		} else {
-			DR_DBG_MEM("failed (partial) to configure span"
-			    " 0x%lx.0x%lx (%d)\n", addr, addsz, rv);
-			*status = DR_MEM_STAT_CONFIGURED;
-			mbp->addr = addr;
-			mbp->size -= size;
-		}
-
-		rv = cvt_err(rv);
-		i_dr_mem_update();
 	} else {
-		/*
-		 * The reserved feature is disabled, add whole mblock.
-		 */
 		rv = mem_add(btop(addr), btop(size));
 		DR_DBG_MEM("addr=0x%lx size=0x%lx rv=%d\n", addr, size, rv);
 		if (rv) {
-			rv = cvt_err(rv);
 			*status = DR_MEM_STAT_UNCONFIGURED;
 		} else {
 			*status = DR_MEM_STAT_CONFIGURED;
@@ -934,7 +876,6 @@
 			*status = DR_MEM_STAT_CONFIGURED;
 			rv = DR_MEM_RES_EINVAL;
 	} else if (rv = mem_del(btop(mbp->addr), btop(mbp->size))) {
-		rv = cvt_err(rv);
 		*status = DR_MEM_STAT_CONFIGURED;
 	} else {
 		*status = DR_MEM_STAT_UNCONFIGURED;
@@ -1117,15 +1058,16 @@
 	DR_DBG_MEM("%s: begin base=0x%lx npgs=0x%lx\n", __func__, base, npgs);
 
 	if (npgs == 0)
-		return (0);
+		return (DR_MEM_RES_OK);
 
 	rv = kphysm_add_memory_dynamic(base, npgs);
 	DR_DBG_MEM("%s: kphysm_add(0x%lx, 0x%lx) = %d", __func__, base, npgs,
 	    rv);
-	if (!rv) {
+	if (rv == KPHYSM_OK) {
 		if (rc = kcage_range_add(base, npgs, KCAGE_DOWN))
 			cmn_err(CE_WARN, "kcage_range_add() = %d", rc);
 	}
+	rv = cvt_err(rv);
 	return (rv);
 }
 
@@ -1145,6 +1087,7 @@
 mem_del(pfn_t base, pgcnt_t npgs)
 {
 	int rv, err, del_range = 0;
+	int convert = 1;
 	mem_sync_t ms;
 	memquery_t mq;
 	memhandle_t mh;
@@ -1154,10 +1097,11 @@
 	DR_DBG_MEM("%s: begin base=0x%lx npgs=0x%lx\n", __func__, base, npgs);
 
 	if (npgs == 0)
-		return (0);
+		return (DR_MEM_RES_OK);
 
 	if ((rv = kphysm_del_gethandle(&mh)) != KPHYSM_OK) {
 		cmn_err(CE_WARN, "%s: del_gethandle() = %d", __func__, rv);
+		rv = cvt_err(rv);
 		return (rv);
 	}
 	if ((rv = kphysm_del_span_query(base, npgs, &mq))
@@ -1168,10 +1112,19 @@
 	if (mq.nonrelocatable) {
 		DR_DBG_MEM("%s: non-reloc pages = %ld",
 		    __func__, mq.nonrelocatable);
-		rv = KPHYSM_ENONRELOC;
+		rv  = KPHYSM_ENONRELOC;
 		goto done;
 	}
 	if (rv = kcage_range_delete(base, npgs)) {
+		switch (rv) {
+		case EBUSY:
+			rv = DR_MEM_RES_ENOTVIABLE;
+			break;
+		default:
+			rv = DR_MEM_RES_FAILURE;
+			break;
+		}
+		convert = 0; /* conversion done */
 		cmn_err(CE_WARN, "%s: del_range() = %d", __func__, rv);
 		goto done;
 	} else {
@@ -1183,6 +1136,18 @@
 	}
 	if ((rv = memlist_add_span(ptob(base), ptob(npgs), &d_ml))
 	    != MEML_SPANOP_OK) {
+		switch (rv) {
+		case MEML_SPANOP_ESPAN:
+			rv = DR_MEM_RES_ESPAN;
+			break;
+		case MEML_SPANOP_EALLOC:
+			rv = DR_MEM_RES_ERESOURCE;
+			break;
+		default:
+			rv = DR_MEM_RES_FAILURE;
+			break;
+		}
+		convert = 0; /* conversion done */
 		cmn_err(CE_WARN, "%s: add_span() = %d", __func__, rv);
 		goto done;
 	}
@@ -1245,6 +1210,8 @@
 
 	if ((err = kphysm_del_release(mh)) != KPHYSM_OK)
 		cmn_err(CE_WARN, "%s: del_release() = %d", __func__, err);
+	if (convert)
+		rv = cvt_err(rv);
 
 	DR_DBG_MEM("%s: rv=%d", __func__, rv);
 
--- a/usr/src/uts/sun4v/io/drctl.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/sun4v/io/drctl.c	Mon Nov 23 22:42:23 2009 -0800
@@ -156,6 +156,7 @@
 
 	drctlp->drc_inst = -1;
 	mutex_init(&drctlp->drc_lock, NULL, MUTEX_DRIVER, NULL);
+	cv_init(&drctlp->drc_busy_cv, NULL, CV_DRIVER, NULL);
 
 	if ((rv = mod_install(&modlinkage)) != 0)
 		mutex_destroy(&drctlp->drc_lock);
@@ -171,7 +172,7 @@
 
 	if ((rv = mod_remove(&modlinkage)) != 0)
 		return (rv);
-
+	cv_destroy(&drctlp->drc_busy_cv);
 	mutex_destroy(&drctlp->drc_lock);
 	return (0);
 }
@@ -376,7 +377,6 @@
 	return (0);
 }
 
-
 static int
 drctl_config_common(int cmd, int flags, drctl_rsrc_t *res,
     int count, drctl_resp_t **rbuf, size_t *rsize, size_t *rq_size)
@@ -460,7 +460,6 @@
 	}
 
 	mutex_enter(&drctlp->drc_lock);
-
 	if (drctlp->drc_busy != NULL) {
 		mutex_exit(&drctlp->drc_lock);
 		*rbuf = drctl_generate_err_resp(busy_msg, rsize);
@@ -488,6 +487,7 @@
 			kmem_free(*rbuf, *rsize);
 			*rbuf = drctl_generate_err_resp(rsp_msg, rsize);
 			drctlp->drc_busy = NULL;
+			cv_broadcast(&drctlp->drc_busy_cv);
 		} else { /* message format is valid */
 			drctlp->drc_busy = ck;
 			drctlp->drc_cmd = cmd;
@@ -511,8 +511,8 @@
 		drctlp->drc_cmd = -1;
 		drctlp->drc_flags = 0;
 		drctlp->drc_busy = NULL;
+		cv_broadcast(&drctlp->drc_busy_cv);
 	}
-
 	return (rv);
 }
 
@@ -528,12 +528,10 @@
 	size_t rq_size;
 
 	mutex_enter(&drctlp->drc_lock);
-
 	if (drctlp->drc_busy != ck) {
 		mutex_exit(&drctlp->drc_lock);
 		return (EBUSY);
 	}
-
 	mutex_exit(&drctlp->drc_lock);
 
 	flags = drctlp->drc_flags;
@@ -579,11 +577,11 @@
 	    flags, res, count, NULL, 0, &rq_size);
 
 done:
-	drctlp->drc_cmd = -1;
-	drctlp->drc_flags = 0;
-	drctlp->drc_busy = NULL;
-
-	return (rv);
+		drctlp->drc_cmd = -1;
+		drctlp->drc_flags = 0;
+		drctlp->drc_busy = NULL;
+		cv_broadcast(&drctlp->drc_busy_cv);
+		return (rv);
 }
 
 static int
@@ -697,3 +695,35 @@
 
 	return (msgp);
 }
+
+/*
+ * Block DR operations
+ */
+void
+drctl_block(void)
+{
+	/* Wait for any in progress DR operation to complete */
+	mutex_enter(&drctlp->drc_lock);
+	while (drctlp->drc_busy != NULL)
+		(void) cv_wait_sig(&drctlp->drc_busy_cv, &drctlp->drc_lock);
+	/* Mark the link busy */
+	drctlp->drc_busy = (drctl_cookie_t)-1;
+	drctlp->drc_cmd = DRCTL_DRC_BLOCK;
+	drctlp->drc_flags = 0;
+	mutex_exit(&drctlp->drc_lock);
+}
+
+/*
+ * Unblock DR operations
+ */
+void
+drctl_unblock(void)
+{
+	/* Mark the link free */
+	mutex_enter(&drctlp->drc_lock);
+	drctlp->drc_cmd = -1;
+	drctlp->drc_flags = 0;
+	drctlp->drc_busy = NULL;
+	cv_broadcast(&drctlp->drc_busy_cv);
+	mutex_exit(&drctlp->drc_lock);
+}
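
drctl_block() and drctl_unblock() reuse the existing drc_busy handshake: block waits on the new drc_busy_cv until no DR request owns the link, then claims it with a sentinel cookie and the DRCTL_DRC_BLOCK command; unblock clears the state and wakes any waiter. dr_mem.c brackets its memory query with the pair (see the dr_mem_query hunk above) so the mblock state it reports cannot change underneath it:

	drctl_block();		/* wait for any in-flight DR op, then claim the link */
	/* ... assemble the per-mblock status response ... */
	drctl_unblock();	/* release the link; cv_broadcast() wakes waiters */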
--- a/usr/src/uts/sun4v/io/vlds.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/sun4v/io/vlds.c	Mon Nov 23 22:42:23 2009 -0800
@@ -278,7 +278,6 @@
 	}
 
 	vlds_mdeg_init();
-	(void) vlds_mdeg_register();
 
 	return (s);
 }
@@ -293,8 +292,6 @@
 
 	ddi_soft_state_fini(&vlds_statep);
 
-	(void) vlds_mdeg_unregister();
-
 	return (s);
 }
 
@@ -338,6 +335,8 @@
 
 	vlds_minor_init();
 
+	(void) vlds_mdeg_register();
+
 	return (DDI_SUCCESS);
 }
 
@@ -352,6 +351,7 @@
 
 	vlds_minor_free(vlds_minor_bitmap);
 	ddi_remove_minor_node(devi, NULL);
+	(void) vlds_mdeg_unregister();
 	return (DDI_SUCCESS);
 }
 
--- a/usr/src/uts/sun4v/os/memseg.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/sun4v/os/memseg.c	Mon Nov 23 22:42:23 2009 -0800
@@ -44,9 +44,6 @@
 extern page_t *ppvm_base;
 extern pgcnt_t ppvm_size;
 
-static vnode_t pp_vn, rsv_vn;
-static pgcnt_t rsv_metapgs;
-static int meta_rsv_enable;
 static int sun4v_memseg_debug;
 
 extern struct memseg *memseg_reuse(pgcnt_t);
@@ -77,12 +74,11 @@
 int
 memseg_alloc_meta(pfn_t base, pgcnt_t npgs, void **ptp, pgcnt_t *metap)
 {
-	page_t		*pp, *opp, *epp, *pgpp;
+	page_t		*pp, *opp, *epp;
 	pgcnt_t		metapgs;
-	int		i, rsv;
+	int		i;
 	struct seg	kseg;
 	caddr_t		vaddr;
-	u_offset_t	off;
 
 	/*
 	 * Verify incoming memory is within supported DR range.
@@ -95,7 +91,7 @@
 	metapgs = btopr(npgs * sizeof (page_t));
 
 	if (!IS_P2ALIGNED((uint64_t)pp, PAGESIZE) &&
-	    page_find(&pp_vn, (u_offset_t)pp)) {
+	    page_find(&mpvp, (u_offset_t)pp)) {
 		/*
 		 * Another memseg has page_t's in the same
 		 * page which 'pp' resides.  This would happen
@@ -120,7 +116,7 @@
 	}
 
 	if (!IS_P2ALIGNED((uint64_t)epp, PAGESIZE) &&
-	    page_find(&pp_vn, (u_offset_t)epp)) {
+	    page_find(&mpvp, (u_offset_t)epp)) {
 		/*
 		 * Another memseg has page_t's in the same
 		 * page which 'epp' resides.  This would happen
@@ -144,59 +140,20 @@
 	vaddr = (caddr_t)pp;
 
 	for (i = 0; i < metapgs; i++)
-		if (page_find(&pp_vn, (u_offset_t)(vaddr + i * PAGESIZE)))
+		if (page_find(&mpvp, (u_offset_t)(vaddr + i * PAGESIZE)))
 			panic("page_find(0x%p, %p)\n",
-			    (void *)&pp_vn, (void *)(vaddr + i * PAGESIZE));
+			    (void *)&mpvp, (void *)(vaddr + i * PAGESIZE));
 
 	/*
 	 * Allocate the metadata pages; these are the pages that will
 	 * contain the page_t's for the incoming memory.
-	 *
-	 * If a normal allocation fails, use the reserved metapgs for
-	 * a small allocation; otherwise retry with PG_WAIT.
 	 */
-	rsv = off = 0;
-	if (metapgs <= rsv_metapgs) {
-		MEMSEG_DEBUG("memseg_get: use rsv 0x%lx metapgs", metapgs);
-		ASSERT(meta_rsv_enable);
-		rsv = 1;
-	} else if ((pgpp = page_create_va(&pp_vn, (u_offset_t)pp, ptob(metapgs),
+	if ((page_create_va(&mpvp, (u_offset_t)pp, ptob(metapgs),
 	    PG_NORELOC | PG_EXCL, &kseg, vaddr)) == NULL) {
-		cmn_err(CE_WARN, "memseg_get: can't get 0x%ld metapgs",
+		MEMSEG_DEBUG("memseg_alloc_meta: can't get 0x%ld metapgs",
 		    metapgs);
 		return (KPHYSM_ERESOURCE);
 	}
-	if (rsv) {
-		/*
-		 * The reseve pages must be hashed out of the reserve vnode
-		 * and rehashed by <pp_vn,vaddr>.  The resreved pages also
-		 * must be replenished immedidately at the end of the add
-		 * processing.
-		 */
-		for (i = 0; i < metapgs; i++) {
-			pgpp = page_find(&rsv_vn, off);
-			ASSERT(pgpp);
-			page_hashout(pgpp, 0);
-			hat_devload(kas.a_hat, vaddr, PAGESIZE,
-			    page_pptonum(pgpp), PROT_READ | PROT_WRITE,
-			    HAT_LOAD | HAT_LOAD_REMAP | HAT_LOAD_NOCONSIST);
-			ASSERT(!page_find(&pp_vn, (u_offset_t)vaddr));
-			if (!page_hashin(pgpp, &pp_vn, (u_offset_t)vaddr, 0))
-				panic("memseg_get: page_hashin(0x%p, 0x%p)",
-				    (void *)pgpp, (void *)vaddr);
-			off += PAGESIZE;
-			vaddr += PAGESIZE;
-			rsv_metapgs--;
-		}
-	} else {
-		for (i = 0; i < metapgs; i++) {
-			hat_devload(kas.a_hat, vaddr, PAGESIZE,
-			    page_pptonum(pgpp), PROT_READ | PROT_WRITE,
-			    HAT_LOAD | HAT_LOAD_REMAP | HAT_LOAD_NOCONSIST);
-			pgpp = pgpp->p_next;
-			vaddr += PAGESIZE;
-		}
-	}
 
 	ASSERT(ptp);
 	ASSERT(metap);
@@ -228,7 +185,7 @@
 	 * Free pages allocated during add.
 	 */
 	for (i = 0; i < metapgs; i++) {
-		pp = page_find(&pp_vn, off);
+		pp = page_find(&mpvp, off);
 		ASSERT(pp);
 		ASSERT(pp->p_szc == 0);
 		page_io_unlock(pp);
@@ -248,7 +205,7 @@
 	ASSERT(off);
 	ASSERT(IS_P2ALIGNED((uint64_t)off, PAGESIZE));
 
-	pp = page_find(&pp_vn, off);
+	pp = page_find(&mpvp, off);
 	ASSERT(pp);
 	ASSERT(pp->p_szc == 0);
 	ASSERT(pp->p_pagenum != PFN_INVALID);
@@ -285,7 +242,7 @@
 	 */
 
 	if (!IS_P2ALIGNED((uint64_t)pp, PAGESIZE) &&
-	    page_find(&pp_vn, (u_offset_t)(pp - 1)) && !page_deleted(pp - 1)) {
+	    page_find(&mpvp, (u_offset_t)(pp - 1)) && !page_deleted(pp - 1)) {
 		/*
 		 * Another memseg has page_t's in the same
 		 * page which 'pp' resides.  This would happen
@@ -312,7 +269,7 @@
 	}
 
 	if (!IS_P2ALIGNED((uint64_t)epp, PAGESIZE) &&
-	    page_find(&pp_vn, (u_offset_t)epp) && !page_deleted(epp)) {
+	    page_find(&mpvp, (u_offset_t)epp) && !page_deleted(epp)) {
 		/*
 		 * Another memseg has page_t's in the same
 		 * page which 'epp' resides.  This would happen
@@ -333,13 +290,13 @@
 
 	off = (u_offset_t)pp;
 
-	MEMSEG_DEBUG("memseg_remap: off=0x%lx metapgs=0x%lx\n", (uint64_t)off,
-	    metapgs);
+	MEMSEG_DEBUG("memseg_remap_meta: off=0x%lx metapgs=0x%lx\n",
+	    (uint64_t)off, metapgs);
 	/*
 	 * Free pages allocated during add.
 	 */
 	for (i = 0; i < metapgs; i++) {
-		pp = page_find(&pp_vn, off);
+		pp = page_find(&mpvp, off);
 		ASSERT(pp);
 		ASSERT(pp->p_szc == 0);
 		page_io_unlock(pp);
@@ -347,64 +304,3 @@
 		off += PAGESIZE;
 	}
 }
-
-static void
-rsv_alloc()
-{
-	int i;
-	page_t *pp;
-	pgcnt_t metapgs;
-	u_offset_t off;
-	struct seg kseg;
-
-	kseg.s_as = &kas;
-
-	/*
-	 * Reserve enough page_t pages for an add request of
-	 * RSV_SIZE bytes.
-	 */
-	metapgs = btopr(btop(RSV_SIZE) * sizeof (page_t)) - rsv_metapgs;
-
-	for (i = off = 0; i < metapgs; i++, off += PAGESIZE) {
-		(void) page_create_va(&rsv_vn, off, PAGESIZE,
-		    PG_NORELOC | PG_WAIT, &kseg, 0);
-		pp = page_find(&rsv_vn, off);
-		ASSERT(pp);
-		ASSERT(PAGE_EXCL(pp));
-		page_iolock_init(pp);
-		rsv_metapgs++;
-	}
-}
-
-void
-i_dr_mem_init(size_t *hint)
-{
-	if (meta_rsv_enable) {
-		rsv_alloc();
-		if (hint)
-			*hint = RSV_SIZE;
-	}
-}
-
-void
-i_dr_mem_fini()
-{
-	int i;
-	page_t *pp;
-	u_offset_t off;
-
-	for (i = off = 0; i < rsv_metapgs; i++, off += PAGESIZE) {
-		if (pp = page_find(&rsv_vn, off)) {
-			ASSERT(PAGE_EXCL(pp));
-			page_destroy(pp, 0);
-		}
-		ASSERT(!page_find(&rsv_vn, off));
-	}
-	rsv_metapgs = 0;
-}
-
-void
-i_dr_mem_update()
-{
-	rsv_alloc();
-}
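
The metadata vnode is now the shared mpvp slot of kvps[] instead of the file-local pp_vn, and the reserve-pool machinery (rsv_vn, i_dr_mem_init/fini/update) is gone: memseg_alloc_meta() simply creates the page_t pages on mpvp, keyed by their kernel virtual address. That keying is what the page_find() collision checks above rely on when two memsegs' page_t arrays share a physical page; a sketch of the identity (hypothetical helper, not in the changeset):

	/*
	 * A metadata page is already owned by some memseg iff it has been
	 * hashed into mpvp at the offset equal to its kernel VA.
	 */
	static int
	meta_page_in_use(caddr_t vaddr)
	{
		return (page_find(&mpvp, (u_offset_t)(uintptr_t)vaddr) != NULL);
	}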
--- a/usr/src/uts/sun4v/promif/promif_emul.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/sun4v/promif/promif_emul.c	Mon Nov 23 22:42:23 2009 -0800
@@ -36,6 +36,7 @@
 #include <sys/mdesc.h>
 #include <sys/mach_descrip.h>
 #include <sys/cpu_module.h>
+#include <vm/seg_kmem.h>
 
 #ifndef _KMDB
 #include <sys/pte.h>
@@ -50,7 +51,7 @@
 int (*prom_cif_handler)(void *) = NULL;
 
 extern struct memlist *phys_avail;
-extern struct vnode prom_ppages;
+extern struct vnode promvp;
 extern void kdi_tlb_page_unlock(caddr_t, int);
 
 #define	COMBINE(hi, lo) (((uint64_t)(uint32_t)(hi) << 32) | (uint32_t)(lo))
@@ -309,7 +310,7 @@
 				ASSERT(PAGE_EXCL(pp));
 				ASSERT(PP_ISNORELOC(pp));
 				ASSERT(!PP_ISFREE(pp));
-				ASSERT(page_find(&prom_ppages, pfn));
+				ASSERT(page_find(&promvp, pfn));
 				ASSERT(page_get_pagecnt(pp->p_szc) == 1);
 
 				if (pp->p_mapping) {
--- a/usr/src/uts/sun4v/sys/drctl.h	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/sun4v/sys/drctl.h	Mon Nov 23 22:42:23 2009 -0800
@@ -20,7 +20,7 @@
  */
 
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -46,7 +46,8 @@
 	DRCTL_IO_CONFIG_REQUEST,
 	DRCTL_IO_CONFIG_NOTIFY,
 	DRCTL_IO_UNCONFIG_REQUEST,
-	DRCTL_IO_UNCONFIG_NOTIFY
+	DRCTL_IO_UNCONFIG_NOTIFY,
+	DRCTL_DRC_BLOCK
 } drctl_cmds_t;
 
 /*
@@ -135,6 +136,8 @@
 extern int drctl_config_init(int, int,
     drctl_rsrc_t *, int, drctl_resp_t **, size_t *, drctl_cookie_t);
 extern int drctl_config_fini(drctl_cookie_t, drctl_rsrc_t *, int);
+extern void drctl_block(void);
+extern void drctl_unblock(void);
 
 /*
  * Values for the 2nd arg (flags) of drctl_config_init
--- a/usr/src/uts/sun4v/vm/mach_kpm.c	Mon Nov 23 20:41:47 2009 -0800
+++ b/usr/src/uts/sun4v/vm/mach_kpm.c	Mon Nov 23 22:42:23 2009 -0800
@@ -35,11 +35,15 @@
 #include <sys/machsystm.h>
 #include <vm/seg_kpm.h>
 #include <vm/mach_kpm.h>
+#include <vm/faultcode.h>
+
+extern pfn_t memseg_get_start(struct memseg *);
 
 /*
  * Kernel Physical Mapping (kpm) facility
  */
 
+
 void
 mach_kpm_init()
 {
@@ -226,7 +230,15 @@
 	 * if nkpmpgs needs to be used at some point.
 	 */
 
-	base = msp->pages_base;
+	/*
+	 * The meta (page_t) pages for dynamically added memory are allocated
+	 * either from the incoming memory itself or from existing memory.
+	 * In the former case the base of the incoming pages will be different
+	 * than the base of the dynamic segment so call memseg_get_start() to
+	 * get the actual base of the incoming memory for each case.
+	 */
+
+	base = memseg_get_start(msp);
 	end = msp->pages_end;
 
 	hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
@@ -259,7 +271,15 @@
 {
 	pfn_t base, end;
 
-	base = msp->pages_base;
+	/*
+	 * The meta (page_t) pages for dynamically added memory are allocated
+	 * either from the incoming memory itself or from existing memory.
+	 * In the former case the base of the incoming pages will be different
+	 * than the base of the dynamic segment so call memseg_get_start() to
+	 * get the actual base of the incoming memory for each case.
+	 */
+
+	base = memseg_get_start(msp);
 	end = msp->pages_end;
 
 	hat_unload(kas.a_hat, kpm_vbase +  mmu_ptob(base), mmu_ptob(end - base),