6373298 Integrate support for MMU context ID domains
author huah
Tue, 20 Jun 2006 07:21:09 -0700
changeset 2241 592fbc504a44
parent 2240 c2ec4fa0aeb8
child 2242 e7f5f015ff71
usr/src/cmd/perl/contrib/Sun/Solaris/Kstat/Kstat.xs
usr/src/uts/common/os/fork.c
usr/src/uts/common/vm/seg_kmem.h
usr/src/uts/sfmmu/ml/sfmmu_asm.s
usr/src/uts/sfmmu/ml/sfmmu_kdi.s
usr/src/uts/sfmmu/vm/hat_sfmmu.c
usr/src/uts/sfmmu/vm/hat_sfmmu.h
usr/src/uts/sun4/cpu/cpu_module.c
usr/src/uts/sun4/io/trapstat.c
usr/src/uts/sun4/ml/offsets.in
usr/src/uts/sun4/ml/swtch.s
usr/src/uts/sun4/os/mp_startup.c
usr/src/uts/sun4/os/startup.c
usr/src/uts/sun4/vm/sfmmu.c
usr/src/uts/sun4u/cpu/opl_olympus.c
usr/src/uts/sun4u/cpu/opl_olympus_asm.s
usr/src/uts/sun4u/cpu/spitfire.c
usr/src/uts/sun4u/cpu/spitfire_asm.s
usr/src/uts/sun4u/cpu/us3_common.c
usr/src/uts/sun4u/cpu/us3_common_asm.s
usr/src/uts/sun4u/cpu/us3_common_mmu.c
usr/src/uts/sun4u/lw8/os/lw8_platmod.c
usr/src/uts/sun4u/ml/mach_offsets.in
usr/src/uts/sun4u/opl/io/drmach.c
usr/src/uts/sun4u/opl/io/mc-opl.c
usr/src/uts/sun4u/opl/os/opl.c
usr/src/uts/sun4u/os/cpr_impl.c
usr/src/uts/sun4u/os/ppage.c
usr/src/uts/sun4u/serengeti/io/sbdp_cpu.c
usr/src/uts/sun4u/serengeti/os/serengeti.c
usr/src/uts/sun4u/starcat/io/drmach.c
usr/src/uts/sun4u/starcat/ml/drmach_asm.s
usr/src/uts/sun4u/starcat/sys/starcat.h
usr/src/uts/sun4u/starfire/io/drmach.c
usr/src/uts/sun4u/sunfire/io/ac_test.c
usr/src/uts/sun4u/sunfire/io/fhc.c
usr/src/uts/sun4u/sys/cpu_module.h
usr/src/uts/sun4u/sys/machcpuvar.h
usr/src/uts/sun4u/sys/machparam.h
usr/src/uts/sun4u/sys/opl.h
usr/src/uts/sun4u/vm/mach_sfmmu.c
usr/src/uts/sun4u/vm/mach_sfmmu_asm.s
usr/src/uts/sun4v/cpu/common_asm.s
usr/src/uts/sun4v/ml/mach_offsets.in
usr/src/uts/sun4v/os/fillsysinfo.c
usr/src/uts/sun4v/os/ppage.c
usr/src/uts/sun4v/sys/cpu_module.h
usr/src/uts/sun4v/sys/machcpuvar.h
usr/src/uts/sun4v/sys/machparam.h
usr/src/uts/sun4v/vm/mach_sfmmu_asm.s
--- a/usr/src/cmd/perl/contrib/Sun/Solaris/Kstat/Kstat.xs	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/cmd/perl/contrib/Sun/Solaris/Kstat/Kstat.xs	Tue Jun 20 07:21:09 2006 -0700
@@ -486,9 +486,6 @@
 	SAVE_INT32(self, sfmmugp, sf_khash_searches);
 	SAVE_INT32(self, sfmmugp, sf_khash_links);
 	SAVE_INT32(self, sfmmugp, sf_swapout);
-	SAVE_INT32(self, sfmmugp, sf_ctxfree);
-	SAVE_INT32(self, sfmmugp, sf_ctxdirty);
-	SAVE_INT32(self, sfmmugp, sf_ctxsteal);
 	SAVE_INT32(self, sfmmugp, sf_tsb_alloc);
 	SAVE_INT32(self, sfmmugp, sf_tsb_allocfail);
 	SAVE_INT32(self, sfmmugp, sf_tsb_sectsb_create);
@@ -531,10 +528,7 @@
 	SAVE_INT32(self, sfmmugp, sf_tsb_resize_failures);
 	SAVE_INT32(self, sfmmugp, sf_tsb_reloc);
 	SAVE_INT32(self, sfmmugp, sf_user_vtop);
-	SAVE_INT32(self, sfmmugp, sf_ctx_swap);
-	SAVE_INT32(self, sfmmugp, sf_tlbflush_all);
-	SAVE_INT32(self, sfmmugp, sf_tlbflush_ctx);
-	SAVE_INT32(self, sfmmugp, sf_tlbflush_deferred);
+	SAVE_INT32(self, sfmmugp, sf_ctx_inv);
 	SAVE_INT32(self, sfmmugp, sf_tlb_reprog_pgsz);
 }
 #endif
--- a/usr/src/uts/common/os/fork.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/common/os/fork.c	Tue Jun 20 07:21:09 2006 -0700
@@ -242,11 +242,12 @@
 			while (*orphpp != cp)
 				orphpp = &(*orphpp)->p_nextorph;
 			*orphpp = cp->p_nextorph;
-			ASSERT(p->p_child == cp);
-			p->p_child = cp->p_sibling;
-			if (p->p_child) {
-				p->p_child->p_psibling = NULL;
-			}
+			if (p->p_child == cp)
+				p->p_child = cp->p_sibling;
+			if (cp->p_sibling)
+				cp->p_sibling->p_psibling = cp->p_psibling;
+			if (cp->p_psibling)
+				cp->p_psibling->p_sibling = cp->p_sibling;
 			mutex_enter(&cp->p_lock);
 			tk = cp->p_task;
 			task_detach(cp);
@@ -594,11 +595,12 @@
 	while (*orphpp != cp)
 		orphpp = &(*orphpp)->p_nextorph;
 	*orphpp = cp->p_nextorph;
-	ASSERT(p->p_child == cp);
-	p->p_child = cp->p_sibling;
-	if (p->p_child) {
-		p->p_child->p_psibling = NULL;
-	}
+	if (p->p_child == cp)
+		p->p_child = cp->p_sibling;
+	if (cp->p_sibling)
+		cp->p_sibling->p_psibling = cp->p_psibling;
+	if (cp->p_psibling)
+		cp->p_psibling->p_sibling = cp->p_sibling;
 	pid_exit(cp);
 	mutex_exit(&pidlock);
 
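
The two fork.c hunks above replace an unlink that asserted the child was always at the head of its parent's p_child list with a general removal from the doubly linked sibling list. A minimal sketch of the new unlink logic, using simplified stand-in types rather than the real proc_t:

    struct proc {
            struct proc *p_child;       /* first child */
            struct proc *p_sibling;     /* next sibling */
            struct proc *p_psibling;    /* previous sibling */
    };

    /* Unlink cp from its parent p, wherever cp sits in the list. */
    static void
    unlink_child(struct proc *p, struct proc *cp)
    {
            if (p->p_child == cp)       /* cp may or may not be the head */
                    p->p_child = cp->p_sibling;
            if (cp->p_sibling != NULL)
                    cp->p_sibling->p_psibling = cp->p_psibling;
            if (cp->p_psibling != NULL)
                    cp->p_psibling->p_sibling = cp->p_sibling;
    }
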
--- a/usr/src/uts/common/vm/seg_kmem.h	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/common/vm/seg_kmem.h	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -58,7 +57,6 @@
 extern struct seg kvseg32;	/* 32-bit kernel heap segment */
 extern vmem_t *heap32_arena;	/* 32-bit kernel heap arena */
 extern vmem_t *heaptext_arena;	/* kernel text arena, from heap */
-extern struct ctx *kctx;	/* kernel context */
 extern struct as kas;		/* kernel address space */
 extern struct vnode kvp;	/* vnode for all segkmem pages */
 extern int segkmem_reloc;	/* enable/disable segkmem relocatable pages */
--- a/usr/src/uts/sfmmu/ml/sfmmu_asm.s	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sfmmu/ml/sfmmu_asm.s	Tue Jun 20 07:21:09 2006 -0700
@@ -482,6 +482,19 @@
 /*
  * sfmmu related subroutines
  */
+uint_t
+sfmmu_disable_intrs()
+{ return(0); }
+
+/* ARGSUSED */
+void
+sfmmu_enable_intrs(uint_t pstate_save)
+{}
+
+/* ARGSUSED */
+void
+sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp)
+{}
 
 /*
  * Use cas, if tte has changed underneath us then reread and try again.
@@ -534,6 +547,280 @@
 sfmmu_panic5:
 	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"
 
+	.global	sfmmu_panic6
+sfmmu_panic6:
+	.asciz	"sfmmu_asm: interrupts not disabled"
+
+	.global	sfmmu_panic7
+sfmmu_panic7:
+	.asciz	"sfmmu_asm: kernel as"
+
+	.global	sfmmu_panic8
+sfmmu_panic8:
+	.asciz	"sfmmu_asm: gnum is zero"
+
+	.global	sfmmu_panic9
+sfmmu_panic9:
+	.asciz	"sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"
+
+        ENTRY(sfmmu_disable_intrs)
+        rdpr    %pstate, %o0
+#ifdef DEBUG
+	PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
+#endif /* DEBUG */
+        retl
+          wrpr   %o0, PSTATE_IE, %pstate
+        SET_SIZE(sfmmu_disable_intrs)
+	
+	ENTRY(sfmmu_enable_intrs)
+        retl
+          wrpr    %g0, %o0, %pstate
+        SET_SIZE(sfmmu_enable_intrs)
+
+/*
+ * This routine is called both by resume() and sfmmu_get_ctx() to
+ * allocate a new context for the process on an MMU.
+ * If allocflag == 1, allocate a ctx when the HAT mmu cnum == INVALID.
+ * If allocflag == 0, do not allocate a ctx if the HAT mmu cnum == INVALID,
+ * which is the case when sfmmu_alloc_ctx is called from resume().
+ *
+ * The caller must disable interrupts before entering this routine.
+ * To reduce ctx switch overhead, the code contains both 'fast path' and
+ * 'slow path' code. The fast path covers the common case, where only
+ * a quick check is needed and no real ctx allocation is required.
+ * It can be done without holding the per-process (PP) lock.
+ * The 'slow path' must be protected by the PP lock and performs the
+ * ctx allocation.
+ * The hardware context register and the HAT mmu cnum are updated accordingly.
+ *
+ * %o0 - sfmmup
+ * %o1 - allocflag
+ * %o2 - CPU
+ */
+        ENTRY_NP(sfmmu_alloc_ctx)
+
+#ifdef DEBUG
+	sethi   %hi(ksfmmup), %o3
+	ldx     [%o3 + %lo(ksfmmup)], %o3
+	cmp     %o3, %o0
+	bne,pt   %xcc, 0f
+	  nop
+
+	sethi   %hi(panicstr), %g1		! if kernel as, panic
+        ldx     [%g1 + %lo(panicstr)], %g1
+        tst     %g1
+        bnz,pn  %icc, 7f
+          nop
+
+	sethi	%hi(sfmmu_panic7), %o0
+	call	panic
+	  or	%o0, %lo(sfmmu_panic7), %o0
+
+7:
+	retl
+	  nop
+
+0:
+	PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
+#endif /* DEBUG */	
+
+	! load global mmu_ctxp info
+	ldx	[%o2 + CPU_MMU_CTXP], %o3		! %o3 = mmu_ctx_t ptr
+        lduw	[%o2 + CPU_MMU_IDX], %g2		! %g2 = mmu index
+
+	! load global mmu_ctxp gnum
+	ldx	[%o3 + MMU_CTX_GNUM], %o4		! %o4 = mmu_ctxp->gnum
+
+#ifdef DEBUG
+	cmp	%o4, %g0		! mmu_ctxp->gnum should never be 0
+	bne,pt	%xcc, 3f
+	  nop
+	
+	sethi   %hi(panicstr), %g1	! test if panicstr is already set
+        ldx     [%g1 + %lo(panicstr)], %g1
+        tst     %g1
+        bnz,pn  %icc, 3f
+          nop
+	
+	sethi	%hi(sfmmu_panic8), %o0
+	call	panic
+	  or	%o0, %lo(sfmmu_panic8), %o0
+3:
+#endif
+
+	! load HAT sfmmu_ctxs[mmuid] gnum, cnum
+
+	sllx	%g2, SFMMU_MMU_CTX_SHIFT, %g2
+	add	%o0, %g2, %g2		! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
+
+	/*
+	 * %g5 = sfmmu gnum returned
+	 * %g6 = sfmmu cnum returned
+	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
+	 * %g4 = scratch
+	 *
+	 * Fast path code, do a quick check.
+	 */
+	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)
+	
+	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
+	bne,pt	%icc, 1f			! valid hat cnum, check gnum
+	  nop
+
+	! cnum == INVALID, check allocflag
+	brz,pt  %o1, 8f		! allocflag == 0, skip ctx allocation, bail
+	  mov	%g6, %o1
+
+	! (invalid HAT cnum) && (allocflag == 1)
+	ba,pt	%icc, 2f
+	  nop
+1:
+	! valid HAT cnum, check gnum
+	cmp	%g5, %o4
+	be,a,pt	%icc, 8f			! gnum unchanged, go to done
+	  mov	%g6, %o1
+
+2:
+	/* 
+	 * Grab the per-process (PP) sfmmu_ctx_lock spinlock,
+	 * then run the 'slow path' code.
+	 */
+	ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = per process (PP) lock
+3:
+	brz	%g3, 5f
+	  nop
+4:
+	brnz,a,pt       %g3, 4b				! spin if lock is 1
+	  ldub	[%o0 + SFMMU_CTX_LOCK], %g3
+	ba	%xcc, 3b				! retry the lock
+	  ldstub	[%o0 + SFMMU_CTX_LOCK], %g3    ! %g3 = PP lock
+
+5:
+	membar  #LoadLoad
+	/*
+	 * %g5 = sfmmu gnum returned
+	 * %g6 = sfmmu cnum returned
+	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
+	 * %g4 = scratch
+	 */
+	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)
+
+	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
+	bne,pt	%icc, 1f			! valid hat cnum, check gnum
+	  nop
+
+	! cnum == INVALID, check allocflag
+	brz,pt	%o1, 2f		! allocflag == 0, called from resume, set hw
+	  mov	%g6, %o1
+
+	! (invalid HAT cnum) && (allocflag == 1)
+	ba,pt	%icc, 6f
+	  nop
+1:
+	! valid HAT cnum, check gnum
+	cmp	%g5, %o4
+	be,a,pt	%icc, 2f			! gnum unchanged, go to done
+	  mov	%g6, %o1
+
+	ba,pt	%icc, 6f
+	  nop
+2:
+	membar  #LoadStore|#StoreStore
+	ba,pt %icc, 8f
+	  clrb  [%o0 + SFMMU_CTX_LOCK]
+6:
+	/*
+	 * We get here if we do not have a valid context, or
+	 * the HAT gnum does not match the global gnum. We hold the
+	 * sfmmu_ctx_lock spinlock. Allocate a new context.
+	 *
+	 * %o3 = mmu_ctxp
+	 */
+	add	%o3, MMU_CTX_CNUM, %g3
+	ld	[%o3 + MMU_CTX_NCTXS], %g4
+
+	/*
+         * %g2 = &sfmmu_ctx_t[mmuid] - SFMMU_CTXS;
+         * %g3 = mmu cnum address
+	 * %g4 = mmu nctxs
+	 *
+	 * %o0 = sfmmup
+	 * %o1 = mmu current cnum value (used as new cnum)
+	 * %o4 = mmu gnum
+	 *
+	 * %o5 = scratch
+	 */
+	ld	[%g3], %o1
+0:
+	cmp	%o1, %g4
+	bl,a,pt %icc, 1f
+	  add	%o1, 1, %o5		! %o5 = mmu_ctxp->cnum + 1
+
+	/*
+	 * cnum has reached the max; update the HAT with INVALID
+	 */
+	set	INVALID_CONTEXT, %o1
+
+	/* 
+	 * Update the hat cnum to INVALID; sun4v sfmmu_load_mmustate checks
+	 * the hat cnum to determine whether to set the number of TSBs to 0.
+	 */
+	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
+	or	%o4, %o1, %o4
+	stx	%o4, [%g2 + SFMMU_CTXS]
+
+	membar  #LoadStore|#StoreStore
+	ba,pt	%icc, 8f
+	  clrb	[%o0 + SFMMU_CTX_LOCK]
+1:
+	! %g3 = addr of mmu_ctxp->cnum
+	! %o5 = mmu_ctxp->cnum + 1
+	cas	[%g3], %o1, %o5
+	cmp	%o1, %o5
+	bne,a,pn %xcc, 0b	! cas failed
+	  ld	[%g3], %o1
+
+#ifdef DEBUG
+        set	MAX_SFMMU_CTX_VAL, %o5
+	cmp	%o1, %o5
+	ble,pt %icc, 2f
+	  nop
+	
+	sethi	%hi(sfmmu_panic9), %o0
+	call	panic
+	  or	%o0, %lo(sfmmu_panic9), %o0
+2:	
+#endif
+	! update hat gnum and cnum
+	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
+	or	%o4, %o1, %o4
+	stx	%o4, [%g2 + SFMMU_CTXS]
+
+	membar  #LoadStore|#StoreStore
+	clrb	[%o0 + SFMMU_CTX_LOCK]
+
+8:
+	/*
+	 * program the secondary context register
+	 *
+	 * %o1 = cnum
+	 */
+#ifdef	sun4u
+	ldub	[%o0 + SFMMU_CEXT], %o2
+	sll	%o2, CTXREG_EXT_SHIFT, %o2
+	or	%o1, %o2, %o1
+#endif
+
+	mov	MMU_SCONTEXT, %o4
+	sethi	%hi(FLUSH_ADDR), %o5
+	stxa	%o1, [%o4]ASI_MMU_CTX	         ! set 2nd context reg.
+	flush	%o5
+
+	retl
+	nop
+
+	SET_SIZE(sfmmu_alloc_ctx)
+
 
 	ENTRY_NP(sfmmu_modifytte)
 	ldx	[%o2], %g3			/* current */
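
The sfmmu_alloc_ctx assembly above hands out cnums from a lock-free per-MMU counter. Below is a self-contained C model of just the cas loop in the slow path (labels 0/1); types are simplified stand-ins, and the real code additionally packs <gnum|cnum> into one 64-bit word in sfmmu_ctxs[] and runs with interrupts disabled:

    #include <stdatomic.h>
    #include <stdint.h>

    #define INVALID_CONTEXT 1

    typedef struct {
            atomic_uint cnum;   /* next cnum to hand out on this MMU */
            unsigned nctxs;     /* number of cnums this MMU supports */
    } mmu_ctx_model_t;

    /*
     * Claim the next cnum, or return INVALID_CONTEXT when the cnum space
     * is exhausted and a wrap-around (gnum bump plus TLB flush) is needed.
     */
    static unsigned
    alloc_cnum(mmu_ctx_model_t *m)
    {
            unsigned c = atomic_load(&m->cnum);

            while (c < m->nctxs) {
                    /* cas: advance the shared counter to claim cnum c */
                    if (atomic_compare_exchange_weak(&m->cnum, &c, c + 1))
                            return (c);
                    /* on failure c was reloaded by the CAS; retry */
            }
            return (INVALID_CONTEXT);
    }
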
@@ -1062,21 +1349,7 @@
 	 */
 	rdpr	%pstate, %o5
 #ifdef DEBUG
-	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
-	bnz,pt 	%icc, 1f			/* disabled, panic	 */
-	  nop
-
-	sethi	%hi(panicstr), %g1
-	ldx	[%g1 + %lo(panicstr)], %g1
-	tst	%g1
-	bnz,pt	%icc, 1f
-	  nop
-
-	save	%sp, -SA(MINFRAME), %sp
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	 or	%o0, %lo(sfmmu_panic1), %o0
-1:
+	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
 #endif /* DEBUG */
 
 	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
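
This hunk and the matching ones below fold a repeated open-coded DEBUG sequence into the PANIC_IF_INTR_DISABLED_PSTR macro. A C-level sketch of the check being made, on the assumption that the macro mirrors the deleted inline code:

    #include <stdint.h>

    extern const char *panicstr;            /* non-NULL once panicking */
    extern void panic(const char *, ...);

    /* Panic if PSTATE.IE shows interrupts are already disabled, unless
     * a panic is already in progress (assumed macro behavior). */
    static void
    assert_intrs_enabled(uint64_t pstate)
    {
            if (!(pstate & PSTATE_IE) && panicstr == NULL)
                    panic("sfmmu_asm: interrupts already disabled");
    }
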
@@ -1117,21 +1390,7 @@
 	 */
 	rdpr	%pstate, %o5			! %o5 = saved pstate
 #ifdef DEBUG
-	andcc	%o5, PSTATE_IE, %g0		! if interrupts already
-	bnz,pt	%icc, 1f			! disabled, panic
-	  nop
-
-	sethi	%hi(panicstr), %g1
-	ldx	[%g1 + %lo(panicstr)], %g1
-	tst	%g1
-	bnz,pt	%icc, 1f
-	  nop
-
-	save	%sp, -SA(MINFRAME), %sp
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	  or	%o0, %lo(sfmmu_panic1), %o0
-1:
+	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
 #endif /* DEBUG */
 	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts
 
@@ -1367,20 +1626,7 @@
 
 	rdpr	%pstate, %o5
 #ifdef DEBUG
-	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
-	bnz,pt 	%icc, 3f			/* disabled, panic	 */
-	  nop
-
-	sethi	%hi(panicstr), %g1
-	ldx	[%g1 + %lo(panicstr)], %g1
-	tst	%g1
-	bnz,pt	%icc, 3f
-	  nop
-
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	 or	%o0, %lo(sfmmu_panic1), %o0
-3:
+	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l4, %g1)
 #endif /* DEBUG */
 	/*
 	 * disable interrupts, clear Address Mask to access 64 bit physaddr
@@ -1846,11 +2092,6 @@
 
 #else /* lint */
 
-
-#if (CTX_SIZE != (1 << CTX_SZ_SHIFT))
-#error - size of context struct does not match with CTX_SZ_SHIFT
-#endif
-
 #if (IMAP_SEG != 0)
 #error - ism_map->ism_seg offset is not zero
 #endif
@@ -3317,21 +3558,7 @@
  	 */
  	rdpr	%pstate, %o3
 #ifdef DEBUG
-	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
-	bnz,pt	%icc, 1f			/* disabled, panic	 */
-	  nop
-
-	sethi	%hi(panicstr), %g1
-	ldx	[%g1 + %lo(panicstr)], %g1
-	tst	%g1
-	bnz,pt	%icc, 1f
-	  nop
-
-	save	%sp, -SA(MINFRAME), %sp
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	 or	%o0, %lo(sfmmu_panic1), %o0
-1:
+	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
 #endif
 	/*
 	 * disable interrupts to protect the TSBMISS area
--- a/usr/src/uts/sfmmu/ml/sfmmu_kdi.s	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sfmmu/ml/sfmmu_kdi.s	Tue Jun 20 07:21:09 2006 -0700
@@ -225,7 +225,7 @@
  * uint64_t
  * kdi_vatotte(uintptr_t va, int cnum)
  * {
- *	sfmmu_t *sfmmup = ctxs[cnum].ctx_sfmmu;
+ *	sfmmu_t *sfmmup = ksfmmup;
  *	uint64_t hmebpa, hmetag, hmeblkpa;
  *	int i;
  *
@@ -265,7 +265,13 @@
 #else
 
 	/*
-	 * Invocation in normal context as a VA-to-TTE translator.
+	 * Invocation in normal context as a VA-to-TTE translator
+	 * for kernel context only. This routine returns 0 on
+	 * success and -1 on error.
+	 *
+	 * %o0 = VA, input register
+	 * %o1 = KCONTEXT
+	 * %o2 = ttep, output register
 	 */
 	ENTRY_NP(kdi_vatotte)
 	mov	%o0, %g1		/* VA in %g1 */
@@ -288,21 +294,21 @@
 	mov	-1, %o0
 	SET_SIZE(kdi_vatotte)
 
+	/*
+	 * %g1 = vaddr passed in; tte or 0 (error) on return
+	 * %g2 = KCONTEXT
+	 * %g7 = return address
+	 */
 	ENTRY_NP(kdi_trap_vatotte)
-	set	nctxs, %g3
-	ld	[%g3], %g3
-	cmp	%g2, %g3
-	bge,a	%xcc, 6f
-	clr	%g1
 
-	set	ctxs, %g3
-	ldx	[%g3], %g3
-	mulx	%g2, CTX_SIZE, %g2
-	add	%g2, %g3, %g2
-	add	%g2, CTX_SFMMUP, %g2
-	ldx	[%g2], %g2		/* VA %g1, sfmmup %g2 */
+	cmp	%g2, KCONTEXT		/* make sure called in kernel ctx */
+	bne,a,pn %icc, 6f
+	  clr	%g1
 
-	mov	1, %g3			/* VA %g1, sfmmup %g2, idx %g3 */
+	sethi   %hi(ksfmmup), %g2
+        ldx     [%g2 + %lo(ksfmmup)], %g2
+
+	mov	1, %g3			/* VA %g1, ksfmmup %g2, idx %g3 */
 	mov	HBLK_RANGE_SHIFT, %g4
 	ba	3f
 	nop
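
With the global ctxs[] array gone, kdi_trap_vatotte can no longer map a cnum back to its HAT, so the debugger path now translates kernel-context addresses only. A hedged C restatement of the new entry check (lookup_tte is a hypothetical stand-in for the hmeblk hash walk that follows):

    extern uint64_t lookup_tte(sfmmu_t *, uintptr_t);  /* hypothetical */

    static uint64_t
    kdi_vatotte_model(uintptr_t va, int cnum)
    {
            if (cnum != KCONTEXT)
                    return (0);             /* error: not the kernel ctx */
            /* ksfmmup is now the only HAT consulted */
            return (lookup_tte(ksfmmup, va));
    }
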
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c	Tue Jun 20 07:21:09 2006 -0700
@@ -38,6 +38,7 @@
  */
 
 #include <sys/types.h>
+#include <sys/kstat.h>
 #include <vm/hat.h>
 #include <vm/hat_sfmmu.h>
 #include <vm/page.h>
@@ -149,13 +150,7 @@
  * Private sfmmu data structures for hat management
  */
 static struct kmem_cache *sfmmuid_cache;
-
-/*
- * Private sfmmu data structures for ctx management
- */
-static struct ctx	*ctxhand;	/* hand used while stealing ctxs */
-static struct ctx	*ctxfree;	/* head of free ctx list */
-static struct ctx	*ctxdirty;	/* head of dirty ctx list */
+static struct kmem_cache *mmuctxdom_cache;
 
 /*
  * Private sfmmu data structures for tsb management
@@ -173,7 +168,6 @@
 static struct kmem_cache *sfmmu1_cache;
 static struct kmem_cache *pa_hment_cache;
 
-static kmutex_t 	ctx_list_lock;	/* mutex for ctx free/dirty lists */
 static kmutex_t 	ism_mlist_lock;	/* mutex for ism mapping list */
 /*
  * private data for ism
@@ -315,8 +309,7 @@
 static int	tst_tnc(page_t *pp, pgcnt_t);
 static void	conv_tnc(page_t *pp, int);
 
-static struct ctx *sfmmu_get_ctx(sfmmu_t *);
-static void	sfmmu_free_ctx(sfmmu_t *, struct ctx *);
+static void	sfmmu_get_ctx(sfmmu_t *);
 static void	sfmmu_free_sfmmu(sfmmu_t *);
 
 static void	sfmmu_gettte(struct hat *, caddr_t, tte_t *);
@@ -335,9 +328,7 @@
 			pfn_t, int);
 static void	sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
 static void	sfmmu_tlb_range_demap(demap_range_t *);
-static void	sfmmu_tlb_ctx_demap(sfmmu_t *);
-static void	sfmmu_tlb_all_demap(void);
-static void	sfmmu_tlb_swap_ctx(sfmmu_t *, struct ctx *);
+static void	sfmmu_invalidate_ctx(sfmmu_t *);
 static void	sfmmu_sync_mmustate(sfmmu_t *);
 
 static void 	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
@@ -378,11 +369,6 @@
 static void	sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
 			struct hmehash_bucket *);
 static void	sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
-
-static void	sfmmu_reuse_ctx(struct ctx *, sfmmu_t *);
-static void	sfmmu_disallow_ctx_steal(sfmmu_t *);
-static void	sfmmu_allow_ctx_steal(sfmmu_t *);
-
 static void	sfmmu_rm_large_mappings(page_t *, int);
 
 static void	hat_lock_init(void);
@@ -410,12 +396,14 @@
 static void	sfmmu_kpm_vac_unload(page_t *, caddr_t);
 static void	sfmmu_kpm_demap_large(caddr_t);
 static void	sfmmu_kpm_demap_small(caddr_t);
-static void	sfmmu_kpm_demap_tlbs(caddr_t, int);
+static void	sfmmu_kpm_demap_tlbs(caddr_t);
 static void	sfmmu_kpm_hme_unload(page_t *);
 static kpm_hlk_t *sfmmu_kpm_kpmp_enter(page_t *, pgcnt_t);
 static void	sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp);
 static void	sfmmu_kpm_page_cache(page_t *, int, int);
 
+static void	sfmmu_ctx_wrap_around(mmu_ctx_t *);
+
 /* kpm globals */
 #ifdef	DEBUG
 /*
@@ -447,8 +435,13 @@
 uint64_t	khme_hash_pa;		/* PA of khme_hash */
 int 		uhmehash_num;		/* # of buckets in user hash table */
 int 		khmehash_num;		/* # of buckets in kernel hash table */
-struct ctx	*ctxs;			/* used by <machine/mmu.c> */
-uint_t		nctxs;			/* total number of contexts */
+
+uint_t		max_mmu_ctxdoms = 0;	/* max context domains in the system */
+mmu_ctx_t	**mmu_ctxs_tbl;		/* global array of context domains */
+uint64_t	mmu_saved_gnum = 0;	/* to init incoming MMUs' gnums */
+
+#define	DEFAULT_NUM_CTXS_PER_MMU 8192
+static uint_t	nctxs = DEFAULT_NUM_CTXS_PER_MMU;
 
 int		cache;			/* describes system cache */
 
@@ -567,7 +560,6 @@
  * Global data
  */
 sfmmu_t 	*ksfmmup;		/* kernel's hat id */
-struct ctx 	*kctx;			/* kernel's context */
 
 #ifdef DEBUG
 static void	chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
@@ -682,26 +674,15 @@
  */
 #define	MAX_CB_ADDR	32
 
-#ifdef DEBUG
-
-/*
- * Debugging trace ring buffer for stolen and freed ctxs.  The
- * stolen_ctxs[] array is protected by the ctx_trace_mutex.
- */
-struct ctx_trace stolen_ctxs[TRSIZE];
-struct ctx_trace *ctx_trace_first = &stolen_ctxs[0];
-struct ctx_trace *ctx_trace_last = &stolen_ctxs[TRSIZE-1];
-struct ctx_trace *ctx_trace_ptr = &stolen_ctxs[0];
-kmutex_t ctx_trace_mutex;
-uint_t	num_ctx_stolen = 0;
-
-int	ism_debug = 0;
-
-#endif /* DEBUG */
-
 tte_t	hw_tte;
 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
 
+static char	*mmu_ctx_kstat_names[] = {
+	"mmu_ctx_tsb_exceptions",
+	"mmu_ctx_tsb_raise_exception",
+	"mmu_ctx_wrap_around",
+};
+
 /*
  * kpm virtual address to physical address
  */
@@ -1003,9 +984,8 @@
 void
 hat_init(void)
 {
-	struct ctx	*ctx;
-	struct ctx	*cur_ctx = NULL;
 	int 		i;
+	size_t		size;
 
 	hat_lock_init();
 	hat_kstat_init();
@@ -1030,31 +1010,64 @@
 	uhmehash_num--;		/* make sure counter starts from 0 */
 
 	/*
-	 * Initialize ctx structures and list lock.
-	 * We keep two lists of ctxs. The "free" list contains contexts
-	 * ready to use.  The "dirty" list contains contexts that are OK
-	 * to use after flushing the TLBs of any stale mappings.
-	 */
-	mutex_init(&ctx_list_lock, NULL, MUTEX_DEFAULT, NULL);
-	kctx = &ctxs[KCONTEXT];
-	ctx = &ctxs[NUM_LOCKED_CTXS];
-	ctxhand = ctxfree = ctx;		/* head of free list */
-	ctxdirty = NULL;
-	for (i = NUM_LOCKED_CTXS; i < nctxs; i++) {
-		cur_ctx = &ctxs[i];
-		cur_ctx->ctx_flags = CTX_FREE_FLAG;
-		cur_ctx->ctx_free = &ctxs[i + 1];
-	}
-	cur_ctx->ctx_free = NULL;		/* tail of free list */
+	 * Allocate context domain structures.
+	 *
+	 * A platform may choose to modify max_mmu_ctxdoms in
+	 * set_platform_defaults(). If a platform does not define
+	 * a set_platform_defaults() or does not choose to modify
+	 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
+	 *
+	 * For sun4v, there will be one global context domain; this is to
+	 * avoid the ldom cpu substitution problem.
+	 *
+	 * For all platforms that have CPUs sharing MMUs, this
+	 * value must be defined.
+	 */
+	if (max_mmu_ctxdoms == 0) {
+#ifndef sun4v
+		max_mmu_ctxdoms = max_ncpus;
+#else /* sun4v */
+		max_mmu_ctxdoms = 1;
+#endif /* sun4v */
+	}
+
+	size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
+	mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
+
+	/* mmu_ctx_t is 64-byte aligned */
+	mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
+	    sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
+	/*
+	 * MMU context domain initialization for the Boot CPU.
+	 * This needs the context domains array allocated above.
+	 */
+	mutex_enter(&cpu_lock);
+	sfmmu_cpu_init(CPU);
+	mutex_exit(&cpu_lock);
 
 	/*
 	 * Intialize ism mapping list lock.
 	 */
+
 	mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
 
-	sfmmuid_cache = kmem_cache_create("sfmmuid_cache", sizeof (sfmmu_t),
-	    0, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
-	    NULL, NULL, NULL, 0);
+	/*
+	 * Each sfmmu structure carries an array of MMU context info
+	 * structures, one per context domain. The size of this array depends
+	 * on the maximum number of context domains. So, the size of the
+	 * sfmmu structure varies per platform.
+	 *
+	 * sfmmu is allocated from the static arena because trap
+	 * handlers at TL > 0 are not allowed to touch kernel relocatable
+	 * memory. sfmmu's alignment is changed to 64 bytes from the
+	 * default 8 bytes, as the lower 6 bits will be used to pass
+	 * pgcnt to vtag_flush_pgcnt_tl1.
+	 */
+	size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);
+
+	sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
+	    64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
+	    NULL, NULL, static_arena, 0);
 
 	sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
 	    sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
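
hat_init now sizes each sfmmu_t to carry one sfmmu_ctx_t slot per context domain, using the trailing-array idiom (the struct declares a one-element array, hence the max_mmu_ctxdoms - 1 in the size computation above). A minimal model of the allocation, with stand-in types:

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct {
            uint64_t gnum_cnum;              /* packed <gnum|cnum> */
    } sfmmu_ctx_model_t;

    typedef struct {
            /* ... fixed HAT fields ... */
            sfmmu_ctx_model_t sfmmu_ctxs[1]; /* really max_mmu_ctxdoms long */
    } sfmmu_model_t;

    static sfmmu_model_t *
    alloc_sfmmu(unsigned max_mmu_ctxdoms)
    {
            size_t size = sizeof (sfmmu_model_t) +
                sizeof (sfmmu_ctx_model_t) * (max_mmu_ctxdoms - 1);

            /* one allocation covers the HAT plus all per-domain slots */
            return (calloc(1, size));
    }
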
@@ -1233,7 +1246,6 @@
 hat_lock_init()
 {
 	int i;
-	struct ctx *ctx;
 
 	/*
 	 * initialize the array of mutexes protecting a page's mapping
@@ -1256,14 +1268,6 @@
 	for (i = 0; i < SFMMU_NUM_LOCK; i++)
 		mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
 		    NULL);
-
-#ifdef	DEBUG
-	mutex_init(&ctx_trace_mutex, NULL, MUTEX_DEFAULT, NULL);
-#endif	/* DEBUG */
-
-	for (ctx = ctxs, i = 0; i < nctxs; i++, ctx++) {
-		rw_init(&ctx->ctx_rwlock, NULL, RW_DEFAULT, NULL);
-	}
 }
 
 extern caddr_t kmem64_base, kmem64_end;
@@ -1279,23 +1283,21 @@
 hat_alloc(struct as *as)
 {
 	sfmmu_t *sfmmup;
-	struct ctx *ctx;
 	int i;
+	uint64_t cnum;
 	extern uint_t get_color_start(struct as *);
 
 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
 	sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
 	sfmmup->sfmmu_as = as;
 	sfmmup->sfmmu_flags = 0;
+	LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
 
 	if (as == &kas) {
-		ctx = kctx;
 		ksfmmup = sfmmup;
-		sfmmup->sfmmu_cnum = ctxtoctxnum(ctx);
-		ASSERT(sfmmup->sfmmu_cnum == KCONTEXT);
 		sfmmup->sfmmu_cext = 0;
-		ctx->ctx_sfmmu = sfmmup;
-		ctx->ctx_flags = 0;
+		cnum = KCONTEXT;
+
 		sfmmup->sfmmu_clrstart = 0;
 		sfmmup->sfmmu_tsb = NULL;
 		/*
@@ -1311,8 +1313,9 @@
 		 * we fault when we try to run and so have to get
 		 * another ctx.
 		 */
-		sfmmup->sfmmu_cnum = INVALID_CONTEXT;
 		sfmmup->sfmmu_cext = 0;
+		cnum = INVALID_CONTEXT;
+
 		/* initialize original physical page coloring bin */
 		sfmmup->sfmmu_clrstart = get_color_start(as);
 #ifdef DEBUG
@@ -1331,6 +1334,13 @@
 		sfmmup->sfmmu_flags = HAT_SWAPPED;
 		ASSERT(sfmmup->sfmmu_tsb != NULL);
 	}
+
+	ASSERT(max_mmu_ctxdoms > 0);
+	for (i = 0; i < max_mmu_ctxdoms; i++) {
+		sfmmup->sfmmu_ctxs[i].cnum = cnum;
+		sfmmup->sfmmu_ctxs[i].gnum = 0;
+	}
+
 	sfmmu_setup_tsbinfo(sfmmup);
 	for (i = 0; i < max_mmu_page_sizes; i++) {
 		sfmmup->sfmmu_ttecnt[i] = 0;
@@ -1355,6 +1365,164 @@
 }
 
 /*
+ * Create per-MMU context domain kstats for a given MMU ctx.
+ */
+static void
+sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
+{
+	mmu_ctx_stat_t	stat;
+	kstat_t		*mmu_kstat;
+
+	ASSERT(MUTEX_HELD(&cpu_lock));
+	ASSERT(mmu_ctxp->mmu_kstat == NULL);
+
+	mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
+	    "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
+
+	if (mmu_kstat == NULL) {
+		cmn_err(CE_WARN, "kstat_create for MMU %d failed",
+		    mmu_ctxp->mmu_idx);
+	} else {
+		mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
+		for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
+			kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
+			    mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
+		mmu_ctxp->mmu_kstat = mmu_kstat;
+		kstat_install(mmu_kstat);
+	}
+}
+
+/*
+ * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
+ * context domain information for a given CPU. If a platform does not
+ * specify that interface, then the function below is used instead to return
+ * default information. The defaults are as follows:
+ *
+ *	- For sun4u systems there's one MMU context domain per CPU.
+ *	  This default is used by all sun4u systems except OPL. OPL systems
+ *	  provide a platform-specific interface to map CPU ids to MMU ids
+ *	  because on OPL more than 1 CPU shares a single MMU.
+ *	  Note that on sun4v, there is one global context domain for
+ *	  the entire system. This is to avoid running into potential problems
+ *	  with the ldom physical cpu substitution feature.
+ *	- The number of MMU context IDs supported on any CPU in the
+ *	  system is 8K.
+ */
+/*ARGSUSED*/
+static void
+sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
+{
+	infop->mmu_nctxs = nctxs;
+#ifndef sun4v
+	infop->mmu_idx = cpu[cpuid]->cpu_seqid;
+#else /* sun4v */
+	infop->mmu_idx = 0;
+#endif /* sun4v */
+}
+
+/*
+ * Called during CPU initialization to set the MMU context-related information
+ * for a CPU.
+ *
+ * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
+ */
+void
+sfmmu_cpu_init(cpu_t *cp)
+{
+	mmu_ctx_info_t	info;
+	mmu_ctx_t	*mmu_ctxp;
+
+	ASSERT(MUTEX_HELD(&cpu_lock));
+
+	if (&plat_cpuid_to_mmu_ctx_info == NULL)
+		sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
+	else
+		plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
+
+	ASSERT(info.mmu_idx < max_mmu_ctxdoms);
+
+	if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
+		/* Each mmu_ctx is cacheline aligned. */
+		mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
+		bzero(mmu_ctxp, sizeof (mmu_ctx_t));
+
+		mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
+		    (void *)ipltospl(DISP_LEVEL));
+		mmu_ctxp->mmu_idx = info.mmu_idx;
+		mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
+		/*
+		 * Globally, for the lifetime of a system, the
+		 * gnum must always increase.
+		 * mmu_saved_gnum is protected by the cpu_lock.
+		 */
+		mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
+		mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
+
+		sfmmu_mmu_kstat_create(mmu_ctxp);
+
+		mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
+	} else {
+		ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
+	}
+
+	/*
+	 * The mmu_lock is acquired here to prevent races with
+	 * the wrap-around code.
+	 */
+	mutex_enter(&mmu_ctxp->mmu_lock);
+
+
+	mmu_ctxp->mmu_ncpus++;
+	CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
+	CPU_MMU_IDX(cp) = info.mmu_idx;
+	CPU_MMU_CTXP(cp) = mmu_ctxp;
+
+	mutex_exit(&mmu_ctxp->mmu_lock);
+}
+
+/*
+ * Called to perform MMU context-related cleanup for a CPU.
+ */
+void
+sfmmu_cpu_cleanup(cpu_t *cp)
+{
+	mmu_ctx_t	*mmu_ctxp;
+
+	ASSERT(MUTEX_HELD(&cpu_lock));
+
+	mmu_ctxp = CPU_MMU_CTXP(cp);
+	ASSERT(mmu_ctxp != NULL);
+
+	/*
+	 * The mmu_lock is acquired here to prevent races with
+	 * the wrap-around code.
+	 */
+	mutex_enter(&mmu_ctxp->mmu_lock);
+
+	CPU_MMU_CTXP(cp) = NULL;
+
+	CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
+	if (--mmu_ctxp->mmu_ncpus == 0) {
+		mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
+		mutex_exit(&mmu_ctxp->mmu_lock);
+		mutex_destroy(&mmu_ctxp->mmu_lock);
+
+		if (mmu_ctxp->mmu_kstat)
+			kstat_delete(mmu_ctxp->mmu_kstat);
+
+		/* mmu_saved_gnum is protected by the cpu_lock. */
+		if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
+			mmu_saved_gnum = mmu_ctxp->mmu_gnum;
+
+		kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
+
+		return;
+	}
+
+	mutex_exit(&mmu_ctxp->mmu_lock);
+}
+
+/*
  * Hat_setup, makes an address space context the current active one.
  * In sfmmu this translates to setting the secondary context with the
  * corresponding context.
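
sfmmu_cpu_init dispatches through an optional platform hook to decide which domain a CPU joins. The test "&plat_cpuid_to_mmu_ctx_info == NULL" works because the symbol is presumably declared weak, so its address is null unless a platform module (such as OPL) supplies an implementation. A sketch of that idiom, with the weak declaration being an assumption:

    /* Assumed weak declaration; a platform module may override it. */
    #pragma weak plat_cpuid_to_mmu_ctx_info
    extern void plat_cpuid_to_mmu_ctx_info(processorid_t, mmu_ctx_info_t *);

    mmu_ctx_info_t info;

    if (&plat_cpuid_to_mmu_ctx_info == NULL)
            sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); /* defaults */
    else
            plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);  /* e.g. OPL */
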
@@ -1362,8 +1530,6 @@
 void
 hat_setup(struct hat *sfmmup, int allocflag)
 {
-	struct ctx *ctx;
-	uint_t ctx_num;
 	hatlock_t *hatlockp;
 
 	/* Init needs some special treatment. */
@@ -1383,24 +1549,8 @@
 		 */
 		sfmmu_tsb_swapin(sfmmup, hatlockp);
 
-		sfmmu_disallow_ctx_steal(sfmmup);
-
-		kpreempt_disable();
-
-		ctx = sfmmutoctx(sfmmup);
-		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
-		ctx_num = ctxtoctxnum(ctx);
-		ASSERT(sfmmup == ctx->ctx_sfmmu);
-		ASSERT(ctx_num >= NUM_LOCKED_CTXS);
-		sfmmu_setctx_sec(ctx_num);
-		sfmmu_load_mmustate(sfmmup);
-
-		kpreempt_enable();
-
-		/*
-		 * Allow ctx to be stolen.
-		 */
-		sfmmu_allow_ctx_steal(sfmmup);
+		sfmmu_get_ctx(sfmmup);
+
 		sfmmu_hat_exit(hatlockp);
 	} else {
 		ASSERT(allocflag == HAT_ALLOC);
@@ -1409,6 +1559,12 @@
 		kpreempt_disable();
 
 		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
+
+		/*
+		 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter; the
+		 * pagesize bits don't matter in this case since we are passing
+		 * INVALID_CONTEXT to it.
+		 */
 		sfmmu_setctx_sec(INVALID_CONTEXT);
 		sfmmu_clear_utsbinfo();
 
@@ -1455,13 +1611,7 @@
 	if (sfmmup->sfmmu_rmstat) {
 		hat_freestat(sfmmup->sfmmu_as, NULL);
 	}
-	if (!delay_tlb_flush) {
-		sfmmu_tlb_ctx_demap(sfmmup);
-		xt_sync(sfmmup->sfmmu_cpusran);
-	} else {
-		SFMMU_STAT(sf_tlbflush_deferred);
-	}
-	sfmmu_free_ctx(sfmmup, sfmmutoctx(sfmmup));
+
 	while (sfmmup->sfmmu_tsb != NULL) {
 		struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
 		sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
@@ -1495,8 +1645,6 @@
 	struct hme_blk *hmeblkp;
 	struct hme_blk *pr_hblk = NULL;
 	struct hme_blk *nx_hblk;
-	struct ctx *ctx;
-	int cnum;
 	int i;
 	uint64_t hblkpa, prevpa, nx_pa;
 	struct hme_blk *list = NULL;
@@ -1566,24 +1714,8 @@
 	 * Now free up the ctx so that others can reuse it.
 	 */
 	hatlockp = sfmmu_hat_enter(sfmmup);
-	ctx = sfmmutoctx(sfmmup);
-	cnum = ctxtoctxnum(ctx);
-
-	if (cnum != INVALID_CONTEXT) {
-		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
-		if (sfmmup->sfmmu_cnum == cnum) {
-			sfmmu_reuse_ctx(ctx, sfmmup);
-			/*
-			 * Put ctx back to the free list.
-			 */
-			mutex_enter(&ctx_list_lock);
-			CTX_SET_FLAGS(ctx, CTX_FREE_FLAG);
-			ctx->ctx_free = ctxfree;
-			ctxfree = ctx;
-			mutex_exit(&ctx_list_lock);
-		}
-		rw_exit(&ctx->ctx_rwlock);
-	}
+
+	sfmmu_invalidate_ctx(sfmmup);
 
 	/*
 	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
@@ -4658,9 +4790,8 @@
 	struct hme_blk *list = NULL;
 	int i;
 	uint64_t hblkpa, prevpa, nx_pa;
-	hatlock_t	*hatlockp;
-	struct tsb_info	*tsbinfop;
-	struct ctx	*ctx;
+	demap_range_t dmr, *dmrp;
+	cpuset_t cpuset;
 	caddr_t	endaddr = startaddr + len;
 	caddr_t	sa;
 	caddr_t	ea;
@@ -4668,34 +4799,12 @@
 	caddr_t	cb_ea[MAX_CB_ADDR];
 	int	addr_cnt = 0;
 	int	a = 0;
-	int	cnum;
-
-	hatlockp = sfmmu_hat_enter(sfmmup);
-
-	/*
-	 * Since we know we're unmapping a huge range of addresses,
-	 * just throw away the context and switch to another.  It's
-	 * cheaper than trying to unmap all of the TTEs we may find
-	 * from the TLB individually, which is too expensive in terms
-	 * of xcalls.  Better yet, if we're exiting, no need to flush
-	 * anything at all!
-	 */
-	if (!sfmmup->sfmmu_free) {
-		ctx = sfmmutoctx(sfmmup);
-		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
-		cnum = sfmmutoctxnum(sfmmup);
-		if (cnum != INVALID_CONTEXT) {
-			sfmmu_tlb_swap_ctx(sfmmup, ctx);
-		}
-		rw_exit(&ctx->ctx_rwlock);
-
-		for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
-		    tsbinfop = tsbinfop->tsb_next) {
-			if (tsbinfop->tsb_flags & TSB_SWAPPED)
-				continue;
-			sfmmu_inv_tsb(tsbinfop->tsb_va,
-			    TSB_BYTES(tsbinfop->tsb_szc));
-		}
+
+	if (sfmmup->sfmmu_free) {
+		dmrp = NULL;
+	} else {
+		dmrp = &dmr;
+		DEMAP_RANGE_INIT(sfmmup, dmrp);
 	}
 
 	/*
@@ -4731,7 +4840,7 @@
 			if (hmeblkp->hblk_vcnt != 0 ||
 			    hmeblkp->hblk_hmecnt != 0)
 				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
-				    sa, ea, NULL, flags);
+				    sa, ea, dmrp, flags);
 
 			/*
 			 * on unmap we also release the HME block itself, once
@@ -4765,6 +4874,12 @@
 			cb_sa[addr_cnt] = sa;
 			cb_ea[addr_cnt] = ea;
 			if (++addr_cnt == MAX_CB_ADDR) {
+				if (dmrp != NULL) {
+					DEMAP_RANGE_FLUSH(dmrp);
+					cpuset = sfmmup->sfmmu_cpusran;
+					xt_sync(cpuset);
+				}
+
 				for (a = 0; a < MAX_CB_ADDR; ++a) {
 					callback->hcb_start_addr = cb_sa[a];
 					callback->hcb_end_addr = cb_ea[a];
@@ -4781,6 +4896,11 @@
 	}
 
 	sfmmu_hblks_list_purge(&list);
+	if (dmrp != NULL) {
+		DEMAP_RANGE_FLUSH(dmrp);
+		cpuset = sfmmup->sfmmu_cpusran;
+		xt_sync(cpuset);
+	}
 
 	for (a = 0; a < addr_cnt; ++a) {
 		callback->hcb_start_addr = cb_sa[a];
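
Rather than swapping contexts, hat_unload_callback now batches invalidations through a demap_range_t and drains them with cross-calls. The shape of the pattern introduced above (DEMAP_RANGE_* are the real sfmmu macros; this is only an outline of the protocol, not a complete routine):

    demap_range_t dmr, *dmrp;
    cpuset_t cpuset;

    if (sfmmup->sfmmu_free) {
            dmrp = NULL;                    /* exiting: nothing to flush */
    } else {
            dmrp = &dmr;
            DEMAP_RANGE_INIT(sfmmup, dmrp); /* start an empty batch */
    }

    /* sfmmu_hblk_unload() accumulates page demaps into dmrp instead of
     * issuing one cross-call per page; the batch is drained whenever the
     * callback buffer fills, and once more at the end: */
    if (dmrp != NULL) {
            DEMAP_RANGE_FLUSH(dmrp);        /* issue the batched demaps */
            cpuset = sfmmup->sfmmu_cpusran;
            xt_sync(cpuset);                /* wait for remote CPUs */
    }
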
@@ -4788,8 +4908,6 @@
 		callback->hcb_function(callback);
 	}
 
-	sfmmu_hat_exit(hatlockp);
-
 	/*
 	 * Check TSB and TLB page sizes if the process isn't exiting.
 	 */
@@ -4797,7 +4915,6 @@
 		sfmmu_check_page_sizes(sfmmup, 0);
 }
 
-
 /*
  * Unload all the mappings in the range [addr..addr+len). addr and len must
  * be MMU_PAGESIZE aligned.
@@ -5180,7 +5297,7 @@
 	ttesz = get_hblk_ttesz(hmeblkp);
 
 	use_demap_range = (do_virtual_coloring &&
-				TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
+	    ((dmrp == NULL) || TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)));
 	if (use_demap_range) {
 		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
 	} else {
@@ -5894,18 +6011,19 @@
 		CPUSET_DEL(cpuset, CPU->cpu_id);
 
 		/* LINTED: constant in conditional context */
-		SFMMU_XCALL_STATS(KCONTEXT);
+		SFMMU_XCALL_STATS(ksfmmup);
 
 		/*
 		 * Flush TLB entry on remote CPU's
 		 */
-		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, KCONTEXT);
+		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
+		    (uint64_t)ksfmmup);
 		xt_sync(cpuset);
 
 		/*
 		 * Flush TLB entry on local CPU
 		 */
-		vtag_flushpage(addr, KCONTEXT);
+		vtag_flushpage(addr, (uint64_t)ksfmmup);
 	}
 
 	while (index != 0) {
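
TLB demap cross-calls now carry the HAT pointer (here ksfmmup) instead of a context number: with per-MMU domains, a process's cnum differs across MMUs and can change at any wrap-around, so each target CPU must resolve the cnum itself. A hedged sketch of the assumed resolution inside vtag_flushpage (helper names are hypothetical):

    extern void demap_tlb_page(caddr_t, uint_t);    /* hypothetical */

    void
    vtag_flushpage_model(caddr_t va, uint64_t sfmmup_arg)
    {
            sfmmu_t *sfmmup = (sfmmu_t *)sfmmup_arg;
            uint_t cnum;

            /* the kernel HAT always maps at KCONTEXT */
            cnum = (sfmmup == ksfmmup) ? KCONTEXT :
                sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;

            if (cnum != INVALID_CONTEXT)
                    demap_tlb_page(va, cnum);
    }
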
@@ -7710,8 +7828,7 @@
 	ism_ment_t	*free_ment = NULL;
 	ism_blk_t	*ism_blkp;
 	struct hat	*ism_hatid;
-	struct ctx	*ctx;
-	int 		cnum, found, i;
+	int 		found, i;
 	hatlock_t	*hatlockp;
 	struct tsb_info	*tsbinfo;
 	uint_t		ismshift = page_get_shift(ismszc);
@@ -7777,7 +7894,6 @@
 		ism_hatid = ism_map[i].imap_ismhat;
 		ASSERT(ism_hatid != NULL);
 		ASSERT(ism_hatid->sfmmu_ismhat == 1);
-		ASSERT(ism_hatid->sfmmu_cnum == INVALID_CONTEXT);
 
 		/*
 		 * First remove ourselves from the ism mapping list.
@@ -7793,14 +7909,9 @@
 		 * will go to tl=0.
 		 */
 		hatlockp = sfmmu_hat_enter(sfmmup);
-		ctx = sfmmutoctx(sfmmup);
-		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
-		cnum = sfmmutoctxnum(sfmmup);
-
-		if (cnum != INVALID_CONTEXT) {
-			sfmmu_tlb_swap_ctx(sfmmup, ctx);
-		}
-		rw_exit(&ctx->ctx_rwlock);
+
+		sfmmu_invalidate_ctx(sfmmup);
+
 		sfmmu_hat_exit(hatlockp);
 
 		/*
@@ -8555,291 +8666,195 @@
 	}
 }
 
-/*
- * This routine gets called when the system has run out of free contexts.
- * This will simply choose context passed to it to be stolen and reused.
- */
-/* ARGSUSED */
-static void
-sfmmu_reuse_ctx(struct ctx *ctx, sfmmu_t *sfmmup)
-{
-	sfmmu_t *stolen_sfmmup;
-	cpuset_t cpuset;
-	ushort_t	cnum = ctxtoctxnum(ctx);
-
-	ASSERT(cnum != KCONTEXT);
-	ASSERT(rw_read_locked(&ctx->ctx_rwlock) == 0);	/* write locked */
-
-	/*
-	 * simply steal and reuse the ctx passed to us.
-	 */
-	stolen_sfmmup = ctx->ctx_sfmmu;
-	ASSERT(sfmmu_hat_lock_held(sfmmup));
-	ASSERT(stolen_sfmmup->sfmmu_cnum == cnum);
-	ASSERT(stolen_sfmmup != ksfmmup);
-
-	TRACE_CTXS(&ctx_trace_mutex, ctx_trace_ptr, cnum, stolen_sfmmup,
-	    sfmmup, CTX_TRC_STEAL);
-	SFMMU_STAT(sf_ctxsteal);
-
-	/*
-	 * Update sfmmu and ctx structs. After this point all threads
-	 * belonging to this hat/proc will fault and not use the ctx
-	 * being stolen.
-	 */
-	kpreempt_disable();
-	/*
-	 * Enforce reverse order of assignments from sfmmu_get_ctx().  This
-	 * is done to prevent a race where a thread faults with the context
-	 * but the TSB has changed.
-	 */
-	stolen_sfmmup->sfmmu_cnum = INVALID_CONTEXT;
-	membar_enter();
-	ctx->ctx_sfmmu = NULL;
-
-	/*
-	 * 1. flush TLB in all CPUs that ran the process whose ctx
-	 * we are stealing.
-	 * 2. change context for all other CPUs to INVALID_CONTEXT,
-	 * if they are running in the context that we are going to steal.
-	 */
-	cpuset = stolen_sfmmup->sfmmu_cpusran;
-	CPUSET_DEL(cpuset, CPU->cpu_id);
-	CPUSET_AND(cpuset, cpu_ready_set);
-	SFMMU_XCALL_STATS(cnum);
-	xt_some(cpuset, sfmmu_ctx_steal_tl1, cnum, INVALID_CONTEXT);
-	xt_sync(cpuset);
-
-	/*
-	 * flush TLB of local processor
-	 */
-	vtag_flushctx(cnum);
-
-	/*
-	 * If we just stole the ctx from the current process
-	 * on local cpu then we also invalidate his context
-	 * here.
-	 */
-	if (sfmmu_getctx_sec() == cnum) {
-		sfmmu_setctx_sec(INVALID_CONTEXT);
-		sfmmu_clear_utsbinfo();
-	}
-
-	kpreempt_enable();
-	SFMMU_STAT(sf_tlbflush_ctx);
-}
-
-/*
- * Returns a context with the reader lock held.
- *
- * We maintain 2 different list of contexts.  The first list
- * is the free list and it is headed by ctxfree.  These contexts
- * are ready to use.  The second list is the dirty list and is
- * headed by ctxdirty. These contexts have been freed but haven't
- * been flushed from the TLB.
+
+/*
+ * Wrapper routine used to get a context.
  *
  * It's the responsibility of the caller to guarantee that the
  * process serializes on calls here by taking the HAT lock for
  * the hat.
  *
- * Changing the page size is a rather complicated process, so
- * rather than jump through lots of hoops to special case it,
- * the easiest way to go about it is to tell the MMU we want
- * to change page sizes and then switch to using a different
- * context.  When we program the context registers for the
- * process, we can take care of setting up the (new) page size
- * for that context at that point.
- */
-
-static struct ctx *
+ */
+static void
 sfmmu_get_ctx(sfmmu_t *sfmmup)
 {
-	struct ctx *ctx;
-	ushort_t cnum;
-	struct ctx *lastctx = &ctxs[nctxs-1];
-	struct ctx *firstctx = &ctxs[NUM_LOCKED_CTXS];
-	uint_t	found_stealable_ctx;
-	uint_t	retry_count = 0;
-
-#define	NEXT_CTX(ctx)   (((ctx) >= lastctx) ? firstctx : ((ctx) + 1))
-
-retry:
-
-	ASSERT(sfmmup->sfmmu_cnum != KCONTEXT);
-	/*
-	 * Check to see if this process has already got a ctx.
-	 * In that case just set the sec-ctx, grab a readers lock, and
-	 * return.
-	 *
-	 * We have to double check after we get the readers lock on the
-	 * context, since it could be stolen in this short window.
-	 */
-	if (sfmmup->sfmmu_cnum >= NUM_LOCKED_CTXS) {
-		ctx = sfmmutoctx(sfmmup);
-		rw_enter(&ctx->ctx_rwlock, RW_READER);
-		if (ctx->ctx_sfmmu == sfmmup) {
-			return (ctx);
-		} else {
-			rw_exit(&ctx->ctx_rwlock);
-		}
-	}
-
-	found_stealable_ctx = 0;
-	mutex_enter(&ctx_list_lock);
-	if ((ctx = ctxfree) != NULL) {
-		/*
-		 * Found a ctx in free list. Delete it from the list and
-		 * use it.  There's a short window where the stealer can
-		 * look at the context before we grab the lock on the
-		 * context, so we have to handle that with the free flag.
-		 */
-		SFMMU_STAT(sf_ctxfree);
-		ctxfree = ctx->ctx_free;
-		ctx->ctx_sfmmu = NULL;
-		mutex_exit(&ctx_list_lock);
-		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
-		ASSERT(ctx->ctx_sfmmu == NULL);
-		ASSERT((ctx->ctx_flags & CTX_FREE_FLAG) != 0);
-	} else if ((ctx = ctxdirty) != NULL) {
-		/*
-		 * No free contexts.  If we have at least one dirty ctx
-		 * then flush the TLBs on all cpus if necessary and move
-		 * the dirty list to the free list.
-		 */
-		SFMMU_STAT(sf_ctxdirty);
-		ctxdirty = NULL;
-		if (delay_tlb_flush)
-			sfmmu_tlb_all_demap();
-		ctxfree = ctx->ctx_free;
-		ctx->ctx_sfmmu = NULL;
-		mutex_exit(&ctx_list_lock);
-		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
-		ASSERT(ctx->ctx_sfmmu == NULL);
-		ASSERT((ctx->ctx_flags & CTX_FREE_FLAG) != 0);
-	} else {
-		/*
-		 * No free context available, so steal one.
-		 *
-		 * The policy to choose the appropriate context is simple;
-		 * just sweep all the ctxs using ctxhand. This will steal
-		 * the LRU ctx.
-		 *
-		 * We however only steal a non-free context that can be
-		 * write locked.  Keep searching till we find a stealable
-		 * ctx.
-		 */
-		mutex_exit(&ctx_list_lock);
-		ctx = ctxhand;
-		do {
-			/*
-			 * If you get the writers lock, and the ctx isn't
-			 * a free ctx, THEN you can steal this ctx.
-			 */
-			if ((ctx->ctx_flags & CTX_FREE_FLAG) == 0 &&
-			    rw_tryenter(&ctx->ctx_rwlock, RW_WRITER) != 0) {
-				if (ctx->ctx_flags & CTX_FREE_FLAG) {
-					/* let the first guy have it */
-					rw_exit(&ctx->ctx_rwlock);
-				} else {
-					found_stealable_ctx = 1;
-					break;
-				}
-			}
-			ctx = NEXT_CTX(ctx);
-		} while (ctx != ctxhand);
-
-		if (found_stealable_ctx) {
-			/*
-			 * Try and reuse the ctx.
-			 */
-			sfmmu_reuse_ctx(ctx, sfmmup);
-
-		} else if (retry_count++ < GET_CTX_RETRY_CNT) {
-			goto retry;
-
-		} else {
-			panic("Can't find any stealable context");
-		}
-	}
-
-	ASSERT(rw_read_locked(&ctx->ctx_rwlock) == 0);	/* write locked */
-	ctx->ctx_sfmmu = sfmmup;
-
-	/*
-	 * Clear the ctx_flags field.
-	 */
-	ctx->ctx_flags = 0;
-
-	cnum = ctxtoctxnum(ctx);
-	membar_exit();
-	sfmmup->sfmmu_cnum = cnum;
+	mmu_ctx_t *mmu_ctxp;
+	uint_t pstate_save;
+
+	ASSERT(sfmmu_hat_lock_held(sfmmup));
+	ASSERT(sfmmup != ksfmmup);
+
+	kpreempt_disable();
+
+	mmu_ctxp = CPU_MMU_CTXP(CPU);
+	ASSERT(mmu_ctxp);
+	ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
+	ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
+
+	/*
+	 * Do a wrap-around if cnum reaches the max # of cnums supported by an MMU.
+	 */
+	if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs)
+		sfmmu_ctx_wrap_around(mmu_ctxp);
 
 	/*
 	 * Let the MMU set up the page sizes to use for
 	 * this context in the TLB. Don't program 2nd dtlb for ism hat.
 	 */
-	if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0))
+	if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) {
 		mmu_set_ctx_page_sizes(sfmmup);
-
-	/*
-	 * Downgrade to reader's lock.
-	 */
-	rw_downgrade(&ctx->ctx_rwlock);
-
-	/*
-	 * If this value doesn't get set to what we want
-	 * it won't matter, so don't worry about locking.
-	 */
-	ctxhand = NEXT_CTX(ctx);
-
-	/*
-	 * Better not have been stolen while we held the ctx'
-	 * lock or we're hosed.
-	 */
-	ASSERT(sfmmup == sfmmutoctx(sfmmup)->ctx_sfmmu);
-
-	return (ctx);
-
-#undef NEXT_CTX
-}
-
-
-/*
- * Set the process context to INVALID_CONTEXT (but
- * without stealing the ctx) so that it faults and
- * reloads the MMU state from TL=0.  Caller must
- * hold the hat lock since we don't acquire it here.
+	}
+
+	/*
+	 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with
+	 * interrupts disabled to prevent a race condition with the wrap-around
+	 * ctx invalidation. On sun4v, ctx invalidation also involves
+	 * an HV call to set the number of TSBs to 0. If interrupts are not
+	 * disabled until after sfmmu_load_mmustate is complete, TSBs may
+	 * become assigned to INVALID_CONTEXT. This is not allowed.
+	 */
+	pstate_save = sfmmu_disable_intrs();
+
+	sfmmu_alloc_ctx(sfmmup, 1, CPU);
+	sfmmu_load_mmustate(sfmmup);
+
+	sfmmu_enable_intrs(pstate_save);
+
+	kpreempt_enable();
+}
+
+/*
+ * When all cnums are used up in an MMU, cnum will wrap around to the
+ * next generation and start from 2.
+ */
+static void
+sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp)
+{
+
+	/* caller must have disabled preemption */
+	ASSERT(curthread->t_preempt >= 1);
+	ASSERT(mmu_ctxp != NULL);
+
+	/* acquire Per-MMU (PM) spin lock */
+	mutex_enter(&mmu_ctxp->mmu_lock);
+
+	/* re-check to see if wrap-around is needed */
+	if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs)
+		goto done;
+
+	SFMMU_MMU_STAT(mmu_wrap_around);
+
+	/* update gnum */
+	ASSERT(mmu_ctxp->mmu_gnum != 0);
+	mmu_ctxp->mmu_gnum++;
+	if (mmu_ctxp->mmu_gnum == 0 ||
+	    mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) {
+		cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bounds.",
+		    (void *)mmu_ctxp);
+	}
+
+	if (mmu_ctxp->mmu_ncpus > 1) {
+		cpuset_t cpuset;
+
+		membar_enter(); /* make sure updated gnum visible */
+
+		SFMMU_XCALL_STATS(NULL);
+
+		/* xcall to others on the same MMU to invalidate ctx */
+		cpuset = mmu_ctxp->mmu_cpuset;
+		ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id));
+		CPUSET_DEL(cpuset, CPU->cpu_id);
+		CPUSET_AND(cpuset, cpu_ready_set);
+
+		/*
+		 * Pass in INVALID_CONTEXT as the first parameter to
+		 * sfmmu_raise_tsb_exception, which invalidates the context
+		 * of any process running on the CPUs in the MMU.
+		 */
+		xt_some(cpuset, sfmmu_raise_tsb_exception,
+		    INVALID_CONTEXT, INVALID_CONTEXT);
+		xt_sync(cpuset);
+
+		SFMMU_MMU_STAT(mmu_tsb_raise_exception);
+	}
+
+	if (sfmmu_getctx_sec() != INVALID_CONTEXT) {
+		sfmmu_setctx_sec(INVALID_CONTEXT);
+		sfmmu_clear_utsbinfo();
+	}
+
+	/*
+	 * No xcall is needed here. On sun4u systems all CPUs in a context
+	 * domain share a single physical MMU, therefore it's enough to flush
+	 * the TLB on the local CPU. On sun4v systems we use one global context
+	 * domain and flush all remote TLBs in the sfmmu_raise_tsb_exception
+	 * handler. Note that vtag_flushall_uctxs() is called
+	 * on Ultra II machines, where the equivalent flushall functionality
+	 * is implemented in SW, and only user ctx TLB entries are flushed.
+	 */
+	if (&vtag_flushall_uctxs != NULL) {
+		vtag_flushall_uctxs();
+	} else {
+		vtag_flushall();
+	}
+
+	/* reset mmu cnum, skipping cnum 0 and 1 */
+	mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
+
+done:
+	mutex_exit(&mmu_ctxp->mmu_lock);
+}
+
+
+/*
+ * For a multi-threaded process, set the process context to INVALID_CONTEXT
+ * so that it faults and reloads the MMU state from TL=0. For a single-
+ * threaded process, we can just load the MMU state directly without having
+ * to set the context invalid. The caller must hold the hat lock since we don't
+ * acquire it here.
  */
 static void
 sfmmu_sync_mmustate(sfmmu_t *sfmmup)
 {
-	int cnum;
-	cpuset_t cpuset;
+	uint_t cnum;
+	uint_t pstate_save;
 
 	ASSERT(sfmmup != ksfmmup);
 	ASSERT(sfmmu_hat_lock_held(sfmmup));
 
 	kpreempt_disable();
 
-	cnum = sfmmutoctxnum(sfmmup);
-	if (cnum != INVALID_CONTEXT) {
-		cpuset = sfmmup->sfmmu_cpusran;
-		CPUSET_DEL(cpuset, CPU->cpu_id);
-		CPUSET_AND(cpuset, cpu_ready_set);
-		SFMMU_XCALL_STATS(cnum);
-
-		xt_some(cpuset, sfmmu_raise_tsb_exception,
-		    cnum, INVALID_CONTEXT);
-		xt_sync(cpuset);
-
-		/*
-		 * If the process is running on the local CPU
-		 * we need to update the MMU state here as well.
-		 */
-		if (sfmmu_getctx_sec() == cnum)
-			sfmmu_load_mmustate(sfmmup);
-
-		SFMMU_STAT(sf_tsb_raise_exception);
+	/*
+	 * We check whether the passed-in sfmmup is the same as the
+	 * currently running proc. This is to make sure the current proc
+	 * stays single-threaded if it already is.
+	 */
+	if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
+	    (curthread->t_procp->p_lwpcnt == 1)) {
+		/* single-thread */
+		cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
+		if (cnum != INVALID_CONTEXT) {
+			uint_t curcnum;
+			/*
+			 * Disable interrupts to prevent a race condition
+			 * with sfmmu_ctx_wrap_around ctx invalidation.
+			 * On sun4v, ctx invalidation involves setting the
+			 * TSB to NULL, hence interrupts should be disabled
+			 * until after sfmmu_load_mmustate is completed.
+			 */
+			pstate_save = sfmmu_disable_intrs();
+			curcnum = sfmmu_getctx_sec();
+			if (curcnum == cnum)
+				sfmmu_load_mmustate(sfmmup);
+			sfmmu_enable_intrs(pstate_save);
+			ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
+		}
+	} else {
+		/*
+		 * multi-thread
+		 * or when sfmmup is not the same as the curproc.
+		 */
+		sfmmu_invalidate_ctx(sfmmup);
 	}
 
 	kpreempt_enable();
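
Together, sfmmu_get_ctx and sfmmu_ctx_wrap_around above replace context stealing with a generation scheme: exhausting a domain's cnum space bumps its gnum, invalidates every HAT's cached context once, and performs a single TLB flush. A compact model of the validity invariant, with simplified stand-in types:

    #include <stdint.h>

    #define INVALID_CONTEXT 1
    #define NUM_LOCKED_CTXS 2

    typedef struct { uint64_t gnum; uint64_t cnum; } hat_ctx_model_t;
    typedef struct { uint64_t mmu_gnum; } mmu_dom_model_t;

    /*
     * A HAT's cached <gnum, cnum> pair is usable on an MMU only while
     * the domain's generation is unchanged.
     */
    static int
    ctx_is_valid(const hat_ctx_model_t *hc, const mmu_dom_model_t *dom)
    {
            return (hc->cnum != INVALID_CONTEXT &&
                hc->gnum == dom->mmu_gnum);
    }

    /*
     * Wrap-around (under mmu_lock, preemption disabled):
     *   dom->mmu_gnum++;         stale <gnum,cnum> pairs now fail the test
     *   xcall peers sharing the MMU to set INVALID_CONTEXT;
     *   vtag_flushall[_uctxs](): one TLB flush covers the old generation
     *   mmu_cnum = NUM_LOCKED_CTXS, restarting cnums at 2
     */
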
@@ -8868,9 +8883,7 @@
 	struct tsb_info *new_tsbinfo = NULL;
 	struct tsb_info *curtsb, *prevtsb;
 	uint_t tte_sz_mask;
-	cpuset_t cpuset;
-	struct ctx *ctx = NULL;
-	int ctxnum;
+	int i;
 
 	ASSERT(sfmmup != ksfmmup);
 	ASSERT(sfmmup->sfmmu_ismhat == 0);
@@ -8959,27 +8972,7 @@
 	 */
 	if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
 		/* The TSB is either growing or shrinking. */
-		ctx = sfmmutoctx(sfmmup);
-		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
-
-		ctxnum = sfmmutoctxnum(sfmmup);
-		sfmmup->sfmmu_cnum = INVALID_CONTEXT;
-		membar_enter();	/* make sure visible on all CPUs */
-
-		kpreempt_disable();
-		if (ctxnum != INVALID_CONTEXT) {
-			cpuset = sfmmup->sfmmu_cpusran;
-			CPUSET_DEL(cpuset, CPU->cpu_id);
-			CPUSET_AND(cpuset, cpu_ready_set);
-			SFMMU_XCALL_STATS(ctxnum);
-
-			xt_some(cpuset, sfmmu_raise_tsb_exception,
-			    ctxnum, INVALID_CONTEXT);
-			xt_sync(cpuset);
-
-			SFMMU_STAT(sf_tsb_raise_exception);
-		}
-		kpreempt_enable();
+		sfmmu_invalidate_ctx(sfmmup);
 	} else {
 		/*
 		 * It is illegal to swap in TSBs from a process other
@@ -8989,8 +8982,19 @@
 		 * misses.
 		 */
 		ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
-		ASSERT(sfmmutoctxnum(sfmmup) == INVALID_CONTEXT);
-	}
+	}
+
+#ifdef DEBUG
+	ASSERT(max_mmu_ctxdoms > 0);
+
+	/*
+	 * Process should have INVALID_CONTEXT on all MMUs
+	 */
+	for (i = 0; i < max_mmu_ctxdoms; i++) {
+
+		ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
+	}
+#endif
 
 	new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
 	membar_stst();	/* strict ordering required */
@@ -9008,18 +9012,6 @@
 	if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
 		sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
 
-	if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
-		kpreempt_disable();
-		membar_exit();
-		sfmmup->sfmmu_cnum = ctxnum;
-		if (ctxnum != INVALID_CONTEXT &&
-		    sfmmu_getctx_sec() == ctxnum) {
-			sfmmu_load_mmustate(sfmmup);
-		}
-		kpreempt_enable();
-		rw_exit(&ctx->ctx_rwlock);
-	}
-
 	SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
 
 	/*
@@ -9040,15 +9032,15 @@
 }
 
 /*
- * Steal context from process, forcing the process to switch to another
- * context on the next TLB miss, and therefore start using the TLB that
- * is reprogrammed for the new page sizes.
+ * This function will re-program hat pgsz array, and invalidate the
+ * process' context, forcing the process to switch to another
+ * context on the next TLB miss, and therefore start using the
+ * TLB that is reprogrammed for the new page sizes.
  */
 void
-sfmmu_steal_context(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
-{
-	struct ctx *ctx;
-	int i, cnum;
+sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
+{
+	int i;
 	hatlock_t *hatlockp = NULL;
 
 	hatlockp = sfmmu_hat_enter(sfmmup);
@@ -9058,14 +9050,9 @@
 			sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
 	}
 	SFMMU_STAT(sf_tlb_reprog_pgsz);
-	ctx = sfmmutoctx(sfmmup);
-	rw_enter(&ctx->ctx_rwlock, RW_WRITER);
-	cnum = sfmmutoctxnum(sfmmup);
-
-	if (cnum != INVALID_CONTEXT) {
-		sfmmu_tlb_swap_ctx(sfmmup, ctx);
-	}
-	rw_exit(&ctx->ctx_rwlock);
+
+	sfmmu_invalidate_ctx(sfmmup);
+
 	sfmmu_hat_exit(hatlockp);
 }
 
@@ -9326,50 +9313,6 @@
 }
 
 /*
- * Free up a ctx
- */
-static void
-sfmmu_free_ctx(sfmmu_t *sfmmup, struct ctx *ctx)
-{
-	int ctxnum;
-
-	rw_enter(&ctx->ctx_rwlock, RW_WRITER);
-
-	TRACE_CTXS(&ctx_trace_mutex, ctx_trace_ptr, sfmmup->sfmmu_cnum,
-	    sfmmup, 0, CTX_TRC_FREE);
-
-	if (sfmmup->sfmmu_cnum == INVALID_CONTEXT) {
-		CPUSET_ZERO(sfmmup->sfmmu_cpusran);
-		rw_exit(&ctx->ctx_rwlock);
-		return;
-	}
-
-	ASSERT(sfmmup == ctx->ctx_sfmmu);
-
-	ctx->ctx_sfmmu = NULL;
-	ctx->ctx_flags = 0;
-	sfmmup->sfmmu_cnum = INVALID_CONTEXT;
-	membar_enter();
-	CPUSET_ZERO(sfmmup->sfmmu_cpusran);
-	ctxnum = sfmmu_getctx_sec();
-	if (ctxnum == ctxtoctxnum(ctx)) {
-		sfmmu_setctx_sec(INVALID_CONTEXT);
-		sfmmu_clear_utsbinfo();
-	}
-
-	/*
-	 * Put the freed ctx on the dirty list
-	 */
-	mutex_enter(&ctx_list_lock);
-	CTX_SET_FLAGS(ctx, CTX_FREE_FLAG);
-	ctx->ctx_free = ctxdirty;
-	ctxdirty = ctx;
-	mutex_exit(&ctx_list_lock);
-
-	rw_exit(&ctx->ctx_rwlock);
-}
-
-/*
  * Free up a sfmmu
  * Since the sfmmu is currently embedded in the hat struct we simply zero
  * out our fields and free up the ism map blk list if any.
@@ -9389,7 +9332,7 @@
 	ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
 	ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
 	ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
-	ASSERT(sfmmup->sfmmu_cnum == INVALID_CONTEXT);
+
 	sfmmup->sfmmu_free = 0;
 	sfmmup->sfmmu_ismhat = 0;
 
@@ -10531,65 +10474,6 @@
 }
 
 /*
- * Make sure that there is a valid ctx, if not get a ctx.
- * Also, get a readers lock on the ctx, so that the ctx cannot
- * be stolen underneath us.
- */
-static void
-sfmmu_disallow_ctx_steal(sfmmu_t *sfmmup)
-{
-	struct	ctx *ctx;
-
-	ASSERT(sfmmup != ksfmmup);
-	ASSERT(sfmmup->sfmmu_ismhat == 0);
-
-	/*
-	 * If ctx has been stolen, get a ctx.
-	 */
-	if (sfmmup->sfmmu_cnum == INVALID_CONTEXT) {
-		/*
-		 * Our ctx was stolen. Get a ctx with rlock.
-		 */
-		ctx = sfmmu_get_ctx(sfmmup);
-		return;
-	} else {
-		ctx = sfmmutoctx(sfmmup);
-	}
-
-	/*
-	 * Get the reader lock.
-	 */
-	rw_enter(&ctx->ctx_rwlock, RW_READER);
-	if (ctx->ctx_sfmmu != sfmmup) {
-		/*
-		 * The ctx got stolen, so spin again.
-		 */
-		rw_exit(&ctx->ctx_rwlock);
-		ctx = sfmmu_get_ctx(sfmmup);
-	}
-
-	ASSERT(sfmmup->sfmmu_cnum >= NUM_LOCKED_CTXS);
-}
-
-/*
- * Decrement reference count for our ctx. If the reference count
- * becomes 0, our ctx can be stolen by someone.
- */
-static void
-sfmmu_allow_ctx_steal(sfmmu_t *sfmmup)
-{
-	struct	ctx *ctx;
-
-	ASSERT(sfmmup != ksfmmup);
-	ASSERT(sfmmup->sfmmu_ismhat == 0);
-	ctx = sfmmutoctx(sfmmup);
-
-	ASSERT(sfmmup == ctx->ctx_sfmmu);
-	ASSERT(sfmmup->sfmmu_cnum != INVALID_CONTEXT);
-	rw_exit(&ctx->ctx_rwlock);
-}
-
-/*
  * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
  * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
  * KM_SLEEP allocation.
@@ -10683,22 +10567,14 @@
  *
  * There are many scenarios that could land us here:
  *
- *	1) Process has no context.  In this case, ctx is
- *         INVALID_CONTEXT and sfmmup->sfmmu_cnum == 1 so
- *         we will acquire a context before returning.
- *      2) Need to re-load our MMU state.  In this case,
- *         ctx is INVALID_CONTEXT and sfmmup->sfmmu_cnum != 1.
- *      3) ISM mappings are being updated.  This is handled
- *         just like case #2.
- *      4) We wish to program a new page size into the TLB.
- *         This is handled just like case #1, since changing
- *         TLB page size requires us to flush the TLB.
- *	5) Window fault and no valid translation found.
- *
- * Cases 1-4, ctx is INVALID_CONTEXT so we handle it and then
- * exit which will retry the trapped instruction.  Case #5 we
- * punt to trap() which will raise us a trap level and handle
- * the fault before unwinding.
+ * If the context is invalid we land here. The context can be invalid
+ * for 3 reasons: 1) we couldn't allocate a new context and now need to
+ * perform a wrap-around operation in order to allocate a new context;
+ * 2) the context was invalidated to change pagesize programming; 3) the ISM
+ * or TSB configuration is changing for this process and we are forced in
+ * here to do a synchronization operation. If the context is valid we can
+ * be here from the window trap handler; in that case just call trap to
+ * handle the fault.
  *
  * Note that the process will run in INVALID_CONTEXT before
  * faulting into here and subsequently loading the MMU registers
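
A minimal sketch of the resulting dispatch, assuming the helper names that
appear in the hunk below (sfmmu_hat_enter(), sfmmu_get_ctx()); the shipped
handler also synchronizes with HAT_SWAPPED/HAT_ISMBUSY first, and the
trap() call arguments here are illustrative:

	if (ctxnum == INVALID_CONTEXT) {
		hatlockp = sfmmu_hat_enter(sfmmup);
		sfmmu_get_ctx(sfmmup);		/* allocate a fresh context */
		sfmmu_hat_exit(hatlockp);
		/* returning from the handler retries the instruction */
	} else {
		trap(rp, (caddr_t)tagaccess, traptype, 0); /* window fault */
	}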
@@ -10718,6 +10594,7 @@
 	struct tsb_info *tsbinfop;
 
 	SFMMU_STAT(sf_tsb_exceptions);
+	SFMMU_MMU_STAT(mmu_tsb_exceptions);
 	sfmmup = astosfmmu(curthread->t_procp->p_as);
 	ctxnum = tagaccess & TAGACC_CTX_MASK;
 
@@ -10737,8 +10614,10 @@
 	 * locking the HAT and grabbing the rwlock on the context as a
 	 * reader temporarily.
 	 */
-	if (ctxnum == INVALID_CONTEXT ||
-	    SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
+	ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
+	    ctxnum == INVALID_CONTEXT);
+
+	if (ctxnum == INVALID_CONTEXT) {
 		/*
 		 * Must set lwp state to LWP_SYS before
 		 * trying to acquire any adaptive lock
@@ -10764,7 +10643,7 @@
 		 */
 		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
 			cv_wait(&sfmmup->sfmmu_tsb_cv,
-				    HATLOCK_MUTEXP(hatlockp));
+			    HATLOCK_MUTEXP(hatlockp));
 			goto retry;
 		}
 
@@ -10779,13 +10658,8 @@
 			goto retry;
 		}
 
-		sfmmu_disallow_ctx_steal(sfmmup);
-		ctxnum = sfmmup->sfmmu_cnum;
-		kpreempt_disable();
-		sfmmu_setctx_sec(ctxnum);
-		sfmmu_load_mmustate(sfmmup);
-		kpreempt_enable();
-		sfmmu_allow_ctx_steal(sfmmup);
+		sfmmu_get_ctx(sfmmup);
+
 		sfmmu_hat_exit(hatlockp);
 		/*
 		 * Must restore lwp_state if not calling
@@ -10857,7 +10731,6 @@
 	caddr_t 	va;
 	ism_ment_t	*ment;
 	sfmmu_t		*sfmmup;
-	int 		ctxnum;
 	int 		vcolor;
 	int		ttesz;
 
@@ -10877,7 +10750,7 @@
 	for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
 
 		sfmmup = ment->iment_hat;
-		ctxnum = sfmmup->sfmmu_cnum;
+
 		va = ment->iment_base_va;
 		va = (caddr_t)((uintptr_t)va  + (uintptr_t)addr);
 
@@ -10895,20 +10768,15 @@
 			sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);
 		}
 
-		if (ctxnum != INVALID_CONTEXT) {
-			/*
-			 * Flush TLBs.  We don't need to do this for
-			 * invalid context since the flushing is already
-			 * done as part of context stealing.
-			 */
-			cpuset = sfmmup->sfmmu_cpusran;
-			CPUSET_AND(cpuset, cpu_ready_set);
-			CPUSET_DEL(cpuset, CPU->cpu_id);
-			SFMMU_XCALL_STATS(ctxnum);
-			xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
-			    ctxnum);
-			vtag_flushpage(va, ctxnum);
-		}
+		cpuset = sfmmup->sfmmu_cpusran;
+		CPUSET_AND(cpuset, cpu_ready_set);
+		CPUSET_DEL(cpuset, CPU->cpu_id);
+
+		SFMMU_XCALL_STATS(sfmmup);
+		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
+		    (uint64_t)sfmmup);
+
+		vtag_flushpage(va, (uint64_t)sfmmup);
 
 		/*
 		 * Flush D$
@@ -10918,7 +10786,8 @@
 		if (cache_flush_flag == CACHE_FLUSH) {
 			cpuset = cpu_ready_set;
 			CPUSET_DEL(cpuset, CPU->cpu_id);
-			SFMMU_XCALL_STATS(ctxnum);
+
+			SFMMU_XCALL_STATS(sfmmup);
 			vcolor = addr_to_vcolor(va);
 			xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
 			vac_flushpage(pfnum, vcolor);
@@ -10937,7 +10806,7 @@
 	pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
 	int hat_lock_held)
 {
-	int ctxnum, vcolor;
+	int vcolor;
 	cpuset_t cpuset;
 	hatlock_t *hatlockp;
 
@@ -10947,34 +10816,41 @@
 	 */
 	vcolor = addr_to_vcolor(addr);
 
+	/*
+	 * We must hold the hat lock during the TLB flush
+	 * to avoid a race with sfmmu_invalidate_ctx(), where
+	 * sfmmu_cnum on an MMU could be set to INVALID_CONTEXT,
+	 * causing the TLB demap routine to skip the flush on
+	 * that MMU. If the context on an MMU has already been
+	 * set to INVALID_CONTEXT, we just get an extra flush
+	 * on that MMU.
+	 */
+	if (!hat_lock_held && !tlb_noflush)
+		hatlockp = sfmmu_hat_enter(sfmmup);
+
 	kpreempt_disable();
 	if (!tlb_noflush) {
 		/*
-		 * Flush the TSB.
-		 */
-		if (!hat_lock_held)
-			hatlockp = sfmmu_hat_enter(sfmmup);
+		 * Flush the TSB and TLB.
+		 */
 		SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp);
-		ctxnum = (int)sfmmutoctxnum(sfmmup);
-		if (!hat_lock_held)
-			sfmmu_hat_exit(hatlockp);
-
-		if (ctxnum != INVALID_CONTEXT) {
-			/*
-			 * Flush TLBs.  We don't need to do this if our
-			 * context is invalid context.  Since we hold the
-			 * HAT lock the context must have been stolen and
-			 * hence will be flushed before re-use.
-			 */
-			cpuset = sfmmup->sfmmu_cpusran;
-			CPUSET_AND(cpuset, cpu_ready_set);
-			CPUSET_DEL(cpuset, CPU->cpu_id);
-			SFMMU_XCALL_STATS(ctxnum);
-			xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
-				ctxnum);
-			vtag_flushpage(addr, ctxnum);
-		}
-	}
+
+		cpuset = sfmmup->sfmmu_cpusran;
+		CPUSET_AND(cpuset, cpu_ready_set);
+		CPUSET_DEL(cpuset, CPU->cpu_id);
+
+		SFMMU_XCALL_STATS(sfmmup);
+
+		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
+		    (uint64_t)sfmmup);
+
+		vtag_flushpage(addr, (uint64_t)sfmmup);
+
+	}
+
+	if (!hat_lock_held && !tlb_noflush)
+		sfmmu_hat_exit(hatlockp);
+
 
 	/*
 	 * Flush the D$
@@ -10990,7 +10866,7 @@
 			CPUSET_AND(cpuset, cpu_ready_set);
 		}
 		CPUSET_DEL(cpuset, CPU->cpu_id);
-		SFMMU_XCALL_STATS(sfmmutoctxnum(sfmmup));
+		SFMMU_XCALL_STATS(sfmmup);
 		xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
 		vac_flushpage(pfnum, vcolor);
 	}
@@ -11006,7 +10882,6 @@
 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
 	int tlb_noflush, int hat_lock_held)
 {
-	int ctxnum;
 	cpuset_t cpuset;
 	hatlock_t *hatlockp;
 
@@ -11022,29 +10897,23 @@
 	if (!hat_lock_held)
 		hatlockp = sfmmu_hat_enter(sfmmup);
 	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp);
-	ctxnum = sfmmutoctxnum(sfmmup);
+
+	kpreempt_disable();
+
+	cpuset = sfmmup->sfmmu_cpusran;
+	CPUSET_AND(cpuset, cpu_ready_set);
+	CPUSET_DEL(cpuset, CPU->cpu_id);
+
+	SFMMU_XCALL_STATS(sfmmup);
+	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
+
+	vtag_flushpage(addr, (uint64_t)sfmmup);
+
 	if (!hat_lock_held)
 		sfmmu_hat_exit(hatlockp);
 
-	/*
-	 * Flush TLBs.  We don't need to do this if our context is invalid
-	 * context.  Since we hold the HAT lock the context must have been
-	 * stolen and hence will be flushed before re-use.
-	 */
-	if (ctxnum != INVALID_CONTEXT) {
-		/*
-		 * There is no need to protect against ctx being stolen.
-		 * If the ctx is stolen we will simply get an extra flush.
-		 */
-		kpreempt_disable();
-		cpuset = sfmmup->sfmmu_cpusran;
-		CPUSET_AND(cpuset, cpu_ready_set);
-		CPUSET_DEL(cpuset, CPU->cpu_id);
-		SFMMU_XCALL_STATS(ctxnum);
-		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, ctxnum);
-		vtag_flushpage(addr, ctxnum);
-		kpreempt_enable();
-	}
+	kpreempt_enable();
+
 }
 
 /*
@@ -11057,10 +10926,9 @@
 sfmmu_tlb_range_demap(demap_range_t *dmrp)
 {
 	sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
-	int ctxnum;
 	hatlock_t *hatlockp;
 	cpuset_t cpuset;
-	uint64_t ctx_pgcnt;
+	uint64_t sfmmu_pgcnt;
 	pgcnt_t pgcnt = 0;
 	int pgunload = 0;
 	int dirtypg = 0;
@@ -11098,117 +10966,64 @@
 		pgcnt += dirtypg;
 	}
 
-	/*
-	 * In the case where context is invalid context, bail.
-	 * We hold the hat lock while checking the ctx to prevent
-	 * a race with sfmmu_replace_tsb() which temporarily sets
-	 * the ctx to INVALID_CONTEXT to force processes to enter
-	 * sfmmu_tsbmiss_exception().
-	 */
-	hatlockp = sfmmu_hat_enter(sfmmup);
-	ctxnum = sfmmutoctxnum(sfmmup);
-	sfmmu_hat_exit(hatlockp);
-	if (ctxnum == INVALID_CONTEXT) {
-		dmrp->dmr_bitvec = 0;
-		return;
-	}
-
 	ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
 	if (sfmmup->sfmmu_free == 0) {
 		addr = dmrp->dmr_addr;
 		bitvec = dmrp->dmr_bitvec;
-		ctx_pgcnt = (uint64_t)((ctxnum << 16) | pgcnt);
+
+		/*
+		 * Make sure pgcnt fits in SFMMU_PGCNT_SHIFT bits, as
+		 * it will be used to pack the argument for xt_some().
+		 */
+		ASSERT((pgcnt > 0) &&
+		    (pgcnt <= (1 << SFMMU_PGCNT_SHIFT)));
+
+		/*
+		 * Encode pgcnt as (pgcnt - 1), and pass (pgcnt - 1) in
+		 * the low 6 bits of sfmmup. This is doable since pgcnt
+		 * is always >= 1.
+		 */
+		ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK));
+		sfmmu_pgcnt = (uint64_t)sfmmup |
+		    ((pgcnt - 1) & SFMMU_PGCNT_MASK);
+
+		/*
+		 * We must hold the hat lock during the TLB flush
+		 * to avoid a race with sfmmu_invalidate_ctx(), where
+		 * sfmmu_cnum on an MMU could be set to INVALID_CONTEXT,
+		 * causing the TLB demap routine to skip the flush on
+		 * that MMU. If the context on an MMU has already been
+		 * set to INVALID_CONTEXT, we just get an extra flush
+		 * on that MMU.
+		 */
+		hatlockp = sfmmu_hat_enter(sfmmup);
 		kpreempt_disable();
+
 		cpuset = sfmmup->sfmmu_cpusran;
 		CPUSET_AND(cpuset, cpu_ready_set);
 		CPUSET_DEL(cpuset, CPU->cpu_id);
-		SFMMU_XCALL_STATS(ctxnum);
+
+		SFMMU_XCALL_STATS(sfmmup);
 		xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
-			ctx_pgcnt);
+		    sfmmu_pgcnt);
+
 		for (; bitvec != 0; bitvec >>= 1) {
 			if (bitvec & 1)
-				vtag_flushpage(addr, ctxnum);
+				vtag_flushpage(addr, (uint64_t)sfmmup);
 			addr += MMU_PAGESIZE;
 		}
 		kpreempt_enable();
+		sfmmu_hat_exit(hatlockp);
+
 		sfmmu_xcall_save += (pgunload-1);
 	}
 	dmrp->dmr_bitvec = 0;
 }
 
 /*
- * Flushes only TLB.
- */
-static void
-sfmmu_tlb_ctx_demap(sfmmu_t *sfmmup)
-{
-	int ctxnum;
-	cpuset_t cpuset;
-
-	ctxnum = (int)sfmmutoctxnum(sfmmup);
-	if (ctxnum == INVALID_CONTEXT) {
-		/*
-		 * if ctx was stolen then simply return
-		 * whoever stole ctx is responsible for flush.
-		 */
-		return;
-	}
-	ASSERT(ctxnum != KCONTEXT);
-	/*
-	 * There is no need to protect against ctx being stolen.  If the
-	 * ctx is stolen we will simply get an extra flush.
-	 */
-	kpreempt_disable();
-
-	cpuset = sfmmup->sfmmu_cpusran;
-	CPUSET_DEL(cpuset, CPU->cpu_id);
-	CPUSET_AND(cpuset, cpu_ready_set);
-	SFMMU_XCALL_STATS(ctxnum);
-
-	/*
-	 * Flush TLB.
-	 * RFE: it might be worth delaying the TLB flush as well. In that
-	 * case each cpu would have to traverse the dirty list and flush
-	 * each one of those ctx from the TLB.
-	 */
-	vtag_flushctx(ctxnum);
-	xt_some(cpuset, vtag_flushctx_tl1, ctxnum, 0);
-
-	kpreempt_enable();
-	SFMMU_STAT(sf_tlbflush_ctx);
-}
-
-/*
- * Flushes all TLBs.
- */
-static void
-sfmmu_tlb_all_demap(void)
-{
-	cpuset_t cpuset;
-
-	/*
-	 * There is no need to protect against ctx being stolen.  If the
-	 * ctx is stolen we will simply get an extra flush.
-	 */
-	kpreempt_disable();
-
-	cpuset = cpu_ready_set;
-	CPUSET_DEL(cpuset, CPU->cpu_id);
-	/* LINTED: constant in conditional context */
-	SFMMU_XCALL_STATS(INVALID_CONTEXT);
-
-	vtag_flushall();
-	xt_some(cpuset, vtag_flushall_tl1, 0, 0);
-	xt_sync(cpuset);
-
-	kpreempt_enable();
-	SFMMU_STAT(sf_tlbflush_all);
-}
-
-/*
  * In cases where we need to synchronize with TLB/TSB miss trap
  * handlers, _and_ need to flush the TLB, it's a lot easier to
- * steal the context from the process and free it than to do a
+ * throw away the context from the process than to do a
  * special song and dance to keep things consistent for the
  * handlers.
  *
@@ -11221,79 +11036,73 @@
  *
  * One added advantage of this approach is that on MMUs that
  * support a "flush all" operation, we will delay the flush until
- * we run out of contexts, and then flush the TLB one time.  This
+ * the next cnum wrap-around, and then flush the TLB one time.  This
  * is rather rare, so it's a lot less expensive than making 8000
- * x-calls to flush the TLB 8000 times.  Another is that we can do
- * all of this without pausing CPUs, due to some knowledge of how
- * resume() loads processes onto the processor; it sets the thread
- * into cpusran, and _then_ looks at cnum.  Because we do things in
- * the reverse order here, we guarantee exactly one of the following
- * statements is always true:
- *
- *   1) Nobody is in resume() so we have nothing to worry about anyway.
- *   2) The thread in resume() isn't in cpusran when we do the xcall,
- *      so we know when it does set itself it'll see cnum is
- *      INVALID_CONTEXT.
- *   3) The thread in resume() is in cpusran, and already might have
- *      looked at the old cnum.  That's OK, because we'll xcall it
- *      and, if necessary, flush the TLB along with the rest of the
- *      crowd.
+ * x-calls to flush the TLB 8000 times.
+ *
+ * A per-process (PP) lock is used to synchronize ctx allocations in
+ * resume() and ctx invalidations here.
  */
 static void
-sfmmu_tlb_swap_ctx(sfmmu_t *sfmmup, struct ctx *ctx)
+sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
 {
 	cpuset_t cpuset;
-	int cnum;
-
-	if (sfmmup->sfmmu_cnum == INVALID_CONTEXT)
-		return;
-
-	SFMMU_STAT(sf_ctx_swap);
+	int cnum, currcnum;
+	mmu_ctx_t *mmu_ctxp;
+	int i;
+	uint_t pstate_save;
+
+	SFMMU_STAT(sf_ctx_inv);
+
+	ASSERT(sfmmu_hat_lock_held(sfmmup));
+	ASSERT(sfmmup != ksfmmup);
 
 	kpreempt_disable();
 
-	ASSERT(rw_read_locked(&ctx->ctx_rwlock) == 0);
-	ASSERT(ctx->ctx_sfmmu == sfmmup);
-
-	cnum = ctxtoctxnum(ctx);
-	ASSERT(sfmmup->sfmmu_cnum == cnum);
-	ASSERT(cnum >= NUM_LOCKED_CTXS);
-
-	sfmmup->sfmmu_cnum = INVALID_CONTEXT;
-	membar_enter();	/* make sure visible on all CPUs */
-	ctx->ctx_sfmmu = NULL;
+	mmu_ctxp = CPU_MMU_CTXP(CPU);
+	ASSERT(mmu_ctxp);
+	ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
+	ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
+
+	currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
+
+	pstate_save = sfmmu_disable_intrs();
+
+	lock_set(&sfmmup->sfmmu_ctx_lock);	/* acquire PP lock */
+	/* set HAT cnum invalid across all context domains. */
+	for (i = 0; i < max_mmu_ctxdoms; i++) {
+
+		cnum = sfmmup->sfmmu_ctxs[i].cnum;
+		if (cnum == INVALID_CONTEXT) {
+			continue;
+		}
+
+		sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
+	}
+	membar_enter();	/* make sure globally visible to all CPUs */
+	lock_clear(&sfmmup->sfmmu_ctx_lock);	/* release PP lock */
+
+	sfmmu_enable_intrs(pstate_save);
 
 	cpuset = sfmmup->sfmmu_cpusran;
 	CPUSET_DEL(cpuset, CPU->cpu_id);
 	CPUSET_AND(cpuset, cpu_ready_set);
-	SFMMU_XCALL_STATS(cnum);
-
-	/*
-	 * Force anybody running this process on CPU
-	 * to enter sfmmu_tsbmiss_exception() on the
-	 * next TLB miss, synchronize behind us on
-	 * the HAT lock, and grab a new context.  At
-	 * that point the new page size will become
-	 * active in the TLB for the new context.
-	 * See sfmmu_get_ctx() for details.
-	 */
-	if (delay_tlb_flush) {
+	if (!CPUSET_ISNULL(cpuset)) {
+		SFMMU_XCALL_STATS(sfmmup);
 		xt_some(cpuset, sfmmu_raise_tsb_exception,
-		    cnum, INVALID_CONTEXT);
-		SFMMU_STAT(sf_tlbflush_deferred);
-	} else {
-		xt_some(cpuset, sfmmu_ctx_steal_tl1, cnum, INVALID_CONTEXT);
-		vtag_flushctx(cnum);
-		SFMMU_STAT(sf_tlbflush_ctx);
-	}
-	xt_sync(cpuset);
-
-	/*
-	 * If we just stole the ctx from the current
+		    (uint64_t)sfmmup, INVALID_CONTEXT);
+		xt_sync(cpuset);
+		SFMMU_STAT(sf_tsb_raise_exception);
+		SFMMU_MMU_STAT(mmu_tsb_raise_exception);
+	}
+
+	/*
+	 * If the hat being invalidated is that of the current
 	 * process on local CPU we need to invalidate
 	 * this CPU context as well.
 	 */
-	if (sfmmu_getctx_sec() == cnum) {
+	if ((sfmmu_getctx_sec() == currcnum) &&
+	    (currcnum != INVALID_CONTEXT)) {
 		sfmmu_setctx_sec(INVALID_CONTEXT);
 		sfmmu_clear_utsbinfo();
 	}
@@ -11301,15 +11110,10 @@
 	kpreempt_enable();
 
 	/*
-	 * Now put old ctx on the dirty list since we may not
-	 * have flushed the context out of the TLB.  We'll let
-	 * the next guy who uses this ctx flush it instead.
-	 */
-	mutex_enter(&ctx_list_lock);
-	CTX_SET_FLAGS(ctx, CTX_FREE_FLAG);
-	ctx->ctx_free = ctxdirty;
-	ctxdirty = ctx;
-	mutex_exit(&ctx_list_lock);
+	 * We hold the hat lock, so nobody should allocate a context
+	 * for us yet.
+	 */
+	ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
 }
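
The allocation side of the PP-lock protocol lives in sfmmu_alloc_ctx(),
which is not part of this hunk; a hedged sketch of its store ordering,
with new_cnum standing in for whatever cnum the allocator obtained:

	lock_set(&sfmmup->sfmmu_ctx_lock);	/* PP lock */
	sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].gnum = mmu_ctxp->mmu_gnum;
	sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum = new_cnum;
	lock_clear(&sfmmup->sfmmu_ctx_lock);

Invalidation (above) and allocation thus never interleave their
per-domain cnum updates for the same hat.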
 
 /*
@@ -11322,12 +11126,11 @@
 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
 {
 	cpuset_t cpuset;
-	int	ctxnum = INVALID_CONTEXT;
 
 	kpreempt_disable();
 	cpuset = cpu_ready_set;
 	CPUSET_DEL(cpuset, CPU->cpu_id);
-	SFMMU_XCALL_STATS(ctxnum);	/* account to any ctx */
+	SFMMU_XCALL_STATS(NULL);	/* account to any ctx */
 	xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
 	xt_sync(cpuset);
 	vac_flushpage(pfnum, vcolor);
@@ -11338,14 +11141,13 @@
 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
 {
 	cpuset_t cpuset;
-	int	ctxnum = INVALID_CONTEXT;
 
 	ASSERT(vcolor >= 0);
 
 	kpreempt_disable();
 	cpuset = cpu_ready_set;
 	CPUSET_DEL(cpuset, CPU->cpu_id);
-	SFMMU_XCALL_STATS(ctxnum);	/* account to any ctx */
+	SFMMU_XCALL_STATS(NULL);	/* account to any ctx */
 	xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
 	xt_sync(cpuset);
 	vac_flushcolor(vcolor, pfnum);
@@ -11367,8 +11169,6 @@
 	hatlock_t *hatlockp;
 	struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
 	sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
-	struct ctx *ctx;
-	int cnum;
 	extern uint32_t sendmondo_in_recover;
 
 	if (flags != HAT_PRESUSPEND)
@@ -11408,20 +11208,7 @@
 		}
 	}
 
-	ctx = sfmmutoctx(sfmmup);
-	rw_enter(&ctx->ctx_rwlock, RW_WRITER);
-	cnum = sfmmutoctxnum(sfmmup);
-
-	if (cnum != INVALID_CONTEXT) {
-		/*
-		 * Force all threads for this sfmmu to sfmmu_tsbmiss_exception
-		 * on their next TLB miss.
-		 */
-		sfmmu_tlb_swap_ctx(sfmmup, ctx);
-	}
-
-	rw_exit(&ctx->ctx_rwlock);
-
+	sfmmu_invalidate_ctx(sfmmup);
 	sfmmu_hat_exit(hatlockp);
 
 	return (0);
@@ -13174,7 +12961,7 @@
 			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
 #ifdef	DEBUG
 			if (kpm_tlb_flush)
-				sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT);
+				sfmmu_kpm_demap_tlbs(vaddr);
 #endif
 		}
 
@@ -13271,7 +13058,7 @@
 		sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
 #ifdef	DEBUG
 		if (kpm_tlb_flush)
-			sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT);
+			sfmmu_kpm_demap_tlbs(vaddr);
 #endif
 
 	} else if (PP_ISTNC(pp)) {
@@ -13968,7 +13755,7 @@
 sfmmu_kpm_demap_large(caddr_t vaddr)
 {
 	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
-	sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT);
+	sfmmu_kpm_demap_tlbs(vaddr);
 }
 
 /*
@@ -13978,14 +13765,14 @@
 sfmmu_kpm_demap_small(caddr_t vaddr)
 {
 	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
-	sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT);
+	sfmmu_kpm_demap_tlbs(vaddr);
 }
 
 /*
  * Demap a kpm mapping in all TLB's.
  */
 static void
-sfmmu_kpm_demap_tlbs(caddr_t vaddr, int ctxnum)
+sfmmu_kpm_demap_tlbs(caddr_t vaddr)
 {
 	cpuset_t cpuset;
 
@@ -13993,9 +13780,12 @@
 	cpuset = ksfmmup->sfmmu_cpusran;
 	CPUSET_AND(cpuset, cpu_ready_set);
 	CPUSET_DEL(cpuset, CPU->cpu_id);
-	SFMMU_XCALL_STATS(ctxnum);
-	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)vaddr, ctxnum);
-	vtag_flushpage(vaddr, ctxnum);
+	SFMMU_XCALL_STATS(ksfmmup);
+
+	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)vaddr,
+	    (uint64_t)ksfmmup);
+	vtag_flushpage(vaddr, (uint64_t)ksfmmup);
+
 	kpreempt_enable();
 }
 
@@ -14401,7 +14191,7 @@
 
 		/* Flush vcolor in DCache */
 		CPUSET_DEL(cpuset, CPU->cpu_id);
-		SFMMU_XCALL_STATS(ksfmmup->sfmmu_cnum);
+		SFMMU_XCALL_STATS(ksfmmup);
 		xt_some(cpuset, vac_flushpage_tl1, pfn, vcolor);
 		vac_flushpage(pfn, vcolor);
 	}
@@ -14590,8 +14380,23 @@
 void
 hat_thread_exit(kthread_t *thd)
 {
+	uint64_t pgsz_cnum;
+	uint_t pstate_save;
+
 	ASSERT(thd->t_procp->p_as == &kas);
 
-	sfmmu_setctx_sec(KCONTEXT);
+	pgsz_cnum = KCONTEXT;
+#ifdef sun4u
+	pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
+#endif
+	/*
+	 * Note that sfmmu_load_mmustate() is currently a no-op for
+	 * kernel threads. We still need to disable interrupts here,
+	 * because sfmmu_load_mmustate() panics if the caller has not
+	 * disabled interrupts.
+	 */
+	pstate_save = sfmmu_disable_intrs();
+	sfmmu_setctx_sec(pgsz_cnum);
 	sfmmu_load_mmustate(ksfmmup);
-}
+	sfmmu_enable_intrs(pstate_save);
+}
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.h	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.h	Tue Jun 20 07:21:09 2006 -0700
@@ -259,6 +259,121 @@
 #define	TSB_SWAPPED	0x4
 
 /*
+ * Per-MMU context domain kstats.
+ *
+ * TSB Miss Exceptions
+ *	Number of times a TSB miss exception is handled in an MMU. See
+ *	sfmmu_tsbmiss_exception() for more details.
+ * TSB Raise Exception
+ *	Number of times the CPUs within an MMU are cross-called
+ *	to invalidate either a specific process context (when the process
+ *	switches MMU contexts) or the context of any process that is
+ *	running on those CPUs (as part of the MMU context wrap-around).
+ * Wrap Around
+ *	The number of times a wrap-around of MMU context happens.
+ */
+typedef enum mmu_ctx_stat_types {
+	MMU_CTX_TSB_EXCEPTIONS,		/* TSB miss exceptions handled */
+	MMU_CTX_TSB_RAISE_EXCEPTION,	/* ctx invalidation cross calls */
+	MMU_CTX_WRAP_AROUND,		/* wraparounds */
+	MMU_CTX_NUM_STATS
+} mmu_ctx_stat_t;
+
+/*
+ * Per-MMU context domain structure. This is instantiated the first time a CPU
+ * belonging to the MMU context domain is configured into the system, at boot
+ * time or at DR time.
+ *
+ * mmu_gnum
+ *	The current generation number for the context IDs on this MMU context
+ *	domain. It is protected by mmu_lock.
+ * mmu_cnum
+ *	The current cnum to be allocated on this MMU context domain. It
+ *	is protected via CAS.
+ * mmu_nctxs
+ *	The max number of context IDs supported on every CPU in this
+ *	MMU context domain. It is 8K except for Rock, where it is 64K.
+ *	This is needed here in case the system supports mixed types of
+ *	processors/MMUs. It also helps the ctx switch code access fewer
+ *	cache lines, i.e., there is no need to retrieve it from some
+ *	global nctxs.
+ * mmu_lock
+ *	The mutex used to serialize context ID wrap-around operations.
+ * mmu_idx
+ *	The index for this MMU context domain structure in the global array
+ *	mmu_ctxs_tbl.
+ * mmu_ncpus
+ *	The actual number of CPUs that have been configured in this
+ *	MMU context domain. This also acts as a reference count for the
+ *	structure. When the last CPU in an MMU context domain is unconfigured,
+ *	the structure is freed. It is protected by mmu_lock.
+ * mmu_cpuset
+ *	The CPU set of configured CPUs for this MMU context domain. Used
+ *	to cross-call all the CPUs in the MMU context domain to invalidate
+ *	context IDs during a wraparound operation. It is protected by mmu_lock.
+ */
+
+typedef struct mmu_ctx {
+	uint64_t	mmu_gnum;
+	uint_t		mmu_cnum;
+	uint_t		mmu_nctxs;
+	kmutex_t	mmu_lock;
+	uint_t		mmu_idx;
+	uint_t		mmu_ncpus;
+	cpuset_t	mmu_cpuset;
+	kstat_t		*mmu_kstat;
+	kstat_named_t	mmu_kstat_data[MMU_CTX_NUM_STATS];
+} mmu_ctx_t;
+
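
A hedged sketch of how a context ID might be carved out of mmu_cnum with
CAS (the shipped allocator, sfmmu_alloc_ctx(), is not shown in this
changeset; cas32() is the kernel atomic from <sys/atomic.h>):

	uint_t
	mmu_cnum_alloc_sketch(mmu_ctx_t *mmu_ctxp)
	{
		uint_t cnum;

		do {
			cnum = mmu_ctxp->mmu_cnum;
			if (cnum >= mmu_ctxp->mmu_nctxs)
				return (INVALID_CONTEXT); /* need wrap-around */
		} while (cas32(&mmu_ctxp->mmu_cnum, cnum, cnum + 1) != cnum);

		return (cnum);
	}

On exhaustion, roughly speaking, the caller takes mmu_lock, bumps
mmu_gnum, and cross-calls mmu_cpuset; that is the wrap-around counted by
the MMU_CTX_WRAP_AROUND kstat.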
+#define	mmu_tsb_exceptions	\
+		mmu_kstat_data[MMU_CTX_TSB_EXCEPTIONS].value.ui64
+#define	mmu_tsb_raise_exception	\
+		mmu_kstat_data[MMU_CTX_TSB_RAISE_EXCEPTION].value.ui64
+#define	mmu_wrap_around		\
+		mmu_kstat_data[MMU_CTX_WRAP_AROUND].value.ui64
+
+extern uint_t		max_mmu_ctxdoms;
+extern mmu_ctx_t	**mmu_ctxs_tbl;
+
+extern void	sfmmu_cpu_init(cpu_t *);
+extern void	sfmmu_cpu_cleanup(cpu_t *);
+
+/*
+ * The following structure is used to get MMU context domain information for
+ * a CPU from the platform.
+ *
+ * mmu_idx
+ *	The MMU context domain index within the global array mmu_ctxs_tbl
+ * mmu_nctxs
+ *	The number of context IDs supported in the MMU context domain
+ *	(64K for Rock)
+ */
+typedef struct mmu_ctx_info {
+	uint_t		mmu_idx;
+	uint_t		mmu_nctxs;
+} mmu_ctx_info_t;
+
+#pragma weak plat_cpuid_to_mmu_ctx_info
+
+extern void	plat_cpuid_to_mmu_ctx_info(processorid_t, mmu_ctx_info_t *);
+
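
A hypothetical platform implementation, assuming a fixed number of CPUs
per context domain (the constant below is illustrative; real platforms
derive mmu_idx from their own topology):

	#define	SKETCH_CPUS_PER_DOMAIN	4	/* illustrative only */

	void
	plat_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
	{
		infop->mmu_idx = cpuid / SKETCH_CPUS_PER_DOMAIN;
		infop->mmu_nctxs = 1 << 13;	/* 8K context IDs */
	}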
+/*
+ * Each address space has an array of sfmmu_ctx_t structures, one structure
+ * per MMU context domain.
+ *
+ * cnum
+ *	The context ID allocated for an address space on an MMU context domain
+ * gnum
+ *	The generation number for the context ID in the MMU context domain.
+ *
+ * This structure needs to be a power-of-two in size.
+ */
+typedef struct sfmmu_ctx {
+	uint64_t	gnum:48;
+	uint64_t	cnum:16;
+} sfmmu_ctx_t;
+
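
A sketch (not the shipped code) of how a cached entry is judged stale: a
cnum is only usable while its gnum still matches the domain's current
mmu_gnum; after a wrap-around the generations differ and a new cnum must
be allocated:

	sfmmu_ctx_t ctx = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx];

	if (ctx.gnum == mmu_ctxp->mmu_gnum && ctx.cnum != INVALID_CONTEXT) {
		/* context ID is still valid on this domain; reuse it */
	} else {
		/* stale generation: allocate a new cnum */
	}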
+
+/*
  * The platform dependent hat structure.
  * tte counts should be protected by cas.
  * cpuset is protected by cas.
@@ -281,16 +396,26 @@
 	uchar_t		sfmmu_rmstat;	/* refmod stats refcnt */
 	uchar_t		sfmmu_clrstart;	/* start color bin for page coloring */
 	ushort_t	sfmmu_clrbin;	/* per as phys page coloring bin */
-	short		sfmmu_cnum;	/* context number */
 	ushort_t	sfmmu_flags;	/* flags */
 	struct tsb_info	*sfmmu_tsb;	/* list of per as tsbs */
 	uint64_t	sfmmu_ismblkpa; /* pa of sfmmu_iblkp, or -1 */
+	lock_t		sfmmu_ctx_lock;	/* sync ctx alloc and invalidation */
 	kcondvar_t	sfmmu_tsb_cv;	/* signals TSB swapin or relocation */
 	uchar_t		sfmmu_cext;	/* context page size encoding */
 	uint8_t		sfmmu_pgsz[MMU_PAGE_SIZES];  /* ranking for MMU */
 #ifdef sun4v
 	struct hv_tsb_block sfmmu_hvblock;
 #endif
+	/*
+	 * sfmmu_ctxs is a variable length array of max_mmu_ctxdoms # of
+	 * elements. max_mmu_ctxdoms is determined at run-time.
+	 * sfmmu_ctxs[1] is just the first element of the array; it always
+	 * has to be the last field to ensure that the memory allocated
+	 * for sfmmu_ctxs is consecutive with the memory of the rest of
+	 * the hat data structure.
+	 */
+	sfmmu_ctx_t	sfmmu_ctxs[1];
+
 };
 
 #define	sfmmu_iblk	h_un.sfmmu_iblkp
@@ -324,28 +449,6 @@
 #define	FLUSH_NECESSARY_CPUS	0
 #define	FLUSH_ALL_CPUS		1
 
-/*
- * Software context structure.  The size of this structure is currently
- * hardwired into the tsb miss handlers in assembly code through the
- * CTX_SZ_SHIFT define.  Since this define is used in a shift we should keep
- * this structure a power of two.
- *
- * ctx_flags:
- * Bit 0 : Free flag.
- */
-struct ctx {
-	union _ctx_un {
-		sfmmu_t *ctx_sfmmup;	/* back pointer to hat id */
-		struct ctx *ctx_freep;	/* next ctx in freelist */
-	} ctx_un;
-	krwlock_t	ctx_rwlock;	/* protect context from stealer */
-	uint32_t	ctx_flags;	/* flags */
-	uint8_t		pad[12];
-};
-
-#define	ctx_sfmmu	ctx_un.ctx_sfmmup
-#define	ctx_free	ctx_un.ctx_freep
-
 #ifdef	DEBUG
 /*
  * For debugging purpose only. Maybe removed later.
@@ -744,6 +847,14 @@
 
 #endif /* !_ASM */
 
+/* Proc Count Project */
+#define	SFMMU_PGCNT_MASK	0x3f
+#define	SFMMU_PGCNT_SHIFT	6
+#define	INVALID_MMU_ID		-1
+#define	SFMMU_MMU_GNUM_RSHIFT	16
+#define	SFMMU_MMU_CNUM_LSHIFT	(64 - SFMMU_MMU_GNUM_RSHIFT)
+#define	MAX_SFMMU_CTX_VAL	((1 << 16) - 1) /* for sanity check */
+#define	MAX_SFMMU_GNUM_VAL	((0x1UL << 48) - 1)
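
A worked example of this packing as used by sfmmu_tlb_range_demap() and
vtag_flush_pgcnt_tl1() above; the ASSERT in the hat_sfmmu.c hunk
guarantees the low SFMMU_PGCNT_SHIFT bits of a hat pointer are clear:

	uint64_t arg = (uint64_t)sfmmup | ((pgcnt - 1) & SFMMU_PGCNT_MASK);

	/* receiver side */
	sfmmu_t *hat = (sfmmu_t *)(arg & ~(uint64_t)SFMMU_PGCNT_MASK);
	pgcnt_t count = (arg & SFMMU_PGCNT_MASK) + 1;	/* 1..64 pages */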
 
 /*
  * The tsb miss handlers written in assembly know that sfmmup
@@ -874,10 +985,10 @@
 
 #define	HME_HASH_FUNCTION(hatid, vaddr, shift)				\
 	((hatid != KHATID)?						\
-	(&uhme_hash[ (((uintptr_t)(hatid) ^ ((uintptr_t)vaddr >> (shift))) & \
-	    UHMEHASH_SZ) ]):					\
-	(&khme_hash[ (((uintptr_t)(hatid) ^ ((uintptr_t)vaddr >> (shift))) & \
-	    KHMEHASH_SZ) ]))
+	(&uhme_hash[ (((uintptr_t)(hatid) ^	\
+	    ((uintptr_t)vaddr >> (shift))) & UHMEHASH_SZ) ]):		\
+	(&khme_hash[ (((uintptr_t)(hatid) ^	\
+	    ((uintptr_t)vaddr >> (shift))) & KHMEHASH_SZ) ]))
 
 /*
  * This macro will traverse a hmeblk hash link list looking for an hme_blk
@@ -953,9 +1064,9 @@
 #define	SFMMU_HASH_LOCK_ISHELD(hmebp)					\
 		(mutex_owned(&hmebp->hmehash_mutex))
 
-#define	SFMMU_XCALL_STATS(ctxnum)					\
+#define	SFMMU_XCALL_STATS(sfmmup)					\
 {									\
-	if (ctxnum == KCONTEXT) {					\
+	if (sfmmup == ksfmmup) {					\
 		SFMMU_STAT(sf_kernel_xcalls);				\
 	} else {							\
 		SFMMU_STAT(sf_user_xcalls);				\
@@ -963,11 +1074,8 @@
 }
 
 #define	astosfmmu(as)		((as)->a_hat)
-#define	sfmmutoctxnum(sfmmup)	((sfmmup)->sfmmu_cnum)
-#define	sfmmutoctx(sfmmup)	(&ctxs[sfmmutoctxnum(sfmmup)])
 #define	hblktosfmmu(hmeblkp)	((sfmmu_t *)(hmeblkp)->hblk_tag.htag_id)
 #define	sfmmutoas(sfmmup)	((sfmmup)->sfmmu_as)
-#define	ctxnumtoctx(ctxnum)	(&ctxs[ctxnum])
 /*
  * We use the sfmmu data structure to keep the per as page coloring info.
  */
@@ -1355,6 +1463,35 @@
 	sethi	%hi(0x1000000), reg
 
 /*
+ * Macro to get hat per-MMU cnum on this CPU.
+ * sfmmu - In, pass in "sfmmup" from the caller.
+ * cnum	- Out, return 'cnum' to the caller
+ * scr	- scratch
+ */
+#define	SFMMU_CPU_CNUM(sfmmu, cnum, scr)				      \
+	CPU_ADDR(scr, cnum);	/* scr = load CPU struct addr */	      \
+	ld	[scr + CPU_MMU_IDX], cnum;	/* cnum = mmuid */	      \
+	add	sfmmu, SFMMU_CTXS, scr;	/* scr = sfmmup->sfmmu_ctxs[] */      \
+	sllx    cnum, SFMMU_MMU_CTX_SHIFT, cnum;			      \
+	add	scr, cnum, scr;		/* scr = sfmmup->sfmmu_ctxs[id] */    \
+	ldx	[scr + SFMMU_MMU_GC_NUM], scr;	/* sfmmu_ctxs[id].gcnum */    \
+	sllx    scr, SFMMU_MMU_CNUM_LSHIFT, scr;			      \
+	srlx    scr, SFMMU_MMU_CNUM_LSHIFT, cnum;	/* cnum = sfmmu cnum */
+
+/*
+ * Macro to get hat gnum & cnum associated with sfmmu_ctx[mmuid] entry
+ * entry - In, pass in (&sfmmu_ctxs[mmuid] - SFMMU_CTXS) from the caller.
+ * gnum - Out, return sfmmu gnum
+ * cnum - Out, return sfmmu cnum
+ * reg	- scratch
+ */
+#define	SFMMU_MMUID_GNUM_CNUM(entry, gnum, cnum, reg)			     \
+	ldx	[entry + SFMMU_CTXS], reg;  /* reg = sfmmu (gnum | cnum) */  \
+	srlx	reg, SFMMU_MMU_GNUM_RSHIFT, gnum;    /* gnum = sfmmu gnum */ \
+	sllx	reg, SFMMU_MMU_CNUM_LSHIFT, cnum;			     \
+	srlx	cnum, SFMMU_MMU_CNUM_LSHIFT, cnum;   /* cnum = sfmmu cnum */
+
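
In C terms, SFMMU_MMUID_GNUM_CNUM unpacks the 8-byte sfmmu_ctx_t word,
whose big-endian bitfield layout puts gnum in the upper 48 bits and cnum
in the lower 16 (a reference sketch, not shipped code):

	uint64_t word = *(uint64_t *)&sfmmup->sfmmu_ctxs[mmuid];
	uint64_t gnum = word >> SFMMU_MMU_GNUM_RSHIFT;	/* upper 48 bits */
	uint64_t cnum = (word << SFMMU_MMU_CNUM_LSHIFT) >>
	    SFMMU_MMU_CNUM_LSHIFT;			/* lower 16 bits */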
+/*
  * Macro to get this CPU's tsbmiss area.
  */
 #define	CPU_TSBMISS_AREA(tsbmiss, tmp1)					\
@@ -1408,6 +1545,65 @@
 
 #endif
 
+/*
+ * Macro to setup arguments with kernel sfmmup context + page size before
+ * calling sfmmu_setctx_sec()
+ */
+#ifdef sun4v
+#define	SET_KAS_CTXSEC_ARGS(sfmmup, arg0, arg1)			\
+	set	KCONTEXT, arg0;					\
+	set	0, arg1;
+#else
+#define	SET_KAS_CTXSEC_ARGS(sfmmup, arg0, arg1)			\
+	ldub	[sfmmup + SFMMU_CEXT], arg1;			\
+	set	KCONTEXT, arg0;					\
+	sll	arg1, CTXREG_EXT_SHIFT, arg1;
+#endif
+
+#define	PANIC_IF_INTR_DISABLED_PSTR(pstatereg, label, scr)	       	\
+	andcc	pstatereg, PSTATE_IE, %g0;	/* panic if intrs */	\
+/*CSTYLED*/								\
+	bnz,pt	%icc, label;			/* already disabled */	\
+	nop;								\
+									\
+	sethi	%hi(panicstr), scr;					\
+	ldx	[scr + %lo(panicstr)], scr;				\
+	tst	scr;							\
+/*CSTYLED*/								\
+	bnz,pt	%xcc, label;						\
+	nop;								\
+									\
+	save	%sp, -SA(MINFRAME), %sp;				\
+	sethi	%hi(sfmmu_panic1), %o0;					\
+	call	panic;							\
+	or	%o0, %lo(sfmmu_panic1), %o0;				\
+/*CSTYLED*/								\
+label:
+
+#define	PANIC_IF_INTR_ENABLED_PSTR(label, scr)				\
+	/*								\
+	 * The caller must have disabled interrupts.			\
+	 * If interrupts are not disabled, panic			\
+	 */								\
+	rdpr	%pstate, scr;						\
+	andcc	scr, PSTATE_IE, %g0;					\
+/*CSTYLED*/								\
+	bz,pt	%icc, label;						\
+	nop;								\
+									\
+	sethi	%hi(panicstr), scr;					\
+	ldx	[scr + %lo(panicstr)], scr;				\
+	tst	scr;							\
+/*CSTYLED*/								\
+	bnz,pt	%xcc, label;						\
+	nop;								\
+									\
+	sethi	%hi(sfmmu_panic6), %o0;					\
+	call	panic;							\
+	or	%o0, %lo(sfmmu_panic6), %o0;				\
+/*CSTYLED*/								\
+label:
+
 #endif	/* _ASM */
 
 #ifndef _ASM
@@ -1503,11 +1699,10 @@
 extern void	sfmmu_load_tsbe(struct tsbe *, uint64_t, tte_t *, int);
 extern void	sfmmu_unload_tsbe(struct tsbe *, uint64_t, int);
 extern void	sfmmu_load_mmustate(sfmmu_t *);
-extern void	sfmmu_ctx_steal_tl1(uint64_t, uint64_t);
 extern void	sfmmu_raise_tsb_exception(uint64_t, uint64_t);
 #ifndef sun4v
-extern void	sfmmu_itlb_ld(caddr_t, int, tte_t *);
-extern void	sfmmu_dtlb_ld(caddr_t, int, tte_t *);
+extern void	sfmmu_itlb_ld_kva(caddr_t, tte_t *);
+extern void	sfmmu_dtlb_ld_kva(caddr_t, tte_t *);
 #endif /* sun4v */
 extern void	sfmmu_copytte(tte_t *, tte_t *);
 extern int	sfmmu_modifytte(tte_t *, tte_t *, tte_t *);
@@ -1517,7 +1712,8 @@
 			struct hme_blk *, uint64_t, struct hme_blk *);
 extern void	sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
 			uint64_t);
-
+extern uint_t	sfmmu_disable_intrs(void);
+extern void	sfmmu_enable_intrs(uint_t);
 /*
  * functions exported to machine dependent VM code
  */
@@ -1549,7 +1745,7 @@
 extern pgcnt_t  sfmmu_tte_cnt(sfmmu_t *, uint_t);
 extern void	*sfmmu_tsb_segkmem_alloc(vmem_t *, size_t, int);
 extern void	sfmmu_tsb_segkmem_free(vmem_t *, void *, size_t);
-extern void	sfmmu_steal_context(sfmmu_t *, uint8_t *);
+extern void	sfmmu_reprog_pgsz_arr(sfmmu_t *, uint8_t *);
 
 extern void	hat_kern_setup(void);
 extern int	hat_page_relocate(page_t **, page_t **, spgcnt_t *);
@@ -1557,6 +1753,7 @@
 extern int	sfmmu_get_ppvcolor(struct page *);
 extern int	sfmmu_get_addrvcolor(caddr_t);
 extern int	sfmmu_hat_lock_held(sfmmu_t *);
+extern void	sfmmu_alloc_ctx(sfmmu_t *, int, struct cpu *);
 
 /*
  * Functions exported to xhat_sfmmu.c
@@ -1580,8 +1777,6 @@
 extern void mmu_check_page_sizes(sfmmu_t *, uint64_t *);
 
 extern sfmmu_t 		*ksfmmup;
-extern struct ctx	*ctxs;
-extern uint_t		nctxs;
 extern caddr_t		ktsb_base;
 extern uint64_t		ktsb_pbase;
 extern int		ktsb_sz;
@@ -1700,10 +1895,6 @@
 
 	int		sf_swapout;		/* # times hat swapped out */
 
-	int		sf_ctxfree;		/* ctx alloc from free list */
-	int		sf_ctxdirty;		/* ctx alloc from dirty list */
-	int		sf_ctxsteal;		/* ctx allocated by steal */
-
 	int		sf_tsb_alloc;		/* # TSB allocations */
 	int		sf_tsb_allocfail;	/* # times TSB alloc fail */
 	int		sf_tsb_sectsb_create;	/* # times second TSB added */
@@ -1756,10 +1947,7 @@
 
 	int		sf_user_vtop;		/* # of user vatopfn calls */
 
-	int		sf_ctx_swap;		/* # times switched MMU ctxs */
-	int		sf_tlbflush_all;	/* # times flush all TLBs */
-	int		sf_tlbflush_ctx;	/* # times flush TLB ctx */
-	int		sf_tlbflush_deferred;	/* # times !flush ctx imm. */
+	int		sf_ctx_inv;		/* #times invalidate MMU ctx */
 
 	int		sf_tlb_reprog_pgsz;	/* # times switch TLB pgsz */
 };
@@ -1787,9 +1975,11 @@
 	int	sf_kmod_faults;		/* # of mod (prot viol) flts */
 };
 
-#define	SFMMU_STAT(stat)		sfmmu_global_stat.stat++;
-#define	SFMMU_STAT_ADD(stat, amount)	sfmmu_global_stat.stat += amount;
-#define	SFMMU_STAT_SET(stat, count)	sfmmu_global_stat.stat = count;
+#define	SFMMU_STAT(stat)		sfmmu_global_stat.stat++
+#define	SFMMU_STAT_ADD(stat, amount)	sfmmu_global_stat.stat += (amount)
+#define	SFMMU_STAT_SET(stat, count)	sfmmu_global_stat.stat = (count)
+
+#define	SFMMU_MMU_STAT(stat)		CPU->cpu_m.cpu_mmu_ctxp->stat++
 
 #endif /* !_ASM */
 
--- a/usr/src/uts/sun4/cpu/cpu_module.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4/cpu/cpu_module.c	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -45,7 +44,7 @@
 uint_t adj_shift;
 hrtime_t hrtime_base;
 int traptrace_use_stick;
-uchar_t *ctx_pgsz_array;
+uint_t cpu_impl_dual_pgsz;
 
 void
 cpu_setup(void)
@@ -57,31 +56,25 @@
 
 /*ARGSUSED*/
 void
-vtag_flushpage(caddr_t addr, uint_t ctx)
-{}
-
-/*ARGSUSED*/
-void
-vtag_flushctx(uint_t ctx)
+vtag_flushpage(caddr_t addr, uint64_t sfmmup)
 {}
 
 void
 vtag_flushall(void)
 {}
 
-/*ARGSUSED*/
 void
-vtag_flushpage_tl1(uint64_t addr, uint64_t ctx)
+vtag_flushall_uctxs(void)
 {}
 
 /*ARGSUSED*/
 void
-vtag_flush_pgcnt_tl1(uint64_t addr, uint64_t ctx_pgcnt)
+vtag_flushpage_tl1(uint64_t addr, uint64_t sfmmup)
 {}
 
 /*ARGSUSED*/
 void
-vtag_flushctx_tl1(uint64_t ctx, uint64_t dummy)
+vtag_flush_pgcnt_tl1(uint64_t addr, uint64_t sfmmup_pgcnt)
 {}
 
 /*ARGSUSED*/
--- a/usr/src/uts/sun4/io/trapstat.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4/io/trapstat.c	Tue Jun 20 07:21:09 2006 -0700
@@ -577,12 +577,12 @@
 		if (i < TSTAT_INSTR_PAGES) {
 			tte.tte_intlo = TTE_PFN_INTLO(tcpu->tcpu_pfn[i]) |
 				TTE_LCK_INT | TTE_CP_INT | TTE_PRIV_INT;
-			sfmmu_itlb_ld(va, KCONTEXT, &tte);
+			sfmmu_itlb_ld_kva(va, &tte);
 		} else {
 			tte.tte_intlo = TTE_PFN_INTLO(tcpu->tcpu_pfn[i]) |
 				TTE_LCK_INT | TTE_CP_INT | TTE_CV_INT |
 				TTE_PRIV_INT | TTE_HWWR_INT;
-			sfmmu_dtlb_ld(va, KCONTEXT, &tte);
+			sfmmu_dtlb_ld_kva(va, &tte);
 		}
 	}
 #else /* sun4v */
@@ -1559,7 +1559,8 @@
 	vmem_free(tstat_arena, tcpu->tcpu_data, tstat_data_size);
 
 	for (i = 0; i < tstat_total_pages; i++, va += MMU_PAGESIZE) {
-		xt_one(cpu, vtag_flushpage_tl1, (uint64_t)va, KCONTEXT);
+		xt_one(cpu, vtag_flushpage_tl1, (uint64_t)va,
+		    (uint64_t)ksfmmup);
 	}
 #else
 	xt_one(cpu, vtag_unmap_perm_tl1, (uint64_t)va, KCONTEXT);
--- a/usr/src/uts/sun4/ml/offsets.in	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4/ml/offsets.in	Tue Jun 20 07:21:09 2006 -0700
@@ -1,13 +1,12 @@
 \ offsets.in: input file to produce assym.h using the stabs program
-\ Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+\ Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 \ Use is subject to license terms.
 \
 \ CDDL HEADER START
 \
 \ The contents of this file are subject to the terms of the
-\ Common Development and Distribution License, Version 1.0 only
-\ (the "License").  You may not use this file except in compliance
-\ with the License.
+\ Common Development and Distribution License (the "License").
+\ You may not use this file except in compliance with the License.
 \
 \ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 \ or http://www.opensolaris.org/os/licensing.
@@ -226,6 +225,14 @@
 	hblk_hme	HMEBLK_HME1
 	hblk_nextpa	HMEBLK_NEXTPA
 
+mmu_ctx MMU_CTX_SIZE
+	mmu_gnum	MMU_CTX_GNUM
+	mmu_cnum	MMU_CTX_CNUM
+	mmu_nctxs	MMU_CTX_NCTXS
+
+sfmmu_ctx	SFMMU_MMU_CTX_SIZE SFMMU_MMU_CTX_SHIFT
+	gnum		SFMMU_MMU_GC_NUM
+
 user	USIZEBYTES
 	u_comm
 	u_signal
@@ -366,6 +373,8 @@
 	cpu_m.tmp2			CPU_TMP2
 	cpu_m.mpcb			CPU_MPCB
 	cpu_m.cpu_private		CPU_PRIVATE
+	cpu_m.cpu_mmu_idx		CPU_MMU_IDX
+	cpu_m.cpu_mmu_ctxp            	CPU_MMU_CTXP
 	cpu_m.ptl1_state		CPU_PTL1
 
 cpu_core_t	CPU_CORE_SIZE	CPU_CORE_SHIFT
--- a/usr/src/uts/sun4/ml/swtch.s	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4/ml/swtch.s	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -185,7 +184,6 @@
 	!
 	! IMPORTANT: Registers at this point must be:
 	!	%i0 = new thread
-	!	%i1 = flag (non-zero if unpinning from an interrupt thread)
 	!	%i1 = cpu pointer
 	!	%i2 = old proc pointer
 	!	%i3 = new proc pointer
@@ -200,21 +198,21 @@
 	cmp 	%i2, %i3		! resuming the same process?
 	be,pt	%xcc, 5f		! yes.
 	  nop
+
 	ldx	[%i3 + P_AS], %o0	! load p->p_as
-	ldx	[%o0 + A_HAT], %o3	! load (p->p_as)->a_hat
-	! %o3 is live until the call to sfmmu_setctx_sec below
+	ldx	[%o0 + A_HAT], %i5	! %i5 = new proc hat
 
 	!
 	! update cpusran field
 	!
 	ld	[%i1 + CPU_ID], %o4
-	add	%o3, SFMMU_CPUSRAN, %o5
+	add	%i5, SFMMU_CPUSRAN, %o5
 	CPU_INDEXTOSET(%o5, %o4, %g1)
-	ldx	[%o5], %o2		! o2 = cpusran field
+	ldx	[%o5], %o2		! %o2 = cpusran field
 	mov	1, %g2
-	sllx	%g2, %o4, %o4		! o4 = bit for this cpu
+	sllx	%g2, %o4, %o4		! %o4 = bit for this cpu
 	andcc	%o4, %o2, %g0
-	bnz,pn	%xcc, 4f
+	bnz,pn	%xcc, 0f		! bit already set, go to 0
 	  nop
 3:
 	or	%o2, %o4, %o1		! or in this cpu's bit mask
@@ -224,20 +222,52 @@
 	  ldx	[%o5], %o2		! o2 = cpusran field
 	membar	#LoadLoad|#StoreLoad
 
+0:
+	! 
+	! disable interrupts
 	!
-	! Switch to different address space.
+	! if resume from user to kernel thread
+	!	call sfmmu_setctx_sec
+	! if resume from kernel (or a different user) thread to user thread
+	!	call sfmmu_alloc_ctx
+	! sfmmu_load_mmustate
 	!
-4:
+	! enable interrupts
+	!
+	! %i5 = new proc hat
+	!
+
+	sethi	%hi(ksfmmup), %o2
+        ldx	[%o2 + %lo(ksfmmup)], %o2
+
 	rdpr	%pstate, %i4
-	wrpr	%i4, PSTATE_IE, %pstate		! disable interrupts
+        cmp	%i5, %o2		! new proc hat == ksfmmup ?
+	bne,pt	%xcc, 3f		! new proc is not kernel as, go to 3
+	  wrpr	%i4, PSTATE_IE, %pstate
 
-	call	sfmmu_setctx_sec		! switch to other ctx (maybe 0)
-	  lduh	[%o3 + SFMMU_CNUM], %o0
+	SET_KAS_CTXSEC_ARGS(%i5, %o0, %o1)
+
+	! new proc is kernel as
+
+	call	sfmmu_setctx_sec		! switch to kernel context
+	  or	%o0, %o1, %o0
+
+	ba,a,pt	%icc, 4f
+	
+	!
+	! Switch to user address space.
+	!
+3:
+	mov	%i5, %o0			! %o0 = sfmmup
+	mov	%i1, %o2			! %o2 = CPU
+	call	sfmmu_alloc_ctx
+	  mov	%g0, %o1			! %o1 = allocate flag = 0
+4:
 	call	sfmmu_load_mmustate		! program MMU registers
-	  mov	%o3, %o0
+	  mov	%i5, %o0
+	
+	wrpr	%g0, %i4, %pstate		! enable interrupts
 
-	wrpr	%g0, %i4, %pstate		! enable interrupts
-	
 5:
 	!
 	! spin until dispatched thread's mutex has
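
In C terms the new resume() MMU switch above reduces to roughly the
following (a sketch; disable_intrs()/enable_intrs() are stand-ins for the
wrpr-based interrupt bracketing in the assembly, and pgsz_bits stands for
the sfmmu_cext encoding set up by SET_KAS_CTXSEC_ARGS):

	pstate_save = disable_intrs();
	if (new_hat == ksfmmup)
		sfmmu_setctx_sec(KCONTEXT | pgsz_bits);	/* kernel thread */
	else
		sfmmu_alloc_ctx(new_hat, 0, CPU);	/* allocate flag = 0 */
	sfmmu_load_mmustate(new_hat);
	enable_intrs(pstate_save);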
--- a/usr/src/uts/sun4/os/mp_startup.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4/os/mp_startup.c	Tue Jun 20 07:21:09 2006 -0700
@@ -431,6 +431,12 @@
 	chip_cpu_init(cp);
 
 	cpu_intrq_setup(cp);
+
+	/*
+	 * Initialize MMU context domain information.
+	 */
+	sfmmu_cpu_init(cp);
+
 }
 
 /*
@@ -522,6 +528,7 @@
 	 */
 	disp_cpu_fini(cp);
 	cpu_pa[cpuid] = 0;
+	sfmmu_cpu_cleanup(cp);
 	bzero(cp, sizeof (*cp));
 
 	/*
--- a/usr/src/uts/sun4/os/startup.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4/os/startup.c	Tue Jun 20 07:21:09 2006 -0700
@@ -64,6 +64,7 @@
 #include <sys/traptrace.h>
 #include <sys/memnode.h>
 #include <sys/mem_cage.h>
+#include <sys/mmu.h>
 
 extern void setup_trap_table(void);
 extern void cpu_intrq_setup(struct cpu *);
@@ -2684,10 +2685,8 @@
 	"h# %p constant khme_hash "
 	"h# %x constant UHMEHASH_SZ "
 	"h# %x constant KHMEHASH_SZ "
+	"h# %p constant KCONTEXT "
 	"h# %p constant KHATID "
-	"h# %x constant CTX_SIZE "
-	"h# %x constant CTX_SFMMU "
-	"h# %p constant ctxs "
 	"h# %x constant ASI_MEM "
 
 	": PHYS-X@ ( phys -- data ) "
@@ -2754,11 +2753,6 @@
 	"   until r> drop 						  "
 	"; "
 
-	": CNUM_TO_SFMMUP ( cnum -- sfmmup ) "
-	"   CTX_SIZE * ctxs + CTX_SFMMU + "
-	"x@ "
-	"; "
-
 	": HME_HASH_TAG ( sfmmup rehash addr -- hblktag ) "
 	"   over HME_HASH_SHIFT HME_HASH_BSPAGE      ( sfmmup rehash bspage ) "
 	"   HTAG_REHASHSZ << or nip		     ( hblktag ) "
@@ -2776,7 +2770,12 @@
 	"; "
 
 	": unix-tte ( addr cnum -- false | tte-data true ) "
-	"      CNUM_TO_SFMMUP                 ( addr sfmmup ) "
+	"    KCONTEXT = if                   ( addr ) "
+	"	KHATID                       ( addr khatid ) "
+	"    else                            ( addr ) "
+	"       drop false exit              ( false ) "
+	"    then "
+	"      ( addr khatid ) "
 	"      mmu_hashcnt 1+ 1  do           ( addr sfmmup ) "
 	"         2dup swap i HME_HASH_SHIFT  "
 					"( addr sfmmup sfmmup addr hmeshift ) "
@@ -2835,10 +2834,8 @@
 	    (caddr_t)va_to_pa((caddr_t)khme_hash),
 	    UHMEHASH_SZ,
 	    KHMEHASH_SZ,
+	    KCONTEXT,
 	    KHATID,
-	    sizeof (struct ctx),
-	    OFFSET(struct ctx, ctx_sfmmu),
-	    ctxs,
 	    ASI_MEM);
 	prom_interpret(bp, 0, 0, 0, 0, 0);
 
--- a/usr/src/uts/sun4/vm/sfmmu.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4/vm/sfmmu.c	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -773,7 +772,6 @@
 int
 ndata_alloc_hat(struct memlist *ndata, pgcnt_t npages, pgcnt_t kpm_npages)
 {
-	size_t 	ctx_sz;
 	size_t	mml_alloc_sz;
 	size_t	cb_alloc_sz;
 	int	max_nucuhme_buckets = MAX_NUCUHME_BUCKETS;
@@ -790,20 +788,6 @@
 	}
 
 	/*
-	 * Allocate ctx structures
-	 *
-	 * based on v_proc to calculate how many ctx structures
-	 * is not possible;
-	 * use whatever module_setup() assigned to nctxs
-	 */
-	PRM_DEBUG(nctxs);
-	ctx_sz = nctxs * sizeof (struct ctx);
-	if ((ctxs = ndata_alloc(ndata, ctx_sz, sizeof (struct ctx))) == NULL)
-		return (-1);
-
-	PRM_DEBUG(ctxs);
-
-	/*
 	 * The number of buckets in the hme hash tables
 	 * is a power of 2 such that the average hash chain length is
 	 * HMENT_HASHAVELEN.  The number of buckets for the user hash is
--- a/usr/src/uts/sun4u/cpu/opl_olympus.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/cpu/opl_olympus.c	Tue Jun 20 07:21:09 2006 -0700
@@ -77,14 +77,7 @@
  */
 static int opl_async_check_interval = 60;		/* 1 min */
 
-/*
- * Maximum number of contexts for Olympus-C.
- */
-#define	MAX_NCTXS	(1 << 13)
-
-/* Will be set !NULL for SPARC64-VI and derivatives. */
-static uchar_t ctx_pgsz_arr[MAX_NCTXS];
-uchar_t *ctx_pgsz_array = ctx_pgsz_arr;
+uint_t cpu_impl_dual_pgsz = 1;
 
 /*
  * PA[22:0] represent Displacement in Jupiter
@@ -529,16 +522,6 @@
 	at_flags = EF_SPARC_32PLUS | EF_SPARC_SUN_US1 | EF_SPARC_SUN_US3;
 
 	/*
-	 * Use the maximum number of contexts available for SPARC64-VI
-	 * unless it has been tuned for debugging.
-	 * We are checking against 0 here since this value can be patched
-	 * while booting.  It can not be patched via /etc/system since it
-	 * will be patched too late and thus cause the system to panic.
-	 */
-	if (nctxs == 0)
-		nctxs = MAX_NCTXS;
-
-	/*
 	 * Due to the number of entries in the fully-associative tlb
 	 * this may have to be tuned lower than in spitfire.
 	 */
@@ -957,9 +940,18 @@
 	ASSERT(pgsz1 < mmu_page_sizes);
 	new_cext = TAGACCEXT_MKSZPAIR(pgsz1, pgsz0);
 	if (hat->sfmmu_cext != new_cext) {
+#ifdef DEBUG
+		int i;
+		/*
+		 * Assert that cnum is invalid; pagesize can only be
+		 * changed after a proc's ctxs are invalidated.
+		 */
+		for (i = 0; i < max_mmu_ctxdoms; i++) {
+			ASSERT(hat->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
+		}
+#endif /* DEBUG */
 		hat->sfmmu_cext = new_cext;
 	}
-	ctx_pgsz_array[hat->sfmmu_cnum] = hat->sfmmu_cext;
 	/*
 	 * sfmmu_setctx_sec() will take care of the
 	 * rest of the dirty work for us.
--- a/usr/src/uts/sun4u/cpu/opl_olympus_asm.s	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/cpu/opl_olympus_asm.s	Tue Jun 20 07:21:09 2006 -0700
@@ -73,7 +73,7 @@
 
 /* ARGSUSED */
 void
-vtag_flushpage(caddr_t vaddr, u_int ctxnum)
+vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
 {}
 
 #else	/* lint */
@@ -83,20 +83,11 @@
 	 * flush page from the tlb
 	 *
 	 * %o0 = vaddr
-	 * %o1 = ctxnum
+	 * %o1 = sfmmup
 	 */
 	rdpr	%pstate, %o5
 #ifdef DEBUG
-	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
-	bnz,a,pt %icc, 3f			/* disabled, panic	 */
-	  nop
-	save	%sp, -SA(MINFRAME), %sp
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	  or	%o0, %lo(sfmmu_panic1), %o0
-	ret
-	restore
-3:
+	PANIC_IF_INTR_DISABLED_PSTR(%o5, opl_di_l3, %g1)
 #endif /* DEBUG */
 	/*
 	 * disable ints
@@ -109,41 +100,45 @@
 	 * Interrupts are disabled to prevent the primary ctx register
 	 * from changing underneath us.
 	 */
-	brnz,pt	%o1, 1f			/* KCONTEXT? */
-	sethi	%hi(FLUSH_ADDR), %o3
+	sethi   %hi(ksfmmup), %o3
+        ldx     [%o3 + %lo(ksfmmup)], %o3
+        cmp     %o3, %o1
+        bne,pt   %xcc, 1f			! if not kernel as, go to 1
+	  sethi	%hi(FLUSH_ADDR), %o3
 	/*
-	 * For KCONTEXT demaps use primary. type = page implicitly
+	 * For Kernel demaps use primary. type = page implicitly
 	 */
 	stxa	%g0, [%o0]ASI_DTLB_DEMAP	/* dmmu flush for KCONTEXT */
 	stxa	%g0, [%o0]ASI_ITLB_DEMAP	/* immu flush for KCONTEXT */
 	flush	%o3
-	b	5f
-	nop
+	retl
+	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
 1:
 	/*
 	 * User demap.  We need to set the primary context properly.
 	 * Secondary context cannot be used for SPARC64-VI IMMU.
 	 * %o0 = vaddr
-	 * %o1 = ctxnum
+	 * %o1 = sfmmup
 	 * %o3 = FLUSH_ADDR
 	 */
-	sethi	%hi(ctx_pgsz_array), %o4
-	ldn	[%o4 + %lo(ctx_pgsz_array)], %o4
-	ldub	[%o4 + %o1], %o4
+	SFMMU_CPU_CNUM(%o1, %g1, %g2)		! %g1 = sfmmu cnum on this CPU
+	
+	ldub	[%o1 + SFMMU_CEXT], %o4		! %o4 = sfmmup->sfmmu_cext
 	sll	%o4, CTXREG_EXT_SHIFT, %o4
-	or	%o1, %o4, %o1
+	or	%g1, %o4, %g1			! %g1 = pgsz | cnum
+
 	wrpr	%g0, 1, %tl
 	set	MMU_PCONTEXT, %o4
 	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %o0, %o0
-	ldxa	[%o4]ASI_DMMU, %o2		/* rd old ctxnum */
-	stxa	%o1, [%o4]ASI_DMMU		/* wr new ctxum */
-4:
+	ldxa	[%o4]ASI_DMMU, %o2		! %o2 = save old ctxnum
+	stxa	%g1, [%o4]ASI_DMMU		! wr new ctxnum
+
 	stxa	%g0, [%o0]ASI_DTLB_DEMAP
 	stxa	%g0, [%o0]ASI_ITLB_DEMAP
 	stxa	%o2, [%o4]ASI_DMMU		/* restore old ctxnum */
 	flush	%o3
 	wrpr	%g0, 0, %tl
-5:
+
 	retl
 	wrpr	%g0, %o5, %pstate		/* enable interrupts */
 	SET_SIZE(vtag_flushpage)
@@ -153,61 +148,6 @@
 
 #if defined(lint)
 
-/* ARGSUSED */
-void
-vtag_flushctx(u_int ctxnum)
-{}
-
-#else	/* lint */
-
-	ENTRY_NP(vtag_flushctx)
-	/*
-	 * flush context from the tlb
-	 *
-	 * %o0 = ctxnum
-	 * We disable interrupts to prevent the primary ctx register changing
-	 * underneath us.
-	 */
-	sethi	%hi(FLUSH_ADDR), %o3
-	rdpr	%pstate, %o2
-
-#ifdef DEBUG
-	andcc	%o2, PSTATE_IE, %g0		/* if interrupts already */
-	bnz,a,pt %icc, 1f			/* disabled, panic	 */
-	  nop
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	  or	%o0, %lo(sfmmu_panic1), %o0
-1:
-#endif /* DEBUG */
-
-	sethi	%hi(ctx_pgsz_array), %o4
-	ldn	[%o4 + %lo(ctx_pgsz_array)], %o4
-	ldub	[%o4 + %o0], %o4
-	sll	%o4, CTXREG_EXT_SHIFT, %o4
-	or	%o0, %o4, %o0
-	wrpr	%o2, PSTATE_IE, %pstate		/* disable interrupts */
-	set	MMU_PCONTEXT, %o4
-	set	DEMAP_CTX_TYPE | DEMAP_PRIMARY, %g1
-	wrpr	%g0, 1, %tl
-	ldxa	[%o4]ASI_DMMU, %o5		/* rd old ctxnum */
-	stxa	%o0, [%o4]ASI_DMMU		/* wr new ctxum */
-4:
-	stxa	%g0, [%g1]ASI_DTLB_DEMAP
-	stxa	%g0, [%g1]ASI_ITLB_DEMAP
-	stxa	%o5, [%o4]ASI_DMMU		/* restore old ctxnum */
-	flush	%o3
-	wrpr	%g0, 0, %tl
-5:
-	retl
-	wrpr	%g0, %o2, %pstate		/* enable interrupts */
-	SET_SIZE(vtag_flushctx)
-
-#endif	/* lint */
-
-
-#if defined(lint)
-
 void
 vtag_flushall(void)
 {}
@@ -235,7 +175,7 @@
 
 /* ARGSUSED */
 void
-vtag_flushpage_tl1(uint64_t vaddr, uint64_t ctxnum)
+vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
 {}
 
 #else	/* lint */
@@ -245,13 +185,17 @@
 	 * x-trap to flush page from tlb and tsb
 	 *
 	 * %g1 = vaddr, zero-extended on 32-bit kernel
-	 * %g2 = ctxnum
+	 * %g2 = sfmmup
 	 *
 	 * assumes TSBE_TAG = 0
 	 */
 	srln	%g1, MMU_PAGESHIFT, %g1
-	brnz,pt %g2, 1f					/* KCONTEXT */
-	slln	%g1, MMU_PAGESHIFT, %g1			/* g1 = vaddr */
+		
+	sethi   %hi(ksfmmup), %g3
+        ldx     [%g3 + %lo(ksfmmup)], %g3
+        cmp     %g3, %g2
+        bne,pt	%xcc, 1f                        ! if not kernel as, go to 1
+	  slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */
 
 	/* We need to demap in the kernel context */
 	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
@@ -261,15 +205,16 @@
 1:
 	/* We need to demap in a user context */
 	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
-	sethi	%hi(ctx_pgsz_array), %g4
-	ldn	[%g4 + %lo(ctx_pgsz_array)], %g4
-	ldub	[%g4 + %g2], %g4
+
+	SFMMU_CPU_CNUM(%g2, %g6, %g3)	! %g6 = sfmmu cnum on this CPU
+	
+	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
 	sll	%g4, CTXREG_EXT_SHIFT, %g4
-	or	%g2, %g4, %g2
+	or	%g6, %g4, %g6			! %g6 = pgsz | cnum
 
 	set	MMU_PCONTEXT, %g4
 	ldxa	[%g4]ASI_DMMU, %g5		/* rd old ctxnum */
-	stxa	%g2, [%g4]ASI_DMMU		/* wr new ctxum */
+	stxa	%g6, [%g4]ASI_DMMU		/* wr new ctxnum */
 	stxa	%g0, [%g1]ASI_DTLB_DEMAP
 	stxa	%g0, [%g1]ASI_ITLB_DEMAP
 	stxa	%g5, [%g4]ASI_DMMU		/* restore old ctxnum */
@@ -278,11 +223,12 @@
 
 #endif	/* lint */
 
+
 #if defined(lint)
 
 /* ARGSUSED */
 void
-vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t ctx_pgcnt)
+vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
 {}
 
 #else	/* lint */
@@ -292,7 +238,7 @@
 	 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
 	 *
 	 * %g1 = vaddr, zero-extended on 32-bit kernel
-	 * %g2 = <zero32|ctx16|pgcnt16>
+	 * %g2 = <sfmmup58|pgcnt6>
 	 *
 	 * NOTE: this handler relies on the fact that no
 	 *	interrupts or traps can occur during the loop
@@ -302,45 +248,63 @@
 	 *
 	 * assumes TSBE_TAG = 0
 	 */
-	set	0xffff, %g4
-	and	%g4, %g2, %g3			/* g3 = pgcnt */
-	srln	%g2, 16, %g2			/* g2 = ctxnum */
+	set	SFMMU_PGCNT_MASK, %g4
+	and	%g4, %g2, %g3			/* g3 = pgcnt - 1 */
+	add	%g3, 1, %g3			/* g3 = pgcnt */
+
+	andn	%g2, SFMMU_PGCNT_MASK, %g2	/* g2 = sfmmup */
 	srln	%g1, MMU_PAGESHIFT, %g1
-	brnz,pt	%g2, 1f				/* KCONTEXT? */
+
+	sethi   %hi(ksfmmup), %g4
+        ldx     [%g4 + %lo(ksfmmup)], %g4
+        cmp     %g4, %g2
+        bne,pn   %xcc, 1f			/* if not kernel as, go to 1 */
 	  slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */
 
 	/* We need to demap in the kernel context */
 	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
 	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
+	sethi   %hi(FLUSH_ADDR), %g5
 4:
 	stxa	%g0, [%g1]ASI_DTLB_DEMAP
 	stxa	%g0, [%g1]ASI_ITLB_DEMAP
+	flush	%g5				! flush required by immu
+
 	deccc	%g3				/* decr pgcnt */
 	bnz,pt	%icc,4b
 	  add	%g1, %g2, %g1			/* next page */
 	retry
 1:
-	/* We need to demap in a user context */
-	sethi	%hi(ctx_pgsz_array), %g4
-	ldn	[%g4 + %lo(ctx_pgsz_array)], %g4
+	/*
+	 * We need to demap in a user context
+	 *
+	 * g2 = sfmmup
+	 * g3 = pgcnt
+	 */
+	SFMMU_CPU_CNUM(%g2, %g5, %g6)		! %g5 = sfmmu cnum on this CPU
+		
 	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
-	ldub	[%g4 + %g2], %g4
+
+	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
 	sll	%g4, CTXREG_EXT_SHIFT, %g4
-	or	%g2, %g4, %g2
+	or	%g5, %g4, %g5
 
 	set	MMU_PCONTEXT, %g4
-	ldxa	[%g4]ASI_DMMU, %g5		/* rd old ctxnum */
-	stxa	%g2, [%g4]ASI_DMMU		/* wr new ctxum */
+	ldxa	[%g4]ASI_DMMU, %g6		/* rd old ctxnum */
+	stxa	%g5, [%g4]ASI_DMMU		/* wr new ctxnum */
 
 	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
+	sethi   %hi(FLUSH_ADDR), %g5
 3:
 	stxa	%g0, [%g1]ASI_DTLB_DEMAP
 	stxa	%g0, [%g1]ASI_ITLB_DEMAP
+	flush	%g5				! flush required by immu
+
 	deccc	%g3				/* decr pgcnt */
 	bnz,pt	%icc,3b
 	  add	%g1, %g2, %g1			/* next page */
 
-	stxa	%g5, [%g4]ASI_DMMU		/* restore old ctxnum */
+	stxa	%g6, [%g4]ASI_DMMU		/* restore old ctxnum */
 	retry
 	SET_SIZE(vtag_flush_pgcnt_tl1)
 
@@ -349,39 +313,6 @@
 
 #if defined(lint)
 
-/* ARGSUSED */
-void
-vtag_flushctx_tl1(uint64_t ctxnum, uint64_t dummy)
-{}
-
-#else	/* lint */
-
-	ENTRY_NP(vtag_flushctx_tl1)
-	/*
-	 * x-trap to flush context from tlb
-	 *
-	 * %g1 = ctxnum
-	 */
-	sethi	%hi(ctx_pgsz_array), %g4
-	ldn	[%g4 + %lo(ctx_pgsz_array)], %g4
-	ldub	[%g4 + %g1], %g4
-	sll	%g4, CTXREG_EXT_SHIFT, %g4
-	or	%g1, %g4, %g1
-	set	DEMAP_CTX_TYPE | DEMAP_PRIMARY, %g4
-	set	MMU_PCONTEXT, %g3
-	ldxa	[%g3]ASI_DMMU, %g5		/* rd old ctxnum */
-	stxa	%g1, [%g3]ASI_DMMU		/* wr new ctxum */
-	stxa	%g0, [%g4]ASI_DTLB_DEMAP
-	stxa	%g0, [%g4]ASI_ITLB_DEMAP
-	stxa	%g5, [%g3]ASI_DMMU		/* restore old ctxnum */
-	retry
-	SET_SIZE(vtag_flushctx_tl1)
-
-#endif	/* lint */
-
-
-#if defined(lint)
-
 /*ARGSUSED*/
 void
 vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
--- a/usr/src/uts/sun4u/cpu/spitfire.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/cpu/spitfire.c	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -63,7 +62,7 @@
 #include <sys/dtrace.h>
 #include <sys/errclassify.h>
 
-uchar_t	*ctx_pgsz_array = NULL;
+uint_t	cpu_impl_dual_pgsz = 0;
 
 /*
  * Structure for the 8 byte ecache data dump and the associated AFSR state.
@@ -407,11 +406,6 @@
 #define	UDB_FMTSTR	"\020\012UE\011CE"
 
 /*
- * Maximum number of contexts for Spitfire.
- */
-#define	MAX_NCTXS	(1 << 13)
-
-/*
  * Save the cache bootup state for use when internal
  * caches are to be re-enabled after an error occurs.
  */
@@ -457,16 +451,6 @@
 	 */
 	cache_boot_state = get_lsu() & (LSU_IC | LSU_DC);
 
-	/*
-	 * Use the maximum number of contexts available for Spitfire unless
-	 * it has been tuned for debugging.
-	 * We are checking against 0 here since this value can be patched
-	 * while booting.  It can not be patched via /etc/system since it
-	 * will be patched too late and thus cause the system to panic.
-	 */
-	if (nctxs == 0)
-		nctxs = MAX_NCTXS;
-
 	if (use_page_coloring) {
 		do_pg_coloring = 1;
 		if (use_virtual_coloring)
--- a/usr/src/uts/sun4u/cpu/spitfire_asm.s	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/cpu/spitfire_asm.s	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -195,6 +194,119 @@
 	sub	tmp, linesize, tmp;					\
 1:
 
+#ifdef SF_ERRATA_32
+#define SF_WORKAROUND(tmp1, tmp2)                               \
+        sethi   %hi(FLUSH_ADDR), tmp2                           ;\
+        set     MMU_PCONTEXT, tmp1                              ;\
+        stxa    %g0, [tmp1]ASI_DMMU                             ;\
+        flush   tmp2                                            ;
+#else
+#define SF_WORKAROUND(tmp1, tmp2)
+#endif /* SF_ERRATA_32 */
+
+/*
+ * arg1 = vaddr
+ * arg2 = ctxnum
+ *      - disable interrupts and clear address mask
+ *        to access 64 bit physaddr
+ *      - Blow out the TLB, flush user page.
+ *        . use secondary context.
+ */
+#define VTAG_FLUSHUPAGE(lbl, arg1, arg2, tmp1, tmp2, tmp3, tmp4) \
+        rdpr    %pstate, tmp1                                   ;\
+        andn    tmp1, PSTATE_IE, tmp2				;\
+        wrpr    tmp2, 0, %pstate                                ;\
+        sethi   %hi(FLUSH_ADDR), tmp2                           ;\
+        set     MMU_SCONTEXT, tmp3                              ;\
+        ldxa    [tmp3]ASI_DMMU, tmp4                            ;\
+        or      DEMAP_SECOND | DEMAP_PAGE_TYPE, arg1, arg1      ;\
+        cmp     tmp4, arg2                                      ;\
+        be,a,pt %icc, lbl/**/4                                  ;\
+          nop                                                   ;\
+        stxa    arg2, [tmp3]ASI_DMMU                            ;\
+lbl/**/4:                                                       ;\
+        stxa    %g0, [arg1]ASI_DTLB_DEMAP                       ;\
+        stxa    %g0, [arg1]ASI_ITLB_DEMAP                       ;\
+        flush   tmp2                                            ;\
+        be,a,pt %icc, lbl/**/5                                  ;\
+          nop                                                   ;\
+        stxa    tmp4, [tmp3]ASI_DMMU                            ;\
+        flush   tmp2                                            ;\
+lbl/**/5:                                                       ;\
+        wrpr    %g0, tmp1, %pstate
+
+	
+/*
+ * macro that flushes all the user entries in dtlb
+ * arg1 = dtlb entries
+ *	- Before first compare:
+ *              tmp4 = tte
+ *              tmp5 = vaddr
+ *              tmp6 = ctxnum
+ */
+#define DTLB_FLUSH_UNLOCKED_UCTXS(lbl, arg1, tmp1, tmp2, tmp3, \
+                                tmp4, tmp5, tmp6) \
+lbl/**/0:                                                       ;\
+        sllx    arg1, 3, tmp3                                   ;\
+        SF_WORKAROUND(tmp1, tmp2)                               ;\
+        ldxa    [tmp3]ASI_DTLB_ACCESS, tmp4                     ;\
+        srlx    tmp4, 6, tmp4                                   ;\
+        andcc   tmp4, 1, %g0                                    ;\
+        bnz,pn  %xcc, lbl/**/1                                  ;\
+        srlx    tmp4, 57, tmp4                                  ;\
+        andcc   tmp4, 1, %g0                                    ;\
+        beq,pn  %xcc, lbl/**/1                                  ;\
+          nop                                                   ;\
+        set     TAGREAD_CTX_MASK, tmp1                          ;\
+        ldxa    [tmp3]ASI_DTLB_TAGREAD, tmp2                    ;\
+        and     tmp2, tmp1, tmp6                                ;\
+        andn    tmp2, tmp1, tmp5                                ;\
+	set	KCONTEXT, tmp4					;\
+	cmp	tmp6, tmp4					;\
+	be	lbl/**/1					;\
+	  nop							;\
+        VTAG_FLUSHUPAGE(VD/**/lbl, tmp5, tmp6, tmp1, tmp2, tmp3, tmp4) ;\
+lbl/**/1:                                                       ;\
+        brgz,pt arg1, lbl/**/0                                  ;\
+          sub     arg1, 1, arg1
+
+
+/*
+ * macro that flushes all the user entries in itlb	
+ * arg1 = itlb entries
+ *      - Before first compare:
+ *              tmp4 = tte
+ *              tmp5 = vaddr
+ *              tmp6 = ctxnum
+ */
+#define ITLB_FLUSH_UNLOCKED_UCTXS(lbl, arg1, tmp1, tmp2, tmp3, \
+                                tmp4, tmp5, tmp6) \
+lbl/**/0:                                                       ;\
+        sllx    arg1, 3, tmp3                                   ;\
+        SF_WORKAROUND(tmp1, tmp2)                               ;\
+        ldxa    [tmp3]ASI_ITLB_ACCESS, tmp4                     ;\
+        srlx    tmp4, 6, tmp4                                   ;\
+        andcc   tmp4, 1, %g0                                    ;\
+        bnz,pn  %xcc, lbl/**/1                                  ;\
+        srlx    tmp4, 57, tmp4                                  ;\
+        andcc   tmp4, 1, %g0                                    ;\
+        beq,pn  %xcc, lbl/**/1                                  ;\
+          nop                                                   ;\
+        set     TAGREAD_CTX_MASK, tmp1                          ;\
+        ldxa    [tmp3]ASI_ITLB_TAGREAD, tmp2                    ;\
+        and     tmp2, tmp1, tmp6                                ;\
+        andn    tmp2, tmp1, tmp5                                ;\
+	set	KCONTEXT, tmp4					;\
+	cmp	tmp6, tmp4					;\
+	be	lbl/**/1					;\
+	  nop							;\
+        VTAG_FLUSHUPAGE(VI/**/lbl, tmp5, tmp6, tmp1, tmp2, tmp3, tmp4) ;\
+lbl/**/1:                                                       ;\
+        brgz,pt arg1, lbl/**/0                                  ;\
+        sub     arg1, 1, arg1
+
+
+	
 /*
  * Macro for getting to offset from 'cpu_private' ptr. The 'cpu_private'
  * ptr is in the machcpu structure.
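
For clarity, the flush macros above amount to the following walk, rendered
here as a hedged pseudo-C sketch. The ex_* helper and macro names are
hypothetical stand-ins for the raw ASI accesses and TTE bit tests performed
in the assembly; TAGREAD_CTX_MASK and KCONTEXT are the real symbols used
above.

	/*
	 * Pseudo-C of DTLB_FLUSH_UNLOCKED_UCTXS: walk the TLB entries
	 * from the top, skip locked or invalid entries and anything in
	 * the kernel context, and demap every remaining user page in
	 * its own context.  Illustration only.
	 */
	static void
	ex_dtlb_flush_unlocked_uctxs(int nentries)
	{
		int i;

		for (i = nentries - 1; i >= 0; i--) {
			uint64_t tte = ex_dtlb_access_read(i);	/* hypothetical ASI read */
			uint64_t tag, ctx, va;

			if (EX_TTE_LOCKED(tte) || !EX_TTE_VALID(tte))
				continue;		/* skip locked/invalid entries */
			tag = ex_dtlb_tagread(i);	/* hypothetical ASI tag read */
			ctx = tag & TAGREAD_CTX_MASK;
			va = tag & ~(uint64_t)TAGREAD_CTX_MASK;
			if (ctx == KCONTEXT)
				continue;		/* never demap the kernel ctx */
			ex_vtag_flushupage(va, ctx);	/* as VTAG_FLUSHUPAGE does */
		}
	}

The ITLB variant is identical apart from the ASI registers it reads.
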
@@ -245,32 +357,27 @@
 
 /*ARGSUSED*/
 void
-vtag_flushpage(caddr_t vaddr, uint_t ctxnum)
-{}
-
-/*ARGSUSED*/
-void
-vtag_flushctx(uint_t ctxnum)
+vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
 {}
 
 /*ARGSUSED*/
 void
 vtag_flushall(void)
 {}
-
+	
 /*ARGSUSED*/
 void
-vtag_flushpage_tl1(uint64_t vaddr, uint64_t ctxnum)
+vtag_flushall_uctxs(void)
+{}
+		
+/*ARGSUSED*/
+void
+vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
 {}
 
 /*ARGSUSED*/
 void
-vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t ctx_pgcnt)
-{}
-
-/*ARGSUSED*/
-void
-vtag_flushctx_tl1(uint64_t ctxnum, uint64_t dummy)
+vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
 {}
 
 /*ARGSUSED*/
@@ -341,20 +448,11 @@
 	 * flush page from the tlb
 	 *
 	 * %o0 = vaddr
-	 * %o1 = ctxnum
+	 * %o1 = sfmmup
 	 */
 	rdpr	%pstate, %o5
 #ifdef DEBUG
-	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
-	bnz,a,pt %icc, 3f			/* disabled, panic */
-	nop
-	save	%sp, -SA(MINFRAME), %sp
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	  or	%o0, %lo(sfmmu_panic1), %o0
-	ret
-	restore
-3:
+	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfdi_label1, %g1)
 #endif /* DEBUG */
 	/*
 	 * disable ints
@@ -367,34 +465,40 @@
 	 * Interrupts are disabled to prevent the secondary ctx register
 	 * from changing underneath us.
 	 */
-	brnz,pt	%o1, 1f			/* KCONTEXT? */
-	sethi	%hi(FLUSH_ADDR), %o3
+	sethi   %hi(ksfmmup), %o3
+        ldx     [%o3 + %lo(ksfmmup)], %o3
+        cmp     %o3, %o1
+        bne,pt   %xcc, 1f			! if not kernel as, go to 1
+	  sethi	%hi(FLUSH_ADDR), %o3
 	/*
 	 * For KCONTEXT demaps use primary. type = page implicitly
 	 */
 	stxa	%g0, [%o0]ASI_DTLB_DEMAP	/* dmmu flush for KCONTEXT */
 	stxa	%g0, [%o0]ASI_ITLB_DEMAP	/* immu flush for KCONTEXT */
+	flush	%o3
 	b	5f
-	  flush	%o3
+	  nop
 1:
 	/*
 	 * User demap.  We need to set the secondary context properly.
 	 * %o0 = vaddr
-	 * %o1 = ctxnum
+	 * %o1 = sfmmup
 	 * %o3 = FLUSH_ADDR
 	 */
+	SFMMU_CPU_CNUM(%o1, %g1, %g2)	/* %g1 = sfmmu cnum on this CPU */
+	
 	set	MMU_SCONTEXT, %o4
 	ldxa	[%o4]ASI_DMMU, %o2		/* rd old ctxnum */
 	or	DEMAP_SECOND | DEMAP_PAGE_TYPE, %o0, %o0
-	cmp	%o2, %o1
-	be,a,pt	%icc, 4f
+	cmp	%o2, %g1
+	be,pt	%icc, 4f
 	  nop
-	stxa	%o1, [%o4]ASI_DMMU		/* wr new ctxum */
+	stxa	%g1, [%o4]ASI_DMMU		/* wr new ctxnum */
 4:
 	stxa	%g0, [%o0]ASI_DTLB_DEMAP
 	stxa	%g0, [%o0]ASI_ITLB_DEMAP
 	flush	%o3
-	be,a,pt	%icc, 5f
+	be,pt	%icc, 5f
 	  nop
 	stxa	%o2, [%o4]ASI_DMMU		/* restore old ctxnum */
 	flush	%o3
@@ -402,75 +506,65 @@
 	retl
 	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
 	SET_SIZE(vtag_flushpage)
+	
+        .seg    ".text"
+.flushallmsg:
+        .asciz  "sfmmu_asm: unimplemented flush operation"
 
-	ENTRY_NP(vtag_flushctx)
-	/*
-	 * flush context from the tlb
-	 *
-	 * %o0 = ctxnum
-	 * We disable interrupts to prevent the secondary ctx register changing
-	 * underneath us.
-	 */
-	sethi	%hi(FLUSH_ADDR), %o3
-	set	DEMAP_CTX_TYPE | DEMAP_SECOND, %g1
-	rdpr	%pstate, %o2
-
-#ifdef DEBUG
-	andcc	%o2, PSTATE_IE, %g0		/* if interrupts already */
-	bnz,a,pt %icc, 1f			/* disabled, panic	 */
-	  nop
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	  or	%o0, %lo(sfmmu_panic1), %o0
-1:
-#endif /* DEBUG */
+        ENTRY_NP(vtag_flushall)
+        sethi   %hi(.flushallmsg), %o0
+        call    panic
+          or    %o0, %lo(.flushallmsg), %o0
+        SET_SIZE(vtag_flushall)
 
-	wrpr	%o2, PSTATE_IE, %pstate		/* disable interrupts */
-	set	MMU_SCONTEXT, %o4
-	ldxa	[%o4]ASI_DMMU, %o5		/* rd old ctxnum */
-	cmp	%o5, %o0
-	be,a,pt	%icc, 4f
-	  nop
-	stxa	%o0, [%o4]ASI_DMMU		/* wr new ctxum */
-4:
-	stxa	%g0, [%g1]ASI_DTLB_DEMAP
-	stxa	%g0, [%g1]ASI_ITLB_DEMAP
-	flush	%o3
-	be,a,pt	%icc, 5f
+	ENTRY_NP(vtag_flushall_uctxs)
+	/*
+	 * flush entire DTLB/ITLB.
+	 */
+	CPU_INDEX(%g1, %g2)
+	mulx	%g1, CPU_NODE_SIZE, %g1
+	set	cpunodes, %g2
+	add	%g1, %g2, %g1
+	lduh	[%g1 + ITLB_SIZE], %g2		! %g2 = # entries in ITLB
+	lduh	[%g1 + DTLB_SIZE], %g1		! %g1 = # entries in DTLB
+	sub	%g2, 1, %g2			! %g2 = # entries in ITLB - 1
+	sub	%g1, 1, %g1			! %g1 = # entries in DTLB - 1
+
+        !
+        ! Flush itlb's
+        !
+        ITLB_FLUSH_UNLOCKED_UCTXS(I, %g2, %g3, %g4, %o2, %o3, %o4, %o5)
+
+	!
+        ! Flush dtlb's
+        !
+        DTLB_FLUSH_UNLOCKED_UCTXS(D, %g1, %g3, %g4, %o2, %o3, %o4, %o5)
+
+	membar  #Sync
+	retl
 	  nop
-	stxa	%o5, [%o4]ASI_DMMU		/* restore old ctxnum */
-	flush	%o3
-5:
-	retl
-	  wrpr	%g0, %o2, %pstate		/* enable interrupts */
-	SET_SIZE(vtag_flushctx)
-
-	.seg	".text"
-.flushallmsg:
-	.asciz	"sfmmu_asm: unimplemented flush operation"
-
-	ENTRY_NP(vtag_flushall)
-	sethi	%hi(.flushallmsg), %o0
-	call	panic
-	  or	%o0, %lo(.flushallmsg), %o0
-	SET_SIZE(vtag_flushall)
+	
+	SET_SIZE(vtag_flushall_uctxs)
 
 	ENTRY_NP(vtag_flushpage_tl1)
 	/*
 	 * x-trap to flush page from tlb and tsb
 	 *
 	 * %g1 = vaddr, zero-extended on 32-bit kernel
-	 * %g2 = ctxnum
+	 * %g2 = sfmmup
 	 *
 	 * assumes TSBE_TAG = 0
 	 */
 	srln	%g1, MMU_PAGESHIFT, %g1
 	slln	%g1, MMU_PAGESHIFT, %g1			/* g1 = vaddr */
+	
+	SFMMU_CPU_CNUM(%g2, %g3, %g4)   /* %g3 = sfmmu cnum on this CPU */
+
 	/* We need to set the secondary context properly. */
 	set	MMU_SCONTEXT, %g4
 	ldxa	[%g4]ASI_DMMU, %g5		/* rd old ctxnum */
 	or	DEMAP_SECOND | DEMAP_PAGE_TYPE, %g1, %g1
-	stxa	%g2, [%g4]ASI_DMMU		/* wr new ctxum */
+	stxa	%g3, [%g4]ASI_DMMU		/* wr new ctxnum */
 	stxa	%g0, [%g1]ASI_DTLB_DEMAP
 	stxa	%g0, [%g1]ASI_ITLB_DEMAP
 	stxa	%g5, [%g4]ASI_DMMU		/* restore old ctxnum */
@@ -483,7 +577,7 @@
 	 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
 	 *
 	 * %g1 = vaddr, zero-extended on 32-bit kernel
-	 * %g2 = <zero32|ctx16|pgcnt16>
+	 * %g2 = <sfmmup58 | pgcnt6>
 	 *
 	 * NOTE: this handler relies on the fact that no
 	 *	interrupts or traps can occur during the loop
@@ -496,44 +590,35 @@
 	srln	%g1, MMU_PAGESHIFT, %g1
 	slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */
 	or	DEMAP_SECOND | DEMAP_PAGE_TYPE, %g1, %g1
-	set	0xffff, %g4
-	and	%g4, %g2, %g3			/* g3 = pgcnt */
-	srln	%g2, 16, %g2			/* g2 = ctxnum */
+	
+	set	SFMMU_PGCNT_MASK, %g4
+	and	%g4, %g2, %g3			/* g3 = pgcnt - 1 */
+	add	%g3, 1, %g3			/* g3 = pgcnt */
+
+	andn	%g2, SFMMU_PGCNT_MASK, %g2	/* g2 = sfmmup */
+
+	SFMMU_CPU_CNUM(%g2, %g5, %g6)   ! %g5 = sfmmu cnum on this CPU
+
 	/* We need to set the secondary context properly. */
 	set	MMU_SCONTEXT, %g4
-	ldxa	[%g4]ASI_DMMU, %g5		/* read old ctxnum */
-	stxa	%g2, [%g4]ASI_DMMU		/* write new ctxum */
+	ldxa	[%g4]ASI_DMMU, %g6		/* read old ctxnum */
+	stxa	%g5, [%g4]ASI_DMMU		/* write new ctxnum */
 
 	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
+	sethi	 %hi(FLUSH_ADDR), %g5
 1:
 	stxa	%g0, [%g1]ASI_DTLB_DEMAP
 	stxa	%g0, [%g1]ASI_ITLB_DEMAP
+	flush	%g5
 	deccc	%g3				/* decr pgcnt */
 	bnz,pt	%icc,1b
-	add	%g1, %g2, %g1			/* go to nextpage */
+	  add	%g1, %g2, %g1			/* go to next page */
 
-	stxa	%g5, [%g4]ASI_DMMU		/* restore old ctxnum */
+	stxa	%g6, [%g4]ASI_DMMU		/* restore old ctxnum */
 	membar #Sync
 	retry
 	SET_SIZE(vtag_flush_pgcnt_tl1)
 
-	ENTRY_NP(vtag_flushctx_tl1)
-	/*
-	 * x-trap to flush context from tlb
-	 *
-	 * %g1 = ctxnum
-	 */
-	set	DEMAP_CTX_TYPE | DEMAP_SECOND, %g4
-	set	MMU_SCONTEXT, %g3
-	ldxa	[%g3]ASI_DMMU, %g5		/* rd old ctxnum */
-	stxa	%g1, [%g3]ASI_DMMU		/* wr new ctxum */
-	stxa	%g0, [%g4]ASI_DTLB_DEMAP
-	stxa	%g0, [%g4]ASI_ITLB_DEMAP
-	stxa	%g5, [%g3]ASI_DMMU		/* restore old ctxnum */
-	membar #Sync
-	retry
-	SET_SIZE(vtag_flushctx_tl1)
-
 	! Not implemented on US1/US2
 	ENTRY_NP(vtag_flushall_tl1)
 	retry
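
For reference, the packed second argument that vtag_flush_pgcnt_tl1 now
receives can be built and taken apart as in this minimal C sketch. The
helper names are hypothetical, and EX_PGCNT_MASK stands in for the real
SFMMU_PGCNT_MASK, assumed here to be the low 6-bit mask implied by the
<sfmmup58|pgcnt6> comments in the handlers above.

	/*
	 * Sketch: pack/unpack the <sfmmup58|pgcnt6> xcall argument.
	 * Assumes sfmmup is aligned so its low 6 bits are zero, and
	 * that pgcnt is in 1..64 (the handler adds the 1 back).
	 */
	#define	EX_PGCNT_MASK	0x3f			/* assumed: low 6 bits */

	static uint64_t
	ex_pack(uint64_t sfmmup, uint64_t pgcnt)
	{
		return (sfmmup | ((pgcnt - 1) & EX_PGCNT_MASK));
	}

	static void
	ex_unpack(uint64_t arg, uint64_t *sfmmup, uint64_t *pgcnt)
	{
		*pgcnt = (arg & EX_PGCNT_MASK) + 1;		/* restore count */
		*sfmmup = arg & ~(uint64_t)EX_PGCNT_MASK;	/* pointer bits */
	}
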
--- a/usr/src/uts/sun4u/cpu/us3_common.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/cpu/us3_common.c	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -373,15 +372,8 @@
 ss_t stick_sync_stats[NCPU];
 #endif /* DEBUG */
 
-/*
- * Maximum number of contexts for Cheetah.
- */
-#define	MAX_NCTXS	(1 << 13)
-
-/* Will be set !NULL for Cheetah+ and derivatives. */
-uchar_t *ctx_pgsz_array = NULL;
+uint_t cpu_impl_dual_pgsz = 0;
 #if defined(CPU_IMP_DUAL_PAGESIZE)
-static uchar_t ctx_pgsz_arr[MAX_NCTXS];
 uint_t disable_dual_pgsz = 0;
 #endif	/* CPU_IMP_DUAL_PAGESIZE */
 
@@ -487,16 +479,6 @@
 	cache_boot_state = get_dcu() & DCU_CACHE;
 
 	/*
-	 * Use the maximum number of contexts available for Cheetah
-	 * unless it has been tuned for debugging.
-	 * We are checking against 0 here since this value can be patched
-	 * while booting.  It can not be patched via /etc/system since it
-	 * will be patched too late and thus cause the system to panic.
-	 */
-	if (nctxs == 0)
-		nctxs = MAX_NCTXS;
-
-	/*
 	 * Due to the number of entries in the fully-associative tlb
 	 * this may have to be tuned lower than in spitfire.
 	 */
@@ -568,7 +550,7 @@
 	 * Use Cheetah+ and later dual page size support.
 	 */
 	if (!disable_dual_pgsz) {
-		ctx_pgsz_array = ctx_pgsz_arr;
+		cpu_impl_dual_pgsz = 1;
 	}
 #endif	/* CPU_IMP_DUAL_PAGESIZE */
 
@@ -699,7 +681,7 @@
 	proc_t *p;
 	struct as *as;
 	struct hat *hat;
-	short  cnum;
+	uint_t  cnum;
 	struct tsb_info *tsbinfop;
 	struct tsbe *tsbep;
 	caddr_t tsbp;
@@ -710,6 +692,7 @@
 	int pages_claimed = 0;
 	tte_t tsbe_tte;
 	int tried_kernel_tsb = 0;
+	mmu_ctx_t *mmu_ctxp;
 
 	CHEETAH_LIVELOCK_STAT(proc_entry);
 
@@ -769,10 +752,13 @@
 		goto badstruct;
 	}
 
-	cnum = hat->sfmmu_cnum;
+	mmu_ctxp = CPU_MMU_CTXP(cp);
+	ASSERT(mmu_ctxp);
+	cnum = hat->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
 	CHEETAH_LIVELOCK_STATSET(proc_cnum, cnum);
 
-	if ((cnum < 0) || (cnum == INVALID_CONTEXT) || (cnum >= nctxs)) {
+	if ((cnum < 0) || (cnum == INVALID_CONTEXT) ||
+	    (cnum >= mmu_ctxp->mmu_nctxs)) {
 		CHEETAH_LIVELOCK_STAT(proc_cnum_bad);
 		goto badstruct;
 	}
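
The SFMMU_CPU_CNUM macro used throughout the assembly in this changeset
replaces the old single global context number: each CPU belongs to an MMU
context domain, and a hat keeps one context number per domain. A hedged C
rendering of that lookup, using the mmu_idx and sfmmu_ctxs[].cnum fields
visible in the hunk just above (the function name is illustrative, not the
real macro body):

	/*
	 * Sketch of the per-CPU context-number lookup performed by
	 * SFMMU_CPU_CNUM.  CPU_MMU_CTXP() and sfmmu_ctxs[] appear
	 * elsewhere in this changeset; illustration only.
	 */
	static uint_t
	ex_cpu_cnum(sfmmu_t *sfmmup)
	{
		mmu_ctx_t *mmu_ctxp = CPU_MMU_CTXP(CPU);  /* this CPU's domain */

		return (sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum);
	}
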
--- a/usr/src/uts/sun4u/cpu/us3_common_asm.s	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/cpu/us3_common_asm.s	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -146,7 +145,7 @@
 
 /* ARGSUSED */
 void
-vtag_flushpage(caddr_t vaddr, u_int ctxnum)
+vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
 {}
 
 #else	/* lint */
@@ -156,20 +155,11 @@
 	 * flush page from the tlb
 	 *
 	 * %o0 = vaddr
-	 * %o1 = ctxnum
+	 * %o1 = sfmmup
 	 */
 	rdpr	%pstate, %o5
 #ifdef DEBUG
-	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
-	bnz,a,pt %icc, 3f			/* disabled, panic	 */
-	  nop
-	save	%sp, -SA(MINFRAME), %sp
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	  or	%o0, %lo(sfmmu_panic1), %o0
-	ret
-	restore
-3:
+	PANIC_IF_INTR_DISABLED_PSTR(%o5, u3_di_label0, %g1)
 #endif /* DEBUG */
 	/*
 	 * disable ints
@@ -182,109 +172,51 @@
 	 * Interrupts are disabled to prevent the primary ctx register
 	 * from changing underneath us.
 	 */
-	brnz,pt	%o1, 1f			/* KCONTEXT */
-	sethi	%hi(FLUSH_ADDR), %o3
+	sethi   %hi(ksfmmup), %o3
+        ldx     [%o3 + %lo(ksfmmup)], %o3
+        cmp     %o3, %o1
+        bne,pt   %xcc, 1f			! if not kernel as, go to 1
+	  sethi	%hi(FLUSH_ADDR), %o3
 	/*
-	 * For KCONTEXT demaps use primary. type = page implicitly
+	 * For Kernel demaps use primary. type = page implicitly
 	 */
 	stxa	%g0, [%o0]ASI_DTLB_DEMAP	/* dmmu flush for KCONTEXT */
 	stxa	%g0, [%o0]ASI_ITLB_DEMAP	/* immu flush for KCONTEXT */
 	flush	%o3
-	b	5f
-	nop
+	retl
+	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
 1:
 	/*
 	 * User demap.  We need to set the primary context properly.
 	 * Secondary context cannot be used for Cheetah IMMU.
 	 * %o0 = vaddr
-	 * %o1 = ctxnum
+	 * %o1 = sfmmup
 	 * %o3 = FLUSH_ADDR
 	 */
-	sethi	%hi(ctx_pgsz_array), %o4
-	ldn     [%o4 + %lo(ctx_pgsz_array)], %o4
-	brz	%o4, 2f
-	nop
-	ldub	[%o4 + %o1], %o4
+	SFMMU_CPU_CNUM(%o1, %g1, %g2)		! %g1 = sfmmu cnum on this CPU
+	
+	ldub	[%o1 + SFMMU_CEXT], %o4		! %o4 = sfmmup->sfmmu_cext
 	sll	%o4, CTXREG_EXT_SHIFT, %o4
-	or	%o1, %o4, %o1
-2:
+	or	%g1, %o4, %g1			! %g1 = pgsz | cnum
+
 	wrpr	%g0, 1, %tl
 	set	MMU_PCONTEXT, %o4
 	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %o0, %o0
-	ldxa	[%o4]ASI_DMMU, %o2		/* rd old ctxnum */
-	stxa	%o1, [%o4]ASI_DMMU		/* wr new ctxum */
-4:
+	ldxa	[%o4]ASI_DMMU, %o2		! %o2 = save old ctxnum
+	stxa	%g1, [%o4]ASI_DMMU		! wr new ctxnum
+
 	stxa	%g0, [%o0]ASI_DTLB_DEMAP
 	stxa	%g0, [%o0]ASI_ITLB_DEMAP
 	stxa	%o2, [%o4]ASI_DMMU		/* restore old ctxnum */
 	flush	%o3
 	wrpr	%g0, 0, %tl
-5:
+
 	retl
 	wrpr	%g0, %o5, %pstate		/* enable interrupts */
 	SET_SIZE(vtag_flushpage)
 
 #endif	/* lint */
 
-
-#if defined(lint)
-
-/* ARGSUSED */
-void
-vtag_flushctx(u_int ctxnum)
-{}
-
-#else	/* lint */
-
-	ENTRY_NP(vtag_flushctx)
-	/*
-	 * flush context from the tlb
-	 *
-	 * %o0 = ctxnum
-	 * We disable interrupts to prevent the primary ctx register changing
-	 * underneath us.
-	 */
-	sethi	%hi(FLUSH_ADDR), %o3
-	rdpr	%pstate, %o2
-
-#ifdef DEBUG
-	andcc	%o2, PSTATE_IE, %g0		/* if interrupts already */
-	bnz,a,pt %icc, 1f			/* disabled, panic	 */
-	  nop
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	  or	%o0, %lo(sfmmu_panic1), %o0
-1:
-#endif /* DEBUG */
-
-	sethi	%hi(ctx_pgsz_array), %o4
-	ldn     [%o4 + %lo(ctx_pgsz_array)], %o4
-	brz	%o4, 2f
-	nop
-	ldub	[%o4 + %o0], %o4
-	sll	%o4, CTXREG_EXT_SHIFT, %o4
-	or	%o0, %o4, %o0
-2:
-	wrpr	%o2, PSTATE_IE, %pstate		/* disable interrupts */
-	set	MMU_PCONTEXT, %o4
-	set	DEMAP_CTX_TYPE | DEMAP_PRIMARY, %g1
-	wrpr	%g0, 1, %tl
-	ldxa	[%o4]ASI_DMMU, %o5		/* rd old ctxnum */
-	stxa	%o0, [%o4]ASI_DMMU		/* wr new ctxum */
-4:
-	stxa	%g0, [%g1]ASI_DTLB_DEMAP
-	stxa	%g0, [%g1]ASI_ITLB_DEMAP
-	stxa	%o5, [%o4]ASI_DMMU		/* restore old ctxnum */
-	flush	%o3
-	wrpr	%g0, 0, %tl
-5:
-	retl
-	wrpr	%g0, %o2, %pstate		/* enable interrupts */
-	SET_SIZE(vtag_flushctx)
-
-#endif	/* lint */
-
-
 #if defined(lint)
 
 void
@@ -314,7 +246,7 @@
 
 /* ARGSUSED */
 void
-vtag_flushpage_tl1(uint64_t vaddr, uint64_t ctxnum)
+vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
 {}
 
 #else	/* lint */
@@ -324,12 +256,16 @@
 	 * x-trap to flush page from tlb and tsb
 	 *
 	 * %g1 = vaddr, zero-extended on 32-bit kernel
-	 * %g2 = ctxnum
+	 * %g2 = sfmmup
 	 *
 	 * assumes TSBE_TAG = 0
 	 */
 	srln	%g1, MMU_PAGESHIFT, %g1
-	brnz,pt	%g2, 1f				/* KCONTEXT */
+		
+	sethi   %hi(ksfmmup), %g3
+        ldx     [%g3 + %lo(ksfmmup)], %g3
+        cmp     %g3, %g2
+        bne,pt	%xcc, 1f                        ! if not kernel as, go to 1
 	  slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */
 
 	/* We need to demap in the kernel context */
@@ -340,17 +276,16 @@
 1:
 	/* We need to demap in a user context */
 	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
-	sethi	%hi(ctx_pgsz_array), %g4
-	ldn     [%g4 + %lo(ctx_pgsz_array)], %g4
-	brz	%g4, 2f
-	nop
-	ldub	[%g4 + %g2], %g4
+
+	SFMMU_CPU_CNUM(%g2, %g6, %g3)	! %g6 = sfmmu cnum on this CPU
+	
+	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
 	sll	%g4, CTXREG_EXT_SHIFT, %g4
-	or	%g2, %g4, %g2
-2:
+	or	%g6, %g4, %g6			! %g6 = pgsz | cnum
+
 	set	MMU_PCONTEXT, %g4
 	ldxa	[%g4]ASI_DMMU, %g5		/* rd old ctxnum */
-	stxa	%g2, [%g4]ASI_DMMU		/* wr new ctxum */
+	stxa	%g6, [%g4]ASI_DMMU		/* wr new ctxnum */
 	stxa	%g0, [%g1]ASI_DTLB_DEMAP
 	stxa	%g0, [%g1]ASI_ITLB_DEMAP
 	stxa	%g5, [%g4]ASI_DMMU		/* restore old ctxnum */
@@ -364,7 +299,7 @@
 
 /* ARGSUSED */
 void
-vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t ctx_pgcnt)
+vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
 {}
 
 #else	/* lint */
@@ -374,7 +309,7 @@
 	 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
 	 *
 	 * %g1 = vaddr, zero-extended on 32-bit kernel
-	 * %g2 = <zero32|ctx16|pgcnt16>
+	 * %g2 = <sfmmup58|pgcnt6>, (pgcnt - 1) is passed in via the pgcnt6 bits.
 	 *
 	 * NOTE: this handler relies on the fact that no
 	 *	interrupts or traps can occur during the loop
@@ -384,88 +319,68 @@
 	 *
 	 * assumes TSBE_TAG = 0
 	 */
-	set	0xffff, %g4
-	and	%g4, %g2, %g3			/* g3 = pgcnt */
-	srln	%g2, 16, %g2			/* g2 = ctxnum */
+	set	SFMMU_PGCNT_MASK, %g4
+	and	%g4, %g2, %g3			/* g3 = pgcnt - 1 */
+	add	%g3, 1, %g3			/* g3 = pgcnt */
+
+	andn	%g2, SFMMU_PGCNT_MASK, %g2	/* g2 = sfmmup */
 	srln	%g1, MMU_PAGESHIFT, %g1
-	brnz,pt	%g2, 1f				/* KCONTEXT? */
+
+	sethi   %hi(ksfmmup), %g4
+        ldx     [%g4 + %lo(ksfmmup)], %g4
+        cmp     %g4, %g2
+        bne,pn   %xcc, 1f			/* if not kernel as, go to 1 */
 	  slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */
 
 	/* We need to demap in the kernel context */
 	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
 	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
+	sethi   %hi(FLUSH_ADDR), %g5
 4:
 	stxa	%g0, [%g1]ASI_DTLB_DEMAP
 	stxa	%g0, [%g1]ASI_ITLB_DEMAP
+	flush	%g5				! flush required by immu
+
 	deccc	%g3				/* decr pgcnt */
 	bnz,pt	%icc,4b
 	  add	%g1, %g2, %g1			/* next page */
 	retry
 1:
-	/* We need to demap in a user context */
-	sethi	%hi(ctx_pgsz_array), %g4
-	ldn     [%g4 + %lo(ctx_pgsz_array)], %g4
-	brz	%g4, 2f
-	  or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
-	ldub	[%g4 + %g2], %g4
+	/*
+	 * We need to demap in a user context
+	 *
+	 * g2 = sfmmup
+	 * g3 = pgcnt
+	 */
+	SFMMU_CPU_CNUM(%g2, %g5, %g6)		! %g5 = sfmmu cnum on this CPU
+		
+	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
+
+	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
 	sll	%g4, CTXREG_EXT_SHIFT, %g4
-	or	%g2, %g4, %g2
-2:
+	or	%g5, %g4, %g5
+
 	set	MMU_PCONTEXT, %g4
-	ldxa	[%g4]ASI_DMMU, %g5		/* rd old ctxnum */
-	stxa	%g2, [%g4]ASI_DMMU		/* wr new ctxum */
+	ldxa	[%g4]ASI_DMMU, %g6		/* rd old ctxnum */
+	stxa	%g5, [%g4]ASI_DMMU		/* wr new ctxnum */
 
 	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
+	sethi   %hi(FLUSH_ADDR), %g5
 3:
 	stxa	%g0, [%g1]ASI_DTLB_DEMAP
 	stxa	%g0, [%g1]ASI_ITLB_DEMAP
+	flush	%g5				! flush required by immu
+
 	deccc	%g3				/* decr pgcnt */
 	bnz,pt	%icc,3b
 	  add	%g1, %g2, %g1			/* next page */
 
-	stxa	%g5, [%g4]ASI_DMMU		/* restore old ctxnum */
+	stxa	%g6, [%g4]ASI_DMMU		/* restore old ctxnum */
 	retry
 	SET_SIZE(vtag_flush_pgcnt_tl1)
 
 #endif	/* lint */
 
-
-#if defined(lint)
-
-/* ARGSUSED */
-void
-vtag_flushctx_tl1(uint64_t ctxnum, uint64_t dummy)
-{}
-
-#else	/* lint */
-
-	ENTRY_NP(vtag_flushctx_tl1)
-	/*
-	 * x-trap to flush context from tlb
-	 *
-	 * %g1 = ctxnum
-	 */
-	sethi	%hi(ctx_pgsz_array), %g4
-	ldn     [%g4 + %lo(ctx_pgsz_array)], %g4
-	brz	%g4, 2f
-	nop
-	ldub	[%g4 + %g1], %g4
-	sll	%g4, CTXREG_EXT_SHIFT, %g4
-	or	%g1, %g4, %g1
-2:
-	set	DEMAP_CTX_TYPE | DEMAP_PRIMARY, %g4
-	set	MMU_PCONTEXT, %g3
-	ldxa	[%g3]ASI_DMMU, %g5		/* rd old ctxnum */
-	stxa	%g1, [%g3]ASI_DMMU		/* wr new ctxum */
-	stxa	%g0, [%g4]ASI_DTLB_DEMAP
-	stxa	%g0, [%g4]ASI_ITLB_DEMAP
-	stxa	%g5, [%g3]ASI_DMMU		/* restore old ctxnum */
-	retry
-	SET_SIZE(vtag_flushctx_tl1)
-
-#endif	/* lint */
-
-
 #if defined(lint)
 
 /*ARGSUSED*/
@@ -3180,13 +3095,7 @@
 	ENTRY_NP(itlb_erratum34_fixup)
 	rdpr	%pstate, %o3
 #ifdef DEBUG
-	andcc	%o3, PSTATE_IE, %g0		! If interrupts already
-	bnz,pt %icc, 0f				!   disabled, panic
-	  nop
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	 or	%o0, %lo(sfmmu_panic1), %o0
-0:
+	PANIC_IF_INTR_DISABLED_PSTR(%o3, u3_di_label1, %g1)
 #endif /* DEBUG */
 	wrpr	%o3, PSTATE_IE, %pstate		! Disable interrupts
 	ldxa	[%g0]ASI_ITLB_ACCESS, %o1	! %o1 = entry 0 data
--- a/usr/src/uts/sun4u/cpu/us3_common_mmu.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/cpu/us3_common_mmu.c	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -47,9 +46,6 @@
  *   SPARC V9 JPS1 Implementation Supplement: Sun UltraSPARC-III
  */
 
-/* Will be set !NULL for Cheetah+ and derivatives. */
-extern uchar_t *ctx_pgsz_array;
-
 /*
  * pan_disable_ism_large_pages and pan_disable_large_pages are the Panther-
  * specific versions of disable_ism_large_pages and disable_large_pages,
@@ -115,7 +111,7 @@
  *
  * The effect of these restrictions is to limit the allowable values in
  * sfmmu_pgsz[0] and sfmmu_pgsz[1], since these hat variables are used in
- * mmu_set_ctx_page_sizes to set up the values in the ctx_pgsz_array that
+ * mmu_set_ctx_page_sizes to set up the values in sfmmu_cext that
  * are used at context switch time. The value in sfmmu_pgsz[0] is used in
  * P_pgsz0 and sfmmu_pgsz[1] is used in P_pgsz1, as per Figure F-1-1
  * IMMU and DMMU Primary Context Register in the Panther Implementation
@@ -152,7 +148,7 @@
 void
 mmu_init_large_pages(size_t ism_pagesize)
 {
-	if (ctx_pgsz_array == NULL) {	/* disable_dual_pgsz flag */
+	if (cpu_impl_dual_pgsz == 0) {	/* disable_dual_pgsz flag */
 		pan_disable_ism_large_pages = ((1 << TTE64K) |
 			(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
 		pan_disable_large_pages = ((1 << TTE32M) | (1 << TTE256M));
@@ -310,7 +306,7 @@
 	 */
 	ASSERT(hat->sfmmu_ismhat == NULL);
 	ASSERT(hat != ksfmmup);
-	ASSERT(ctx_pgsz_array != NULL);
+	ASSERT(cpu_impl_dual_pgsz == 1);
 
 	ASSERT((!SFMMU_FLAGS_ISSET(hat, HAT_32M_FLAG)) ||
 		(!SFMMU_FLAGS_ISSET(hat, HAT_256M_FLAG)));
@@ -362,7 +358,7 @@
 	ASSERT(hat->sfmmu_ismhat == NULL);
 	ASSERT(hat != ksfmmup);
 
-	if (ctx_pgsz_array == NULL)	/* disable_dual_pgsz flag */
+	if (cpu_impl_dual_pgsz == 0)	/* disable_dual_pgsz flag */
 		return;
 
 	/*
@@ -451,7 +447,7 @@
 	ASSERT(sfmmu_hat_lock_held(hat));
 	ASSERT(hat != ksfmmup);
 
-	if (ctx_pgsz_array == NULL)	/* disable_dual_pgsz flag */
+	if (cpu_impl_dual_pgsz == 0)	/* disable_dual_pgsz flag */
 		return;
 
 	/*
@@ -473,12 +469,22 @@
 #endif /* DEBUG */
 	new_cext = TAGACCEXT_MKSZPAIR(pgsz1, pgsz0);
 	if (hat->sfmmu_cext != new_cext) {
+#ifdef DEBUG
+		int i;
+		/*
+		 * The cnum must be invalid here: the page size can only
+		 * be changed after a process's contexts are invalidated.
+		 */
+		for (i = 0; i < max_mmu_ctxdoms; i++) {
+			ASSERT(hat->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
+		}
+#endif /* DEBUG */
 		hat->sfmmu_cext = new_cext;
 	}
-	ctx_pgsz_array[hat->sfmmu_cnum] = hat->sfmmu_cext;
+
 	/*
 	 * sfmmu_setctx_sec() will take care of the
-	 * rest of the chores reprogramming the ctx_pgsz_array
+	 * rest of the chores reprogramming the hat->sfmmu_cext
 	 * page size values into the DTLBs.
 	 */
 }
@@ -537,7 +543,7 @@
 		}
 		newval = tmp_pgsz[0] << 8 | tmp_pgsz[1];
 		if (newval != oldval) {
-			sfmmu_steal_context(sfmmup, tmp_pgsz);
+			sfmmu_reprog_pgsz_arr(sfmmup, tmp_pgsz);
 		}
 	}
 }
@@ -603,8 +609,6 @@
 	new_cext_nucleus = TAGACCEXT_MKSZPAIR(tte, TTE8K);
 	new_cext_primary = TAGACCEXT_MKSZPAIR(TTE8K, tte);
 
-	if (ctx_pgsz_array)
-		ctx_pgsz_array[KCONTEXT] = new_cext_primary;
 	hat->sfmmu_cext = new_cext_primary;
 	kcontextreg = ((uint64_t)new_cext_nucleus << CTXREG_NEXT_SHIFT) |
 		((uint64_t)new_cext_primary << CTXREG_EXT_SHIFT);
@@ -618,6 +622,11 @@
 	int impl = cpunodes[getprocessorid()].implementation;
 	uint_t tte = TTE8K;
 
+	if (cpu_impl_dual_pgsz == 0) {
+		heaplp_use_dt512 = 0;
+		return (MMU_PAGESIZE);
+	}
+
 	pend_lpgsz = (struct heap_lp_page_size *)
 	    ((char *)heap_lp_pgsz + sizeof (heap_lp_pgsz));
 
--- a/usr/src/uts/sun4u/lw8/os/lw8_platmod.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/lw8/os/lw8_platmod.c	Tue Jun 20 07:21:09 2006 -0700
@@ -126,13 +126,12 @@
 	IOMMU_PER_SCHIZO);
 
 /*
- * sg_max_ncpus is the maximum number of CPUs supported on Serengeti
- * and Wildcat at GA.  We assume that the maximum number of SSM nodes
- * supported at GA is 4.  sg_max_ncpus is set to be smaller than NCPU
- * to reduce the amount of memory the logs take up until we have a
- * dynamic log memory allocation solution.
+ * sg_max_ncpus is the maximum number of CPUs supported on lw8.
+ * sg_max_ncpus is set to be smaller than NCPU to reduce the amount of
+ * memory the logs take up until we have a dynamic log memory allocation
+ * solution.
  */
-int sg_max_ncpus = (24 * 4);	/* (CPUs per node * max number of nodes) */
+int sg_max_ncpus = (12 * 2);    /* (max # of processors * # of cores/proc) */
 
 /*
  * variables to control mailbox message timeouts.
--- a/usr/src/uts/sun4u/ml/mach_offsets.in	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/ml/mach_offsets.in	Tue Jun 20 07:21:09 2006 -0700
@@ -83,8 +83,8 @@
 #include <sys/traptrace.h>
 
 machcpu
-	intrstat	MCPU_INTRSTAT
-	pil_high_start	MCPU_PIL_HIGH_START
+	intrstat		MCPU_INTRSTAT
+	pil_high_start		MCPU_PIL_HIGH_START
 
 trap_trace_record	TRAP_ENT_SIZE
 	tt_tl		TRAP_ENT_TL
@@ -100,11 +100,13 @@
 	tt_f4		TRAP_ENT_F4
 
 hat	HAT_SIZE
-	sfmmu_cnum
 	sfmmu_cpusran
 	sfmmu_tsb
 	sfmmu_ismblkpa
 	sfmmu_flags
+	sfmmu_cext
+	sfmmu_ctx_lock
+	sfmmu_ctxs
 
 sfmmu_global_stat HATSTAT_SIZE
 	sf_pagefaults		HATSTAT_PAGEFAULT
@@ -113,9 +115,6 @@
 	sf_khash_searches	HATSTAT_KHASH_SEARCH
 	sf_khash_links		HATSTAT_KHASH_LINKS
 
-ctx	CTX_SIZE		CTX_SZ_SHIFT
-	ctx_un.ctx_sfmmup	CTX_SFMMUP
-
 sf_hment	SFHME_SIZE	SFHME_SHIFT
 	hme_tte		SFHME_TTE
 
@@ -212,6 +211,8 @@
 	ecache_size	ECACHE_SIZE
 	ecache_linesize	ECACHE_LINESIZE
 	device_id	DEVICE_ID
+	itlb_size	ITLB_SIZE
+	dtlb_size	DTLB_SIZE
 
 spitfire_scrub_misc_t
 	ec_scrub_outstanding
--- a/usr/src/uts/sun4u/opl/io/drmach.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/opl/io/drmach.c	Tue Jun 20 07:21:09 2006 -0700
@@ -3681,12 +3681,12 @@
 	int i;
 
 	for (i = 0; i < DRMACH_FMEM_LOCKED_PAGES; i++) {
-		vtag_flushpage(va, KCONTEXT);
+		vtag_flushpage(va, (uint64_t)ksfmmup);
 		sfmmu_memtte(&tte, va_to_pfn(va),
 			PROC_DATA|HAT_NOSYNC, TTE8K);
 		tte.tte_intlo |= TTE_LCK_INT;
-		sfmmu_dtlb_ld(va, KCONTEXT, &tte);
-		sfmmu_itlb_ld(va, KCONTEXT, &tte);
+		sfmmu_dtlb_ld_kva(va, &tte);
+		sfmmu_itlb_ld_kva(va, &tte);
 		va += PAGESIZE;
 	}
 }
@@ -3697,7 +3697,7 @@
 	int i;
 
 	for (i = 0; i < DRMACH_FMEM_LOCKED_PAGES; i++) {
-		vtag_flushpage(va, KCONTEXT);
+		vtag_flushpage(va, (uint64_t)ksfmmup);
 		va += PAGESIZE;
 	}
 }
--- a/usr/src/uts/sun4u/opl/io/mc-opl.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/opl/io/mc-opl.c	Tue Jun 20 07:21:09 2006 -0700
@@ -19,6 +19,10 @@
  * CDDL HEADER END
  */
 /*
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
  * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
  */
 
@@ -30,6 +34,7 @@
 #include <sys/modctl.h>
 #include <sys/stat.h>
 #include <sys/async.h>
+#include <sys/machcpuvar.h>
 #include <sys/machsystm.h>
 #include <sys/promif.h>
 #include <sys/ksynch.h>
--- a/usr/src/uts/sun4u/opl/os/opl.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/opl/os/opl.c	Tue Jun 20 07:21:09 2006 -0700
@@ -72,6 +72,17 @@
 
 pgcnt_t opl_startup_cage_size = 0;
 
+static opl_model_info_t opl_models[] = {
+	{ "FF1", OPL_MAX_BOARDS_FF1 },
+	{ "FF2", OPL_MAX_BOARDS_FF2 },
+	{ "DC1", OPL_MAX_BOARDS_DC1 },
+	{ "DC2", OPL_MAX_BOARDS_DC2 },
+	{ "DC3", OPL_MAX_BOARDS_DC3 },
+};
+static	int	opl_num_models = sizeof (opl_models)/sizeof (opl_model_info_t);
+
+static	opl_model_info_t *opl_cur_model = NULL;
+
 static struct memlist *opl_memlist_per_board(struct memlist *ml);
 
 static enum {
@@ -92,6 +103,53 @@
 	return (MIN(opl_tsb_spares, MAX_UPA));
 }
 
+static void
+set_model_info()
+{
+	char	name[MAXSYSNAME];
+	int	i;
+
+	/*
+	 * Get model name from the root node.
+	 *
+	 * We are using the prom device tree since, at this point,
+	 * the Solaris device tree is not yet setup.
+	 */
+	(void) prom_getprop(prom_rootnode(), "model", (caddr_t)name);
+
+	for (i = 0; i < opl_num_models; i++) {
+		if (strncmp(name, opl_models[i].model_name, MAXSYSNAME) == 0) {
+			opl_cur_model = &opl_models[i];
+			break;
+		}
+	}
+	if (i == opl_num_models)
+		cmn_err(CE_WARN, "No valid OPL model found! "
+		    "Setting max_mmu_ctxdoms to the default.");
+}
+
+static void
+set_max_mmu_ctxdoms()
+{
+	extern uint_t	max_mmu_ctxdoms;
+	int		max_boards;
+
+	/*
+	 * From the model, get the maximum number of boards
+	 * supported and set the value accordingly. If the model
+	 * could not be determined or recognized, we assume the max value.
+	 */
+	if (opl_cur_model == NULL)
+		max_boards = OPL_MAX_BOARDS;
+	else
+		max_boards = opl_cur_model->model_max_boards;
+
+	/*
+	 * On OPL, cores and MMUs are one-to-one.
+	 */
+	max_mmu_ctxdoms = OPL_MAX_CORE_UNITS_PER_BOARD * max_boards;
+}
+
 #pragma weak mmu_init_large_pages
 
 void
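
To illustrate the sizing done by set_max_mmu_ctxdoms() above: since cores
and MMUs are one-to-one on OPL, the domain count is simply cores per board
times the model's board limit. The per-board core constant is defined
elsewhere in the tree; the value below is assumed purely for the example.

	/*
	 * Illustration only: how max_mmu_ctxdoms scales with the model.
	 * OPL_MAX_CORE_UNITS_PER_BOARD is assumed to be 8 here; the
	 * real value comes from sys/opl.h.
	 */
	#define	EX_CORE_UNITS_PER_BOARD	8	/* assumption, for the example */

	/* e.g. FF1:  1 board  ->  8 context domains	*/
	/*      DC3: 16 boards -> 128 context domains	*/
	static uint_t
	ex_ctxdoms(const opl_model_info_t *m)
	{
		return (EX_CORE_UNITS_PER_BOARD * m->model_max_boards);
	}
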
@@ -123,6 +181,9 @@
 	}
 
 	tsb_lgrp_affinity = 1;
+
+	set_model_info();
+	set_max_mmu_ctxdoms();
 }
 
 /*
@@ -906,6 +967,23 @@
 {
 }
 
+void
+plat_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *info)
+{
+	int	impl;
+
+	impl = cpunodes[cpuid].implementation;
+	if (IS_OLYMPUS_C(impl)) {
+		/*
+		 * Olympus-C processor supports 2 strands per core.
+		 */
+		info->mmu_idx = cpuid >> 1;
+		info->mmu_nctxs = 8192;
+	} else {
+		cmn_err(CE_PANIC, "Unknown processor implementation %d", impl);
+	}
+}
+
 int
 plat_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
 {
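
Because Olympus-C runs two strands per core and OPL maps MMUs one-to-one
onto cores, plat_cpuid_to_mmu_ctx_info() above places sibling strands in
the same context domain. A small illustrative check, not taken from the
source:

	/*
	 * Strands 2N and 2N+1 share one core, hence one MMU and one
	 * context domain.  Illustration only.
	 */
	mmu_ctx_info_t info4, info5;

	plat_cpuid_to_mmu_ctx_info(4, &info4);	/* mmu_idx = 4 >> 1 = 2 */
	plat_cpuid_to_mmu_ctx_info(5, &info5);	/* mmu_idx = 5 >> 1 = 2 */
	ASSERT(info4.mmu_idx == info5.mmu_idx);	/* same domain */
	ASSERT(info4.mmu_nctxs == 8192);
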
--- a/usr/src/uts/sun4u/os/cpr_impl.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/os/cpr_impl.c	Tue Jun 20 07:21:09 2006 -0700
@@ -707,7 +707,7 @@
 		tte.tte_inthi = TTE_VALID_INT | TTE_PFN_INTHI(ppn);
 		tte.tte_intlo = TTE_PFN_INTLO(ppn) | TTE_LCK_INT |
 		    TTE_CP_INT | TTE_PRIV_INT | TTE_HWWR_INT;
-		sfmmu_dtlb_ld(vaddr, KCONTEXT, &tte);
+		sfmmu_dtlb_ld_kva(vaddr, &tte);
 	}
 }
 
@@ -721,7 +721,7 @@
 		curthreadremapped = 0;
 
 	for (; pages--; vaddr += MMU_PAGESIZE)
-		vtag_flushpage(vaddr, KCONTEXT);
+		vtag_flushpage(vaddr, (uint64_t)ksfmmup);
 }
 
 /*
--- a/usr/src/uts/sun4u/os/ppage.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/os/ppage.c	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -340,7 +339,7 @@
 	    TTE_CV_INT | TTE_PRIV_INT | TTE_LCK_INT | prot;
 
 	ASSERT(CPU->cpu_id == cpu);
-	sfmmu_dtlb_ld(va, KCONTEXT, &tte);
+	sfmmu_dtlb_ld_kva(va, &tte);
 
 	*pslot = myslot;	/* Return ptr to the slot we used. */
 
@@ -352,7 +351,7 @@
 {
 	ASSERT(*pslot == va);
 
-	vtag_flushpage(va, KCONTEXT);
+	vtag_flushpage(va, (uint64_t)ksfmmup);
 	*pslot = NULL;				/* release the slot */
 }
 
--- a/usr/src/uts/sun4u/serengeti/io/sbdp_cpu.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/serengeti/io/sbdp_cpu.c	Tue Jun 20 07:21:09 2006 -0700
@@ -20,7 +20,7 @@
  */
 
 /*
- * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -668,8 +668,8 @@
 	    TTE_PFN_INTHI(bbsram_pfn);
 	tte.tte_intlo = TTE_PFN_INTLO(bbsram_pfn) |
 	    TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT;
-	sfmmu_dtlb_ld(sbdp_shutdown_va, KCONTEXT, &tte); /* load dtlb */
-	sfmmu_itlb_ld(sbdp_shutdown_va, KCONTEXT, &tte); /* load itlb */
+	sfmmu_dtlb_ld_kva(sbdp_shutdown_va, &tte); /* load dtlb */
+	sfmmu_itlb_ld_kva(sbdp_shutdown_va, &tte); /* load itlb */
 
 	for (src = (uint_t *)sbdp_shutdown_asm, dst = (uint_t *)bbsram_addr;
 	    src < (uint_t *)sbdp_shutdown_asm_end; src++, dst++)
--- a/usr/src/uts/sun4u/serengeti/os/serengeti.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/serengeti/os/serengeti.c	Tue Jun 20 07:21:09 2006 -0700
@@ -126,13 +126,12 @@
 	IOMMU_PER_SCHIZO);
 
 /*
- * sg_max_ncpus is the maximum number of CPUs supported on Serengeti
- * and Wildcat at GA.  We assume that the maximum number of SSM nodes
- * supported at GA is 4.  sg_max_ncpus is set to be smaller than NCPU
- * to reduce the amount of memory the logs take up until we have a
- * dynamic log memory allocation solution.
+ * sg_max_ncpus is the maximum number of CPUs supported on Serengeti.
+ * sg_max_ncpus is set to be smaller than NCPU to reduce the amount of
+ * memory the logs take up until we have a dynamic log memory allocation
+ * solution.
  */
-int sg_max_ncpus = (24 * 4);	/* (CPUs per node * max number of nodes) */
+int sg_max_ncpus = (24 * 2);    /* (max # of processors * # of cores/proc) */
 
 /*
  * variables to control mailbox message timeouts.
--- a/usr/src/uts/sun4u/starcat/io/drmach.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/starcat/io/drmach.c	Tue Jun 20 07:21:09 2006 -0700
@@ -3350,8 +3350,8 @@
 	tte = &drmach_cpu_sram_tte[CPU->cpu_id];
 	ASSERT(TTE_IS_VALID(tte) && TTE_IS_8K(tte) &&
 	    TTE_IS_PRIVILEGED(tte) && TTE_IS_LOCKED(tte));
-	sfmmu_dtlb_ld(drmach_cpu_sram_va, KCONTEXT, tte);
-	sfmmu_itlb_ld(drmach_cpu_sram_va, KCONTEXT, tte);
+	sfmmu_dtlb_ld_kva(drmach_cpu_sram_va, tte);
+	sfmmu_itlb_ld_kva(drmach_cpu_sram_va, tte);
 
 	bp = wp = drmach_cpu_sram_va;
 
@@ -3377,7 +3377,7 @@
 	if (err) {
 cleanup:
 		xt_one(CPU->cpu_id, vtag_flushpage_tl1,
-			(uint64_t)drmach_cpu_sram_va, (uint64_t)KCONTEXT);
+			(uint64_t)drmach_cpu_sram_va, (uint64_t)ksfmmup);
 		return (err);
 	}
 
@@ -3436,7 +3436,7 @@
 	axq_cdc_enable_all();
 
 	xt_one(CPU->cpu_id, vtag_flushpage_tl1,
-		(uint64_t)drmach_cpu_sram_va, (uint64_t)KCONTEXT);
+		(uint64_t)drmach_cpu_sram_va, (uint64_t)ksfmmup);
 
 	switch (cr->ecode) {
 	case DRMACH_CR_OK:
@@ -5447,7 +5447,7 @@
 	}
 
 	xt_one(cpuid, vtag_flushpage_tl1,
-		(uint64_t)drmach_cpu_sram_va, (uint64_t)KCONTEXT);
+		(uint64_t)drmach_cpu_sram_va, (uint64_t)ksfmmup);
 
 	return (0);
 }
@@ -5499,8 +5499,8 @@
 	tte = &drmach_cpu_sram_tte[CPU->cpu_id];
 	ASSERT(TTE_IS_VALID(tte) && TTE_IS_8K(tte) &&
 	    TTE_IS_PRIVILEGED(tte) && TTE_IS_LOCKED(tte));
-	sfmmu_dtlb_ld(drmach_cpu_sram_va, KCONTEXT, tte);
-	sfmmu_itlb_ld(drmach_cpu_sram_va, KCONTEXT, tte);
+	sfmmu_dtlb_ld_kva(drmach_cpu_sram_va, tte);
+	sfmmu_itlb_ld_kva(drmach_cpu_sram_va, tte);
 
 	/* copy text. standard bcopy not designed to work in nc space */
 	p = (uint_t *)drmach_cpu_sram_va;
--- a/usr/src/uts/sun4u/starcat/ml/drmach_asm.s	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/starcat/ml/drmach_asm.s	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -602,12 +601,16 @@
 	ldx	[%g1], %g1
 	or	%g1, KCONTEXT, %g2	! preserve %g1
 	set	MMU_TAG_ACCESS, %g4
-	sethi	%hi(ctx_pgsz_array), %g6
-	ldn	[%g6 + %lo(ctx_pgsz_array)], %g6
+	set	cpu_impl_dual_pgsz, %g6 
+	ld      [%g6], %g6 
 	brz	%g6, 1f
 	  nop
-	ldub	[%g6 + KCONTEXT], %g6
-	sll	%g6, TAGACCEXT_SHIFT, %g6
+	
+	sethi	%hi(ksfmmup), %g6
+	ldx	[%g6 + %lo(ksfmmup)], %g6
+	ldub    [%g6 + SFMMU_CEXT], %g6
+        sll     %g6, TAGACCEXT_SHIFT, %g6
+
 	set	MMU_TAG_ACCESS_EXT, %g7
 	stxa	%g6, [%g7]ASI_DMMU
 1:
--- a/usr/src/uts/sun4u/starcat/sys/starcat.h	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/starcat/sys/starcat.h	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -33,7 +32,6 @@
 extern "C" {
 #endif
 
-
 /*
  * Manifest constants of Starcat configuration
  */
@@ -80,6 +78,30 @@
 #define	STARCAT_DMV_IDN_BASE	(MAX_UPA)
 
 /*
+ * The CPU ID on starcat looks like this:
+ *
+ *     9        5  4     3     2    1    0
+ *    --------------------------------------
+ *    | Expander |   | Slot | Core | LPORT |
+ *    --------------------------------------
+ *
+ * Expander   Starcat has STARCAT_BDSET_MAX (18) expanders.
+ * Slot       Starcat has STARCAT_BDSET_SLOT_MAX (2) slots per expander.
+ *            Slot 0 carries a CPU-MEM board which has 4 processor chips.
+ *            Slot 1 carries an I/O board typically. But it can be
+ *            configured to carry a MAXCAT board which has 2 processor
+ *            chips on board.
+ * LPORT      Port number within the slot for a chip. This is also the
+ *            chip number within the slot. Note that Slot 1 can have only
+ *            2 chips, but this representation allows for 4. This is just
+ *            the theoretical max.
+ * Core       Core number within the chip.
+ *
+ * Currently, the maximum number of cores supported is 2 per chip (on
+ * Panther and Jaguar).
+ *
+ */
+/*
  * Macros for manipulating CPU IDs
  */
 #define	STARCAT_CPUID_TO_EXPANDER(p)	(((p) >> 5) & 0x1f)
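
Given the bit layout documented above, the remaining fields decode the same
way the expander macro does. The extractors below follow the diagram and
are illustrative only; the header defines its own macros for these fields.

	/* Illustrative decode of a Starcat CPU ID, per the layout above. */
	#define	EX_CPUID_TO_SLOT(p)	(((p) >> 3) & 0x1)	/* bit 3 */
	#define	EX_CPUID_TO_CORE(p)	(((p) >> 2) & 0x1)	/* bit 2 */
	#define	EX_CPUID_TO_LPORT(p)	((p) & 0x3)		/* bits 1:0 */
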
--- a/usr/src/uts/sun4u/starfire/io/drmach.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/starfire/io/drmach.c	Tue Jun 20 07:21:09 2006 -0700
@@ -2452,7 +2452,7 @@
 		drmach_cpu_ntries - ntries, drmach_cpu_ntries, cpuid);
 
 	xt_one(cpuid, vtag_flushpage_tl1,
-		(uint64_t)drmach_shutdown_va, (uint64_t)KCONTEXT);
+		(uint64_t)drmach_shutdown_va, (uint64_t)ksfmmup);
 
 	return (0);
 }
@@ -2516,8 +2516,8 @@
 			TTE_PFN_INTHI(bbsram_pfn);
 	tte.tte_intlo = TTE_PFN_INTLO(bbsram_pfn) |
 			TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT;
-	sfmmu_dtlb_ld(drmach_shutdown_va, KCONTEXT, &tte);	/* load dtlb */
-	sfmmu_itlb_ld(drmach_shutdown_va, KCONTEXT, &tte);	/* load itlb */
+	sfmmu_dtlb_ld_kva(drmach_shutdown_va, &tte);	/* load dtlb */
+	sfmmu_itlb_ld_kva(drmach_shutdown_va, &tte);	/* load itlb */
 
 	for (src = (uint_t *)drmach_shutdown_asm, dst = (uint_t *)bbsram_addr;
 		src < (uint_t *)drmach_shutdown_asm_end; src++, dst++)
--- a/usr/src/uts/sun4u/sunfire/io/ac_test.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/sunfire/io/ac_test.c	Tue Jun 20 07:21:09 2006 -0700
@@ -83,14 +83,14 @@
 	    TTE_PFN_INTHI(pfn);
 	tte.tte_intlo = TTE_PFN_INTLO(pfn) | TTE_CP_INT |
 	    TTE_PRIV_INT | TTE_LCK_INT | TTE_HWWR_INT;
-	sfmmu_dtlb_ld(va, KCONTEXT, &tte);
+	sfmmu_dtlb_ld_kva(va, &tte);
 
 }
 
 void
 ac_unmap(caddr_t va)
 {
-	vtag_flushpage(va, KCONTEXT);
+	vtag_flushpage(va, (uint64_t)ksfmmup);
 }
 
 int
--- a/usr/src/uts/sun4u/sunfire/io/fhc.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/sunfire/io/fhc.c	Tue Jun 20 07:21:09 2006 -0700
@@ -20,7 +20,7 @@
  */
 
 /*
- * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -3402,8 +3402,8 @@
 		TTE_PFN_INTHI(pfn);
 	tte.tte_intlo = TTE_PFN_INTLO(pfn) |
 		TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT; /* un$ */
-	sfmmu_dtlb_ld(shutdown_va, KCONTEXT, &tte);	/* load dtlb */
-	sfmmu_itlb_ld(shutdown_va, KCONTEXT, &tte);	/* load itlb */
+	sfmmu_dtlb_ld_kva(shutdown_va, &tte);	/* load dtlb */
+	sfmmu_itlb_ld_kva(shutdown_va, &tte);	/* load itlb */
 
 	/*
 	 * copy the special shutdown function to sram
--- a/usr/src/uts/sun4u/sys/cpu_module.h	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/sys/cpu_module.h	Tue Jun 20 07:21:09 2006 -0700
@@ -74,12 +74,12 @@
 /*
  * virtual demap flushes (tlbs & virtual tag caches)
  */
-void	vtag_flushpage(caddr_t addr, uint_t ctx);
-void	vtag_flushctx(uint_t ctx);
+void	vtag_flushpage(caddr_t addr, uint64_t sfmmup);
 void	vtag_flushall(void);
-void	vtag_flushpage_tl1(uint64_t addr, uint64_t ctx);
-void	vtag_flush_pgcnt_tl1(uint64_t addr, uint64_t ctx_pgcnt);
-void	vtag_flushctx_tl1(uint64_t ctx, uint64_t dummy);
+#pragma weak vtag_flushall_uctxs
+void	vtag_flushall_uctxs(void);
+void	vtag_flushpage_tl1(uint64_t addr, uint64_t sfmmup);
+void	vtag_flush_pgcnt_tl1(uint64_t addr, uint64_t sfmmup_pgcnt);
 void	vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2);
 
 /*
@@ -100,10 +100,10 @@
 #endif
 
 /*
- * Calculate, set optimal dtlb pagesize, for ISM and mpss, to support
- * cpus with non-fully-associative dtlbs.
+ * Flag indicating support for optimal dtlb pagesize setting, for ISM and
+ * mpss, on cpus with non-fully-associative dtlbs. The page size is stored
+ * in the hat's sfmmu_cext field.
  */
-extern uchar_t *ctx_pgsz_array;
+extern uint_t cpu_impl_dual_pgsz;
 
 /*
  * flush instruction cache if needed
--- a/usr/src/uts/sun4u/sys/machcpuvar.h	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/sys/machcpuvar.h	Tue Jun 20 07:21:09 2006 -0700
@@ -115,6 +115,11 @@
 	 * E$ data, which is needed for the specific cpu type.
 	 */
 	void		*cpu_private;		/* ptr to cpu private data */
+	/*
+	 * per-MMU ctxdom CPU data.
+	 */
+	uint_t		cpu_mmu_idx;
+	struct mmu_ctx	*cpu_mmu_ctxp;
 
 	ptl1_state_t	ptl1_state;
 
@@ -134,6 +139,8 @@
 typedef	struct machcpu	machcpu_t;
 
 #define	cpu_startup_thread	cpu_m.startup_thread
+#define	CPU_MMU_IDX(cp)		((cp)->cpu_m.cpu_mmu_idx)
+#define	CPU_MMU_CTXP(cp)	((cp)->cpu_m.cpu_mmu_ctxp)
 #define	NINTR_THREADS	(LOCK_LEVEL)	/* number of interrupt threads */
 
 /*
--- a/usr/src/uts/sun4u/sys/machparam.h	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/sys/machparam.h	Tue Jun 20 07:21:09 2006 -0700
@@ -320,6 +320,7 @@
 #define	PTL1_BAD_CTX_STEAL	12
 #define	PTL1_BAD_ECC		13
 #define	PTL1_BAD_CTX		14
+#define	PTL1_BAD_RAISE_TSBEXCP	20
 
 /*
  * Defines used for ptl1 related data structs.
--- a/usr/src/uts/sun4u/sys/opl.h	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/sys/opl.h	Tue Jun 20 07:21:09 2006 -0700
@@ -67,6 +67,23 @@
 	(OPL_MAX_COREID_PER_CMP - 1))
 #define	STRAND_ID(x)	((uint_t)(x) & (OPL_MAX_STRANDID_PER_CORE - 1))
 
+/*
+ * Max. boards supported in a domain per model.
+ */
+#define	OPL_MAX_BOARDS_FF1	1
+#define	OPL_MAX_BOARDS_FF2	2
+#define	OPL_MAX_BOARDS_DC1	4
+#define	OPL_MAX_BOARDS_DC2	8
+#define	OPL_MAX_BOARDS_DC3	16
+
+/*
+ * Structure to gather model-specific information at boot.
+ */
+typedef struct opl_model_info {
+	char	model_name[MAXSYSNAME];
+	int	model_max_boards;
+} opl_model_info_t;
+
 extern int	plat_max_boards(void);
 extern int	plat_max_cpu_units_per_board(void);
 extern int	plat_max_mem_units_per_board(void);
--- a/usr/src/uts/sun4u/vm/mach_sfmmu.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/vm/mach_sfmmu.c	Tue Jun 20 07:21:09 2006 -0700
@@ -204,7 +204,7 @@
 	va = utsb_vabase;
 	end_va = va + tsb_slab_size;
 	while (va < end_va) {
-		vtag_flushpage(va, KCONTEXT);
+		vtag_flushpage(va, (uint64_t)ksfmmup);
 		va += MMU_PAGESIZE;
 	}
 
@@ -212,7 +212,7 @@
 	va = utsb4m_vabase;
 	end_va = va + tsb_slab_size;
 	while (va < end_va) {
-		vtag_flushpage(va, KCONTEXT);
+		vtag_flushpage(va, (uint64_t)ksfmmup);
 		va += MMU_PAGESIZE;
 	}
 }
@@ -294,18 +294,18 @@
 	tte.tte_intlo = TTE_PFN_INTLO(pfn) | TTE_LCK_INT | TTE_CP_INT |
 	    TTE_PRIV_INT | TTE_HWWR_INT;
 
-	vtag_flushpage(va, KCONTEXT);
+	vtag_flushpage(va, (uint64_t)ksfmmup);
 
-	sfmmu_itlb_ld(va, KCONTEXT, &tte);
+	sfmmu_itlb_ld_kva(va, &tte);
 	if (do_dtlb)
-		sfmmu_dtlb_ld(va, KCONTEXT, &tte);
+		sfmmu_dtlb_ld_kva(va, &tte);
 }
 
 /*ARGSUSED*/
 void
 kdi_tlb_page_unlock(caddr_t va, int do_dtlb)
 {
-	vtag_flushpage(va, KCONTEXT);
+	vtag_flushpage(va, (uint64_t)ksfmmup);
 }
 
 /* clear user TSB information (applicable to hardware TSB walkers) */
--- a/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s	Tue Jun 20 07:21:09 2006 -0700
@@ -61,25 +61,19 @@
 /*
  * sfmmu related subroutines
  */
-
 /* ARGSUSED */
 void
-sfmmu_ctx_steal_tl1(uint64_t sctx, uint64_t rctx)
+sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
 {}
 
 /* ARGSUSED */
 void
-sfmmu_raise_tsb_exception(uint64_t sctx, uint64_t rctx)
+sfmmu_itlb_ld_kva(caddr_t vaddr, tte_t *tte)
 {}
 
 /* ARGSUSED */
 void
-sfmmu_itlb_ld(caddr_t vaddr, int ctxnum, tte_t *tte)
-{}
-
-/* ARGSUSED */
-void
-sfmmu_dtlb_ld(caddr_t vaddr, int ctxnum, tte_t *tte)
+sfmmu_dtlb_ld_kva(caddr_t vaddr, tte_t *tte)
 {}
 
 int
@@ -102,103 +96,113 @@
 }
 
 #else	/* lint */
-
+	
 /*
- * 1. If stealing ctx, flush all TLB entries whose ctx is ctx-being-stolen.
- * 2. If processor is running in the ctx-being-stolen, set the
- *    context to the resv context. That is 
- *    If processor in User-mode - pri/sec-ctx both set to ctx-being-stolen,
- *		change both pri/sec-ctx registers to resv ctx.
- *    If processor in Kernel-mode - pri-ctx is 0, sec-ctx is ctx-being-stolen,
- *		just change sec-ctx register to resv ctx. When it returns to
- *		kernel-mode, user_rtt will change pri-ctx.
+ * Invalidate the MMU context of a specific victim process, or of any
+ * process currently running on this CPU.
  *
- * Note: For multiple page size TLB, no need to set page sizes for
- *       DEMAP context.
- *
- * %g1 = ctx being stolen (victim)
- * %g2 = invalid ctx to replace victim with
+ * %g1 = sfmmup whose ctx is being invalidated
+ *	 when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
+ * Note %g1 is the only input argument used by this xcall handler.
  */
-	ENTRY(sfmmu_ctx_steal_tl1)
-	/*
-	 * Flush TLBs.
-	 */
-	set	MMU_PCONTEXT, %g3
-	set	DEMAP_CTX_TYPE | DEMAP_PRIMARY, %g4
-	ldxa	[%g3]ASI_MMU_CTX, %g5		/* get pri-ctx */
-	sethi	%hi(FLUSH_ADDR), %g6
-	stxa	%g1, [%g3]ASI_MMU_CTX		/* temporarily set our */
-						/*   pri-ctx to victim */
-	stxa	%g0, [%g4]ASI_DTLB_DEMAP	/* flush DTLB */
-	stxa	%g0, [%g4]ASI_ITLB_DEMAP	/* flush ITLB */
-	stxa	%g5, [%g3]ASI_MMU_CTX		/* restore original pri-ctx */
-	flush	%g6				/* ensure stxa's committed */
-	/* fall through to the code below */
-
-	/*
-	 * We enter here if we're just raising a TSB miss
-	 * exception, without switching MMU contexts.  In
-	 * this case, there is no need to flush the TLB.
-	 */
-	ALTENTRY(sfmmu_raise_tsb_exception)
+	ENTRY(sfmmu_raise_tsb_exception)
 	!
-	! if (sec-ctx != victim) {
+	! if (victim == INVALID_CONTEXT) {
+	!	if (sec-ctx > INVALID_CONTEXT) {
+	!		write INVALID_CONTEXT to sec-ctx
+	!	}
+	!	if (pri-ctx > INVALID_CONTEXT) {
+	!		write INVALID_CONTEXT to pri-ctx
+	!	}
+	! } else if (current CPU tsbmiss->usfmmup != victim sfmmup) {
 	!	return
 	! } else {
-	!	if (pri-ctx == victim) {
+	!	if (sec-ctx > INVALID_CONTEXT)
 	!		write INVALID_CONTEXT to sec-ctx
+	!	
+	!	if (pri-ctx > INVALID_CONTEXT)
 	!		write INVALID_CONTEXT to pri-ctx
-	!	} else {
-	!		write INVALID_CONTEXT to sec-ctx
-	!	}
 	! }
 	!
-	cmp	%g1, NUM_LOCKED_CTXS
-	blt,a,pn %icc, ptl1_panic		/* can't steal locked ctx */
-	  mov	PTL1_BAD_CTX_STEAL, %g1
-	set	CTXREG_CTX_MASK, %g6
-	set	MMU_SCONTEXT, %g3
-	ldxa	[%g3]ASI_MMU_CTX, %g5		/* get sec-ctx */
-	and	%g5, %g6, %g5
-	cmp	%g5, %g1			/* is it the victim? */
-	bne,pn	%icc, 2f			/* was our sec-ctx a victim? */
+
+	sethi   %hi(ksfmmup), %g3
+        ldx     [%g3 + %lo(ksfmmup)], %g3
+	cmp	%g1, %g3
+	be,a,pn %xcc, ptl1_panic	/* can't invalidate kernel ctx */
+	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1
+
+	set	INVALID_CONTEXT, %g2
+	
+	cmp	%g1, INVALID_CONTEXT
+	bne,pt	%xcc, 1f			/* called from wrap_around? */
+	  mov	MMU_SCONTEXT, %g3
+
+	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = pgsz | sec-ctx */
+	set     CTXREG_CTX_MASK, %g4
+	and	%g5, %g4, %g5			/* %g5 = sec-ctx */
+	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
+	ble,pn	%xcc, 0f			/* yes, no need to change */
 	  mov	MMU_PCONTEXT, %g7
-	ldxa	[%g7]ASI_MMU_CTX, %g4		/* get pri-ctx */
-	and	%g4, %g6, %g4
-	stxa	%g2, [%g3]ASI_MMU_CTX		/* set sec-ctx to invalid ctx */
+
+	stxa	%g2, [%g3]ASI_MMU_CTX		/* set invalid ctx */
 	membar	#Sync
-	cmp	%g1, %g4			/* is it the victim? */
-	bne 	%icc, 2f			/* nope, no need to change it */
+
+0:
+	ldxa	[%g7]ASI_MMU_CTX, %g5		/* get pgsz | pri-ctx */
+	and     %g5, %g4, %g5			/* %g5 = pri-ctx */
+	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
+	ble,pn	%xcc, 2f			/* yes, no need to change */
+	  nop
+
+	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
+	retry
+
+1:
+	/* %g3 = MMU_SCONTEXT	*/
+	CPU_TSBMISS_AREA(%g5, %g6)		/* load cpu tsbmiss area */
+	ldx	[%g5 + TSBMISS_UHATID], %g5     /* load usfmmup */
+
+	cmp	%g5, %g1			/* is victim hat running here? */
+	bne,pt	%xcc, 2f
 	  nop
-	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid ctx */
+
+	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = pgsz | sec-ctx */
+	set	CTXREG_CTX_MASK, %g4
+	and	%g5, %g4, %g5			/* %g5 = sec-ctx */
+	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
+	ble,pn	%xcc, 0f			/* yes, no need to change */
+	  mov	MMU_PCONTEXT, %g7
+
+	stxa	%g2, [%g3]ASI_MMU_CTX		/* set sec-ctx to invalid */
+	membar	#Sync
+
+0:
+	ldxa	[%g7]ASI_MMU_CTX, %g4		/* %g4 = pgsz | pri-ctx */
+	set	CTXREG_CTX_MASK, %g6
+	and	%g4, %g6, %g4			/* %g4 = pri-ctx */
+	cmp	%g4, INVALID_CONTEXT		/* kernel or invalid ctx? */
+	ble	%icc, 2f			/* yes, no need to change */
+	  nop
+	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
 	/* next instruction is retry so no membar sync */
 2:
 	retry
-	SET_SIZE(sfmmu_ctx_steal_tl1)
+	SET_SIZE(sfmmu_raise_tsb_exception)
 
-	ENTRY_NP(sfmmu_itlb_ld)
+	/*
+	 * %o0 = virtual address
+	 * %o1 = address of TTE to be loaded
+	 */
+	ENTRY_NP(sfmmu_itlb_ld_kva)
 	rdpr	%pstate, %o3
 #ifdef DEBUG
-	andcc	%o3, PSTATE_IE, %g0		! If interrupts already
-	bnz,pt %icc, 1f				!   disabled, panic
-	  nop
-
-	sethi	%hi(panicstr), %g1
-	ldx	[%g1 + %lo(panicstr)], %g1
-	tst	%g1
-	bnz,pt	%icc, 1f
-	  nop
-	
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	 or	%o0, %lo(sfmmu_panic1), %o0
-1:
+	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l1, %g1)
 #endif /* DEBUG */
 	wrpr	%o3, PSTATE_IE, %pstate		! Disable interrupts
 	srln	%o0, MMU_PAGESHIFT, %o0
 	slln	%o0, MMU_PAGESHIFT, %o0		! Clear page offset
-	or	%o0, %o1, %o0
-	ldx	[%o2], %g1
+
+	ldx	[%o1], %g1
 	set	MMU_TAG_ACCESS, %o5
 #ifdef	CHEETAHPLUS_ERRATUM_34
 	!
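
For readability, the branch logic of the new sfmmu_raise_tsb_exception
handler above can be read as the C sketch below. It is illustrative only:
pri_ctx, sec_ctx and usfmmup simulate the ASI_MMU_CTX registers and the
TSBMISS_UHATID load, and the value INVALID_CONTEXT == 1 (with KCONTEXT == 0)
is an assumption about the reserved context ids; the real handler also
panics if the victim is the kernel hat (ksfmmup).

    #include <stdint.h>

    #define KCONTEXT        0   /* kernel context id */
    #define INVALID_CONTEXT 1   /* assumed reserved invalid context id */

    static uint64_t pri_ctx, sec_ctx;   /* simulated pri/sec MMU ctx regs */
    static uint64_t usfmmup;            /* simulated tsbmiss->usfmmup */

    /* victim is an sfmmup, or INVALID_CONTEXT from sfmmu_wrap_around */
    static void
    raise_tsb_exception_sketch(uint64_t victim)
    {
            if (victim != INVALID_CONTEXT && usfmmup != victim)
                    return;                 /* victim hat not running here */
            if (sec_ctx > INVALID_CONTEXT)  /* skip kernel/invalid ctx */
                    sec_ctx = INVALID_CONTEXT;
            if (pri_ctx > INVALID_CONTEXT)
                    pri_ctx = INVALID_CONTEXT;
    }
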
@@ -241,7 +245,7 @@
 	call    panic                           ! found no unlocked TTE so
 	  or    %o0, %lo(sfmmu_panic5), %o0     ! give up.
 
-	
+
 2:
 	!
 	! We have found an unlocked or non-valid entry; we'll explicitly load
@@ -252,7 +256,7 @@
 	stxa	%g1, [%g3]ASI_ITLB_ACCESS
 	flush	%o1				! Flush required for I-MMU
 	ba	3f				! Delay slot of ba is empty
-	nop					!   per Erratum 64
+	  nop					!   per Erratum 64
 
 0:
 #endif	/* CHEETAHPLUS_ERRATUM_34 */
@@ -263,7 +267,7 @@
 3:
 	retl
 	  wrpr	%g0, %o3, %pstate		! Enable interrupts
-	SET_SIZE(sfmmu_itlb_ld)
+	SET_SIZE(sfmmu_itlb_ld_kva)
 
 	/*
 	 * Load an entry into the DTLB.
@@ -272,36 +276,33 @@
 	 * are some TLB slots that are reserved for the kernel but not
 	 * always held locked.  We want to avoid loading locked TTEs
 	 * into those slots since they could be displaced.
+	 *
+	 * %o0 = virtual address
+	 * %o1 = address of TTE to be loaded
 	 */
-	ENTRY_NP(sfmmu_dtlb_ld)
+	ENTRY_NP(sfmmu_dtlb_ld_kva)
 	rdpr	%pstate, %o3
 #ifdef DEBUG
-	andcc	%o3, PSTATE_IE, %g0		! if interrupts already
-	bnz,pt	%icc, 1f			! disabled, panic
-	  nop
-
-	sethi	%hi(panicstr), %g1
-	ldx	[%g1 + %lo(panicstr)], %g1
-	tst	%g1
-	bnz,pt	%icc, 1f
-	  nop
-
-	sethi	%hi(sfmmu_panic1), %o0
-	call	panic
-	 or	%o0, %lo(sfmmu_panic1), %o0
-1:
+	PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l2, %g1)
 #endif /* DEBUG */
 	wrpr	%o3, PSTATE_IE, %pstate		! disable interrupts
 	srln	%o0, MMU_PAGESHIFT, %o0
 	slln	%o0, MMU_PAGESHIFT, %o0		! clear page offset
-	or	%o0, %o1, %o0			! or in ctx to form tagacc
-	ldx	[%o2], %g1
-	sethi	%hi(ctx_pgsz_array), %o2	! Check for T8s
-	ldn	[%o2 + %lo(ctx_pgsz_array)], %o2
+
+	ldx	[%o1], %g1
+
+	set	MMU_TAG_ACCESS, %o5
+	
+	set	cpu_impl_dual_pgsz, %o2
+	ld	[%o2], %o2
 	brz	%o2, 1f
-	set	MMU_TAG_ACCESS, %o5
-	ldub	[%o2 + %o1], %o2		! Cheetah+: set up tag access
-	sll	%o2, TAGACCEXT_SHIFT, %o2	! extension register so entry
+	  nop
+
+	sethi	%hi(ksfmmup), %o2
+	ldx	[%o2 + %lo(ksfmmup)], %o2
+	ldub	[%o2 + SFMMU_CEXT], %o2
+	sll	%o2, TAGACCEXT_SHIFT, %o2
+
 	set	MMU_TAG_ACCESS_EXT, %o4		! can go into T8 if unlocked
 	stxa	%o2,[%o4]ASI_DMMU
 	membar	#Sync
@@ -330,7 +331,7 @@
 	bz,pn	%icc, 4f			! If unlocked, go displace
 	  nop
 	sub	%g3, (1 << 3), %g3		! Decrement idx
-	brgez	%g3, 3b			
+	brgez	%g3, 3b
 	  nop
 	sethi	%hi(sfmmu_panic5), %o0		! We searched all entries and
 	call	panic				! found no unlocked TTE so
@@ -345,7 +346,7 @@
 	membar	#Sync
 	retl
 	  wrpr	%g0, %o3, %pstate		! enable interrupts
-	SET_SIZE(sfmmu_dtlb_ld)
+	SET_SIZE(sfmmu_dtlb_ld_kva)
 
 	ENTRY_NP(sfmmu_getctx_pri)
 	set	MMU_PCONTEXT, %o0
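
The renamed sfmmu_itlb_ld_kva/sfmmu_dtlb_ld_kva entry points above take only
a virtual address and a TTE pointer; the old ctxnum argument is gone and the
kernel context is implicit, so the tag access value is just the page-aligned
vaddr. A hypothetical caller-side view (tte_t and both prototypes here are
assumptions sketched from the register usage, not the real declarations):

    #include <stdint.h>

    typedef uint64_t tte_t;           /* placeholder for the real TTE type */

    extern void sfmmu_itlb_ld_kva(void *vaddr, tte_t *ttep);
    extern void sfmmu_dtlb_ld_kva(void *vaddr, tte_t *ttep);

    /* mirror of the srln/slln pair: clear the page offset bits */
    #define MMU_PAGESHIFT   13        /* 8K base pages on sun4u */
    #define TAG_ACCESS(va) \
            (((uintptr_t)(va) >> MMU_PAGESHIFT) << MMU_PAGESHIFT)
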
@@ -358,45 +359,43 @@
 	set	CTXREG_CTX_MASK, %o1
 	ldxa	[%o0]ASI_MMU_CTX, %o0
 	retl
-	and	%o0, %o1, %o0
+	  and	%o0, %o1, %o0
 	SET_SIZE(sfmmu_getctx_sec)
 
 	/*
 	 * Set the secondary context register for this process.
-	 * %o0 = context number for this process.
+	 * %o0 = page_size | context number for this process.
 	 */
 	ENTRY_NP(sfmmu_setctx_sec)
 	/*
 	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
 	 * But we can also get called from C with interrupts enabled. So,
-	 * we need to check first. Also, resume saves state in %o3 and %o5
-	 * so we can't use those registers here.
+	 * we need to check first.
 	 */
 
 	/* If interrupts are not disabled, then disable them */
 	rdpr	%pstate, %g1
 	btst	PSTATE_IE, %g1
 	bnz,a,pt %icc, 1f
-	wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */
+	  wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */
+
 1:
 	mov	MMU_SCONTEXT, %o1
-	sethi	%hi(ctx_pgsz_array), %g2
-	ldn	[%g2 + %lo(ctx_pgsz_array)], %g2
-	brz	%g2, 2f
-	nop
-	ldub	[%g2 + %o0], %g2
-	sll	%g2, CTXREG_EXT_SHIFT, %g2
-	or	%g2, %o0, %o0
-2:
+
 	sethi	%hi(FLUSH_ADDR), %o4
 	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
 	flush	%o4
 
+	/*
+	 * If the routine was entered with intr enabled, then enable intr now.
+	 * Otherwise, keep intr disabled and return without enabling intr.
+	 * %g1 - old intr state
+	 */
 	btst	PSTATE_IE, %g1
-	bnz,a,pt %icc, 1f
-	wrpr	%g0, %g1, %pstate		/* enable interrupts */
-1:	retl
-	nop
+	bnz,a,pt %icc, 2f
+	  wrpr	%g0, %g1, %pstate		/* enable interrupts */
+2:	retl
+	  nop
 	SET_SIZE(sfmmu_setctx_sec)
 
 	/*
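
The interrupt save/restore pattern that sfmmu_setctx_sec wraps around the
context-register write corresponds to the following C sketch
(read_pstate/write_pstate/write_scontext are illustrative stand-ins for
rdpr/wrpr %pstate and the stxa to MMU_SCONTEXT):

    #include <stdint.h>

    #define PSTATE_IE 0x2               /* SPARC V9 interrupt-enable bit */

    extern uint64_t read_pstate(void);          /* rdpr %pstate */
    extern void     write_pstate(uint64_t);     /* wrpr %pstate */
    extern void     write_scontext(uint64_t);   /* stxa ...MMU_SCONTEXT */

    static void
    setctx_sec_sketch(uint64_t pgsz_and_cnum)
    {
            uint64_t old = read_pstate();

            if (old & PSTATE_IE)        /* disable intr only if enabled */
                    write_pstate(old & ~PSTATE_IE);
            write_scontext(pgsz_and_cnum);
            if (old & PSTATE_IE)        /* restore the caller's state */
                    write_pstate(old);
    }
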
@@ -432,25 +431,17 @@
 	 * %o0 - hat pointer
 	 */
 	ENTRY_NP(sfmmu_load_mmustate)
-	/*
-	 * From resume we call sfmmu_load_mmustate with interrupts disabled.
-	 * But we can also get called from C with interrupts enabled. So,
-	 * we need to check first. Also, resume saves state in %o5 and we
-	 * can't use this register here.
-	 */
 
-	sethi	%hi(ksfmmup), %o3
+#ifdef DEBUG
+	PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l3, %g1)
+#endif /* DEBUG */
+
+	sethi	%hi(ksfmmup), %o3
 	ldx	[%o3 + %lo(ksfmmup)], %o3
 	cmp	%o3, %o0
 	be,pn	%xcc, 3f			! if kernel as, do nothing
 	  nop
 
-	/* If interrupts are not disabled, then disable them */
-	rdpr	%pstate, %g1
-	btst	PSTATE_IE, %g1
-	bnz,a,pt %icc, 1f
-	wrpr	%g1, PSTATE_IE, %pstate		! disable interrupts
-1:
 	/*
 	 * We need to set up the TSB base register, tsbmiss
 	 * area, and load locked TTE(s) for the TSB.
@@ -477,7 +468,7 @@
 	brz,pt  %g2, 4f
 	  nop
 	/*
-	 * We have a second TSB for this process, so we need to 
+	 * We have a second TSB for this process, so we need to
 	 * encode data for both the first and second TSB in our single
 	 * TSB base register.  See hat_sfmmu.h for details on what bits
 	 * correspond to which TSB.
@@ -522,11 +513,8 @@
 	stx	%o0, [%o2 + TSBMISS_UHATID]
 	stuh	%o3, [%o2 + TSBMISS_HATFLAGS]
 
-	btst	PSTATE_IE, %g1
-	bnz,a,pt %icc, 3f
-	wrpr	%g0, %g1, %pstate		! enable interrupts
 3:	retl
-	nop
+	  nop
 	SET_SIZE(sfmmu_load_mmustate)
 
 #endif /* lint */
@@ -624,7 +612,7 @@
 	dec	%l3
 	stb	%l3, [THREAD_REG + T_PREEMPT]
 	ret
-	restore
+	  restore
 	SET_SIZE(sfmmu_inv_tsb_fast)
 
 #endif /* lint */
@@ -656,12 +644,12 @@
 
 	ENTRY(prefetch_tsbe_read)
 	retl
-	prefetch	[%o0+448], #n_reads
+	  prefetch	[%o0+448], #n_reads
 	SET_SIZE(prefetch_tsbe_read)
 
 	ENTRY(prefetch_tsbe_write)
 	retl
-	prefetch	[%o0], #n_writes
+	  prefetch	[%o0], #n_writes
 	SET_SIZE(prefetch_tsbe_write)
 #endif /* lint */
 
--- a/usr/src/uts/sun4v/cpu/common_asm.s	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4v/cpu/common_asm.s	Tue Jun 20 07:21:09 2006 -0700
@@ -77,6 +77,7 @@
 
 #include <sys/asm_linkage.h>
 #include <sys/privregs.h>
+#include <vm/hat_sfmmu.h>
 #include <sys/machparam.h>	/* To get SYSBASE and PAGESIZE */
 #include <sys/machthread.h>
 #include <sys/clock.h>
@@ -118,12 +119,12 @@
 
 	cmp	%o2, %o0		! If the value we wrote was in the
 	bg,pt	%xcc, 2f		!   future, then blow out of here.
-	sllx	%o3, 1, %o3		! If not, then double our step size,
+	  sllx	%o3, 1, %o3		! If not, then double our step size,
 	ba,pt	%xcc, 1b		!   and take another lap.
-	add	%o0, %o3, %o2		!
+	  add	%o0, %o3, %o2		!
 2:
 	retl
-	nop
+	  nop
 	SET_SIZE(tickcmpr_set)
 
 #endif  /* lint */
@@ -141,7 +142,7 @@
 	sllx	%g1, TICKINT_DIS_SHFT, %o0
 	WR_TICKCMPR(%o0,%o4,%o5,__LINE__)	! Write to TICK_CMPR
 	retl
-	nop
+	  nop
 	SET_SIZE(tickcmpr_disable)
 
 #endif
@@ -174,10 +175,10 @@
 	sethi	%hi(tick_write_delta_panic), %o1
         save    %sp, -SA(MINFRAME), %sp ! get a new window to preserve caller
 	call	panic
-	or	%i1, %lo(tick_write_delta_panic), %o0
+	  or	%i1, %lo(tick_write_delta_panic), %o0
 	/*NOTREACHED*/
 	retl
-	nop
+	  nop
 #endif
 
 #if defined(lint)
@@ -194,7 +195,7 @@
 	ENTRY_NP(tickcmpr_disabled)
 	RD_TICKCMPR(%g1, %o0)
 	retl
-	srlx	%g1, TICKINT_DIS_SHFT, %o0
+	  srlx	%g1, TICKINT_DIS_SHFT, %o0
 	SET_SIZE(tickcmpr_disabled)
 
 #endif  /* lint */
@@ -213,7 +214,7 @@
 	ENTRY(gettick)
 	GET_NATIVE_TIME(%o0, %o2, %o3)
 	retl
-	nop
+	  nop
 	SET_SIZE(gettick)
 
 #endif  /* lint */
@@ -235,7 +236,7 @@
 	rdpr	%tick, %o0
 	sllx	%o0, 1, %o0
 	retl
-	srlx	%o0, 1, %o0		! shake off npt bit
+	  srlx	%o0, 1, %o0		! shake off npt bit
 	SET_SIZE(gettick_counter)
 #endif	/* lint */
 
@@ -307,13 +308,13 @@
 	GET_HRTIME(%g1, %o0, %o1, %o2, %o3, %o4, %o5, %g2)
 							! %g1 = hrtime
 	retl
-	mov	%g1, %o0
+	  mov	%g1, %o0
 	SET_SIZE(gethrtime)
 
 	ENTRY_NP(gethrtime_unscaled)
 	GET_NATIVE_TIME(%g1, %o2, %o3)			! %g1 = native time
 	retl
-	mov	%g1, %o0
+	  mov	%g1, %o0
 	SET_SIZE(gethrtime_unscaled)
 
 	ENTRY_NP(gethrtime_waitfree)
@@ -321,7 +322,7 @@
 	GET_NATIVE_TIME(%g1, %o2, %o3)			! %g1 = native time
 	NATIVE_TIME_TO_NSEC(%g1, %o2, %o3)
 	retl
-	mov	%g1, %o0
+	  mov	%g1, %o0
 	SET_SIZE(dtrace_gethrtime)
 	SET_SIZE(gethrtime_waitfree)
 
@@ -332,17 +333,17 @@
 	! hrtime_t's are signed, max hrtime_t must be positive
 	mov	-1, %o2
 	brlz,a	%g1, 1f
-	srlx	%o2, 1, %g1
+	  srlx	%o2, 1, %g1
 1:
 	retl
-	mov	%g1, %o0
+	  mov	%g1, %o0
 	SET_SIZE(gethrtime_max)
 
 	ENTRY(scalehrtime)
 	ldx	[%o0], %o1
 	NATIVE_TIME_TO_NSEC(%o1, %o2, %o3)
 	retl
-	stx	%o1, [%o0]
+	  stx	%o1, [%o0]
 	SET_SIZE(scalehrtime)
 
 /*
@@ -369,18 +370,18 @@
 	brz,pt	adj, 3f;		/* no adjustments, it's easy */	\
 	add	hrestnsec, nslt, hrestnsec; /* hrest.tv_nsec += nslt */	\
 	brlz,pn	adj, 2f;		/* if hrestime_adj negative */	\
-	srl	nslt, ADJ_SHIFT, nslt;	/* delay: nslt >>= 4 */		\
+	  srl	nslt, ADJ_SHIFT, nslt;	/* delay: nslt >>= 4 */		\
 	subcc	adj, nslt, %g0;		/* hrestime_adj - nslt/16 */	\
 	movg	%xcc, nslt, adj;	/* adj by min(adj, nslt/16) */	\
 	ba	3f;			/* go convert to sec/nsec */	\
-	add	hrestnsec, adj, hrestnsec; /* delay: apply adjustment */ \
+	  add	hrestnsec, adj, hrestnsec; /* delay: apply adjustment */ \
 2:	addcc	adj, nslt, %g0;		/* hrestime_adj + nslt/16 */	\
 	bge,a,pt %xcc, 3f;		/* is adj less negative? */	\
-	add	hrestnsec, adj, hrestnsec; /* yes: hrest.nsec += adj */	\
+	  add	hrestnsec, adj, hrestnsec; /* yes: hrest.nsec += adj */	\
 	sub	hrestnsec, nslt, hrestnsec; /* no: hrest.nsec -= nslt/16 */ \
 3:	cmp	hrestnsec, nano;	/* more than a billion? */	\
 	bl,pt	%xcc, 4f;		/* if not, we're done */	\
-	nop;				/* delay: do nothing :( */	\
+	  nop;				/* delay: do nothing :( */	\
 	add	hrestsec, 1, hrestsec;	/* hrest.tv_sec++; */		\
 	sub	hrestnsec, nano, hrestnsec; /* hrest.tv_nsec -= NANOSEC; */	\
 4:
@@ -390,7 +391,7 @@
 	CONV_HRESTIME(%o1, %o2, %o3, %o4, %o5)
 	stn	%o1, [%o0]
 	retl
-	stn	%o2, [%o0 + CLONGSIZE]
+	  stn	%o2, [%o0 + CLONGSIZE]
 	SET_SIZE(gethrestime)
 
 /*
@@ -401,7 +402,7 @@
 	GET_HRESTIME(%o0, %o2, %o3, %o4, %o5, %g1, %g2, %g3, %g4)
 	CONV_HRESTIME(%o0, %o2, %o3, %o4, %o5)
 	retl					! %o0 current hrestime seconds
-	nop
+	  nop
 	SET_SIZE(gethrestime_sec)
 
 /*
@@ -425,9 +426,9 @@
 	lduw	[%o1 + %lo(hres_lock)], %o3	! Reload lock value
 	cmp	%o3, %o2			! If lock is locked or has
 	bne	0b				!   changed, retry.
-	stn	%g1, [%o0]			! Delay: store seconds
+	  stn	%g1, [%o0]			! Delay: store seconds
 	retl
-	stn	%g2, [%o0 + CLONGSIZE]		! Delay: store nanoseconds
+	  stn	%g2, [%o0 + CLONGSIZE]		! Delay: store nanoseconds
 	SET_SIZE(gethrestime_lasttick)
 
 /*
@@ -496,13 +497,13 @@
 	ldstub	[%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5	! try locking
 7:	tst	%l5
 	bz,pt	%xcc, 8f			! if we got it, drive on
-	ld	[%l4 + %lo(nsec_scale)], %l5	! delay: %l5 = scaling factor
+	  ld	[%l4 + %lo(nsec_scale)], %l5	! delay: %l5 = scaling factor
 	ldub	[%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
 9:	tst	%l5
 	bz,a,pn	%xcc, 7b
-	ldstub	[%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
+	  ldstub	[%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
 	ba,pt	%xcc, 9b
-	ldub	[%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
+	  ldub	[%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
 8:
 	membar	#StoreLoad|#StoreStore
 
@@ -520,7 +521,7 @@
 	ldx	[%l4 + %lo(hrtime_base)], %l1	
 	cmp	%l1, %l0
 	bg,pn	%xcc, 9f
-	nop
+	  nop
 
 	stx	%l0, [%l4 + %lo(hrtime_base)]	! update hrtime_base
 
@@ -531,20 +532,20 @@
 	brz	%l0, 2f
 						! hrestime_adj == 0 ?
 						! yes, skip adjustments
-	clr	%l5				! delay: set adj to zero
+	  clr	%l5				! delay: set adj to zero
 	tst	%l0				! is hrestime_adj >= 0 ?
 	bge,pt	%xcc, 1f			! yes, go handle positive case
-	srl	%i1, ADJ_SHIFT, %l5		! delay: %l5 = adj
+	  srl	%i1, ADJ_SHIFT, %l5		! delay: %l5 = adj
 
 	addcc	%l0, %l5, %g0			! hrestime_adj < -adj ?
 	bl,pt	%xcc, 2f			! yes, use current adj
-	neg	%l5				! delay: %l5 = -adj
+	  neg	%l5				! delay: %l5 = -adj
 	ba,pt	%xcc, 2f
-	mov	%l0, %l5			! no, so set adj = hrestime_adj
+	  mov	%l0, %l5			! no, so set adj = hrestime_adj
 1:
 	subcc	%l0, %l5, %g0			! hrestime_adj < adj ?
 	bl,a,pt	%xcc, 2f			! yes, set adj = hrestime_adj
-	mov	%l0, %l5			! delay: adj = hrestime_adj
+	  mov	%l0, %l5			! delay: adj = hrestime_adj
 2:
 	ldx	[%l4 + %lo(timedelta)], %l0	! %l0 = timedelta
 	sub	%l0, %l5, %l0			! timedelta -= adj
@@ -561,7 +562,7 @@
 	set	NANOSEC, %l5			! %l5 = NANOSEC
 	cmp	%i3, %l5
 	bl,pt	%xcc, 5f			! if hrestime.tv_nsec < NANOSEC
-	sethi	%hi(one_sec), %i1		! delay
+	  sethi	%hi(one_sec), %i1		! delay
 	add	%i2, 0x1, %i2			! hrestime.tv_sec++
 	sub	%i3, %l5, %i3			! hrestime.tv_nsec - NANOSEC
 	mov	0x1, %l5
@@ -589,7 +590,7 @@
 
 	sethi	%hi(hrtime_base_panic), %o0
 	call	panic
-	or	%o0, %lo(hrtime_base_panic), %o0
+	  or	%o0, %lo(hrtime_base_panic), %o0
 
 	SET_SIZE(hres_tick)
 
@@ -605,7 +606,7 @@
 	save	%sp, -SA(MINFRAME), %sp
 	sethi	%hi(kstat_q_panic_msg), %o0
 	call	panic
-	or	%o0, %lo(kstat_q_panic_msg), %o0
+	  or	%o0, %lo(kstat_q_panic_msg), %o0
 	/*NOTREACHED*/
 	SET_SIZE(kstat_q_panic)
 
@@ -750,7 +751,7 @@
 	ENTRY(drv_usecwait)
 	ALTENTRY(usec_delay)
 	brlez,a,pn %o0, 0f
-	mov	1, %o0
+	  mov	1, %o0
 0:
 	sethi	%hi(sticks_per_usec), %o1
 	lduw	[%o1 + %lo(sticks_per_usec)], %o1
@@ -762,9 +763,9 @@
 1:	cmp	%o1, %o2
 	GET_NATIVE_TIME(%o2, %o3, %o4)
 	bgeu,pt	%xcc, 1b
-	nop
+	  nop
 	retl
-	nop
+	  nop
 	SET_SIZE(usec_delay)
 	SET_SIZE(drv_usecwait)
 #endif	/* lint */
@@ -789,12 +790,12 @@
 	rdpr	%tpc, %g5
 	btst	TSTATE_PRIV, %g6		! trap from supervisor mode?
 	bnz,a,pt %xcc, 1f
-	stn	%g5, [%g1 + CPU_PROFILE_PC]	! if so, record kernel PC
+	  stn	%g5, [%g1 + CPU_PROFILE_PC]	! if so, record kernel PC
 	stn	%g5, [%g1 + CPU_PROFILE_UPC]	! if not, record user PC
 	ba	pil_interrupt_common		! must be large-disp branch
-	stn	%g0, [%g1 + CPU_PROFILE_PC]	! zero kernel PC
+	  stn	%g0, [%g1 + CPU_PROFILE_PC]	! zero kernel PC
 1:	ba	pil_interrupt_common		! must be large-disp branch
-	stn	%g0, [%g1 + CPU_PROFILE_UPC]	! zero user PC
+	  stn	%g0, [%g1 + CPU_PROFILE_UPC]	! zero user PC
 	SET_SIZE(pil14_interrupt)
 
 	ENTRY_NP(tick_rtt)
@@ -813,7 +814,7 @@
 	RD_TICKCMPR(%o5, %g1)
 	srlx	%o5, TICKINT_DIS_SHFT, %g1
 	brnz,pt	%g1, 2f
-	nop
+	  nop
 
 	rdpr 	%pstate, %g5
 	andn	%g5, PSTATE_IE, %g1
@@ -822,14 +823,14 @@
 	sethi	%hi(cbe_level14_inum), %o1
 	ld	[%o1 + %lo(cbe_level14_inum)], %o1
 	call	intr_enqueue_req ! preserves %o5 and %g5
-	mov	PIL_14, %o0
+	  mov	PIL_14, %o0
 
 	! Check SOFTINT for TICKINT/STICKINT
 	rd	SOFTINT, %o4
 	set	(TICK_INT_MASK | STICK_INT_MASK), %o0
 	andcc	%o4, %o0, %g0
 	bz,a,pn	%icc, 2f
-	wrpr	%g0, %g5, %pstate		! Enable vec interrupts
+	  wrpr	%g0, %g5, %pstate		! Enable vec interrupts
 
 	! clear TICKINT/STICKINT
 	wr	%o0, CLEAR_SOFTINT
@@ -844,7 +845,7 @@
 	srlx	%o0, 1, %o0
 	cmp	%o5, %o0			! In the future?
 	bg,a,pt	%xcc, 2f			! Yes, drive on.
-	wrpr	%g0, %g5, %pstate		!   delay: enable vec intr
+	  wrpr	%g0, %g5, %pstate		!   delay: enable vec intr
 
 	!
 	! If we're here, then we have programmed TICK_COMPARE with a %tick
@@ -859,12 +860,12 @@
 	srlx	%o0, 1, %o0
 	cmp	%o5, %o0			! In the future?
 	bg,a,pt	%xcc, 2f			! Yes, drive on.
-	wrpr	%g0, %g5, %pstate		!    delay: enable vec intr
+	  wrpr	%g0, %g5, %pstate		!    delay: enable vec intr
 	ba	1b				! No, try again.
-	sllx	%o4, 1, %o4			!    delay: double step size
+	  sllx	%o4, 1, %o4			!    delay: double step size
 
 2:	ba	current_thread_complete
-	nop
+	  nop
 	SET_SIZE(tick_rtt)
 
 #endif /* lint */
@@ -888,12 +889,12 @@
 /* XXXQ These should be inline templates, not functions */
         ENTRY(prefetch_page_w)
         retl
-	nop
+	  nop
         SET_SIZE(prefetch_page_w)
 
         ENTRY(prefetch_page_r)
         retl
-	nop
+	  nop
         SET_SIZE(prefetch_page_r)
 
 #endif	/* lint */
@@ -911,7 +912,7 @@
 /* XXXQ These should be inline templates, not functions */
 	ENTRY(prefetch_smap_w)
 	retl
-	nop
+	  nop
 	SET_SIZE(prefetch_smap_w)
 
 #endif	/* lint */
@@ -924,12 +925,7 @@
 
 /*ARGSUSED*/
 void
-vtag_flushpage(caddr_t vaddr, uint_t ctxnum)
-{}
-
-/*ARGSUSED*/
-void
-vtag_flushctx(uint_t ctxnum)
+vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
 {}
 
 /*ARGSUSED*/
@@ -944,17 +940,12 @@
 
 /*ARGSUSED*/
 void
-vtag_flushpage_tl1(uint64_t vaddr, uint64_t ctxnum)
+vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
 {}
 
 /*ARGSUSED*/
 void
-vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t ctx_pgcnt)
-{}
-
-/*ARGSUSED*/
-void
-vtag_flushctx_tl1(uint64_t ctxnum, uint64_t dummy)
+vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
 {}
 
 /*ARGSUSED*/
@@ -984,40 +975,22 @@
 	 * flush page from the tlb
 	 *
 	 * %o0 = vaddr
-	 * %o1 = ctxnum
+	 * %o1 = sfmmup
 	 */
+	SFMMU_CPU_CNUM(%o1, %g1, %g2)   /* %g1 = sfmmu cnum on this CPU */
+
+	mov	%g1, %o1
 	mov	MAP_ITLB | MAP_DTLB, %o2
 	ta	MMU_UNMAP_ADDR
 	brz,pt	%o0, 1f
-	nop
+	  nop
 	ba	panic_bad_hcall
-	mov	MMU_UNMAP_ADDR, %o1
+	  mov	MMU_UNMAP_ADDR, %o1
 1:
  	retl
-	nop
+	  nop
 	SET_SIZE(vtag_flushpage)
 
-	ENTRY_NP(vtag_flushctx)
-	/*
-	 * flush context from the tlb
-	 *
-	 * %o0 = ctxnum
-	 */
-	mov	%o0, %o2
-	mov	%g0, %o0	! XXXQ no cpu list yet
-	mov	%g0, %o1	! XXXQ no cpu list yet
-	mov	MAP_ITLB | MAP_DTLB, %o3
-	mov	MMU_DEMAP_CTX, %o5
-	ta	FAST_TRAP
-	brz,pt	%o0, 1f
-	nop
-	ba	panic_bad_hcall
-	mov	MMU_DEMAP_CTX, %o1
-1:
-	retl
-	  nop
-	SET_SIZE(vtag_flushctx)
-
 	ENTRY_NP(vtag_flushall)
 	mov	%g0, %o0	! XXX no cpu list yet
 	mov	%g0, %o1	! XXX no cpu list yet
@@ -1025,19 +998,19 @@
 	mov	MMU_DEMAP_ALL, %o5
 	ta	FAST_TRAP
 	brz,pt	%o0, 1f
-	nop
+	  nop
 	ba	panic_bad_hcall
-	mov	MMU_DEMAP_ALL, %o1
+	  mov	MMU_DEMAP_ALL, %o1
 1:
 	retl
-	nop
+	  nop
 	SET_SIZE(vtag_flushall)
 
 	ENTRY_NP(vtag_unmap_perm_tl1)
 	/*
 	 * x-trap to unmap perm map entry
 	 * %g1 = vaddr
-	 * %g2 = ctxnum
+	 * %g2 = ctxnum (KCONTEXT only)
 	 */
 	mov	%o0, %g3
 	mov	%o1, %g4
@@ -1073,7 +1046,7 @@
 	 * x-trap to flush page from tlb and tsb
 	 *
 	 * %g1 = vaddr, zero-extended on 32-bit kernel
-	 * %g2 = ctxnum
+	 * %g2 = sfmmup
 	 *
 	 * assumes TSBE_TAG = 0
 	 */
@@ -1082,13 +1055,15 @@
 	mov	%o0, %g3
 	mov	%o1, %g4
 	mov	%o2, %g5
-	mov	%g1, %o0			! vaddr
-	mov	%g2, %o1			! ctx
+	mov	%g1, %o0			/* vaddr */
+
+	SFMMU_CPU_CNUM(%g2, %o1, %g6)   /* %o1 = sfmmu cnum on this CPU */
+
 	mov	MAP_ITLB | MAP_DTLB, %o2
 	ta	MMU_UNMAP_ADDR
 	brz,pt	%o0, 1f
-	nop
+	  nop
 	ba	ptl1_panic
-	mov	PTL1_BAD_HCALL, %g1
+	  mov	PTL1_BAD_HCALL, %g1
 1:
 	mov	%g5, %o2
@@ -1103,7 +1078,7 @@
 	 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
 	 *
 	 * %g1 = vaddr, zero-extended on 32-bit kernel
-	 * %g2 = <zero32|ctx16|pgcnt16>
+	 * %g2 = <sfmmup58|pgcnt6>, (pgcnt - 1) is passed in via pgcnt6 bits.
 	 *
 	 * NOTE: this handler relies on the fact that no
 	 *	interrupts or traps can occur during the loop
@@ -1119,24 +1094,28 @@
 	mov	%o1, %g4
 	mov	%o2, %g5
 
-	set	0xffff, %g6
-	and	%g6, %g2, %g7			/* g7 = pgcnt */
-	srln	%g2, 16, %g2			/* g2 = ctxnum */
+	and	%g2, SFMMU_PGCNT_MASK, %g7	/* g7 = pgcnt - 1 */
+	add	%g7, 1, %g7			/* g7 = pgcnt */
+
+	andn	%g2, SFMMU_PGCNT_MASK, %o0	/* %o0 = sfmmup */
 
-	set	MMU_PAGESIZE, %g6		/* g2 = pgsize */
-1:
-	mov	%g1, %o0			! vaddr
-	mov	%g2, %o1			! ctx
+	SFMMU_CPU_CNUM(%o0, %g2, %g6)    /* %g2 = sfmmu cnum on this CPU */
+
+	set	MMU_PAGESIZE, %g6		/* g6 = pgsize */
+
+1:	
+	mov	%g1, %o0			/* vaddr */
+	mov	%g2, %o1			/* cnum */
 	mov	MAP_ITLB | MAP_DTLB, %o2
 	ta	MMU_UNMAP_ADDR
 	brz,pt	%o0, 2f
-	nop
+	  nop
 	ba	ptl1_panic
-	mov	PTL1_BAD_HCALL, %g1
+	  mov	PTL1_BAD_HCALL, %g1
 2:
 	deccc	%g7				/* decr pgcnt */
 	bnz,pt	%icc,1b
-	add	%g1, %g6, %g1			/* go to nextpage */
+	  add	%g1, %g6, %g1			/* go to nextpage */
 
 	mov	%g5, %o2
 	mov	%g4, %o1
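
The <sfmmup58|pgcnt6> encoding that vtag_flush_pgcnt_tl1 decodes above packs
the page count into the low bits of the hat pointer. A minimal sketch of the
packing, assuming the sfmmup is at least 64-byte aligned so its low six bits
are free (the shift value 6 follows from the pgcnt6 comment;
SFMMU_PGCNT_SHIFT is redefined here only for the example):

    #include <stdint.h>

    #define SFMMU_PGCNT_SHIFT 6
    #define SFMMU_PGCNT_MASK  ((1ULL << SFMMU_PGCNT_SHIFT) - 1)

    /* pack: carry (pgcnt - 1) in the low six bits, i.e. 1..64 pages */
    static uint64_t
    pack_sfmmup_pgcnt(uint64_t sfmmup, uint64_t pgcnt)
    {
            return (sfmmup | ((pgcnt - 1) & SFMMU_PGCNT_MASK));
    }

    /* unpack, mirroring the handler: %g7 = pgcnt, %o0 = sfmmup */
    static void
    unpack_sfmmup_pgcnt(uint64_t arg, uint64_t *sfmmup, uint64_t *pgcnt)
    {
            *pgcnt = (arg & SFMMU_PGCNT_MASK) + 1;
            *sfmmup = arg & ~SFMMU_PGCNT_MASK;
    }
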
@@ -1145,37 +1124,6 @@
 	retry
 	SET_SIZE(vtag_flush_pgcnt_tl1)
 
-	ENTRY_NP(vtag_flushctx_tl1)
-	/*
-	 * x-trap to flush context from tlb
-	 *
-	 * %g1 = ctxnum
-	 */
-	mov	%o0, %g3
-	mov	%o1, %g4
-	mov	%o2, %g5
-	mov	%o3, %g6
-	mov	%o5, %g7
-	mov	%g1, %o2
-	mov	%g0, %o0	! XXXQ no cpu list yet
-	mov	%g0, %o1	! XXXQ no cpu list yet
-	mov	MAP_ITLB | MAP_DTLB, %o3
-	mov	MMU_DEMAP_CTX, %o5
-	ta	FAST_TRAP
-	brz,pt	%o0, 1f
-	nop
-	ba	ptl1_panic
-	mov	PTL1_BAD_HCALL, %g1
-1:
-	mov	%g7, %o5
-	mov	%g6, %o3
-	mov	%g5, %o2
-	mov	%g4, %o1
-	mov	%g3, %o0
-	membar #Sync
-	retry
-	SET_SIZE(vtag_flushctx_tl1)
-
 	! Not implemented on US1/US2
 	ENTRY_NP(vtag_flushall_tl1)
 	mov	%o0, %g3
@@ -1189,9 +1137,9 @@
 	mov	MMU_DEMAP_ALL, %o5
 	ta	FAST_TRAP
 	brz,pt	%o0, 1f
-	nop
+	  nop
 	ba	ptl1_panic
-	mov	PTL1_BAD_HCALL, %g1
+	  mov	PTL1_BAD_HCALL, %g1
 1:
 	mov	%g7, %o5
 	mov	%g6, %o3	! XXXQ not used?
@@ -1220,7 +1168,7 @@
 	 */
 	! XXXQ
 	retl
-	nop
+	  nop
 	SET_SIZE(vac_flushpage)
 
 	ENTRY_NP(vac_flushpage_tl1)
@@ -1264,10 +1212,10 @@
 	flush	%o0
 	subcc	%o1, ICACHE_FLUSHSZ, %o1		! bytes = bytes-0x20
 	bgu,pt	%ncc, 1b
-	add	%o0, ICACHE_FLUSHSZ, %o0		! vaddr = vaddr+0x20
+	  add	%o0, ICACHE_FLUSHSZ, %o0		! vaddr = vaddr+0x20
 
 	retl
-	nop
+	  nop
 	SET_SIZE(flush_instr_mem)
 
 #endif /* !lint */
--- a/usr/src/uts/sun4v/ml/mach_offsets.in	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4v/ml/mach_offsets.in	Tue Jun 20 07:21:09 2006 -0700
@@ -134,12 +134,14 @@
 htrap_trace_record	HTRAP_ENT_SIZE
 
 hat	HAT_SIZE
-	sfmmu_cnum
 	sfmmu_cpusran
 	sfmmu_tsb
 	sfmmu_ismblkpa
 	sfmmu_flags
 	sfmmu_hvblock
+	sfmmu_cext
+	sfmmu_ctx_lock
+	sfmmu_ctxs
 
 sfmmu_global_stat HATSTAT_SIZE
 	sf_pagefaults		HATSTAT_PAGEFAULT
@@ -148,9 +150,6 @@
 	sf_khash_searches	HATSTAT_KHASH_SEARCH
 	sf_khash_links		HATSTAT_KHASH_LINKS
 
-ctx	CTX_SIZE		CTX_SZ_SHIFT
-	ctx_un.ctx_sfmmup	CTX_SFMMUP
-
 sf_hment	SFHME_SIZE	SFHME_SHIFT
 	hme_tte		SFHME_TTE
 
--- a/usr/src/uts/sun4v/os/fillsysinfo.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4v/os/fillsysinfo.c	Tue Jun 20 07:21:09 2006 -0700
@@ -339,17 +339,11 @@
 	}
 
 	/*
-	 * Get the valid contexts, mmu page sizes mask, Q sizes and isalist/r
+	 * Get the valid mmu page sizes mask, Q sizes and isalist/r
 	 * from the MD for the first available CPU in cpulist.
+	 *
+	 * Do not expect the MMU page sizes mask to be more than 32-bit.
 	 */
-
-	if (nctxs == 0)
-		nctxs = (uint_t)(1 << get_mmu_ctx_bits(mdp, cpulist[0]));
-
-	if (nctxs > MAX_NCTXS)
-		nctxs = MAX_NCTXS;
-
-	/* Do not expect the MMU page sizes mask to be more than 32-bit. */
 	mmu_exported_pagesize_mask = (int)get_cpu_pagesizes(mdp, cpulist[0]);
 
 	for (i = 0; i < nocpus; i++)
--- a/usr/src/uts/sun4v/os/ppage.c	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4v/os/ppage.c	Tue Jun 20 07:21:09 2006 -0700
@@ -261,7 +261,7 @@
 {
 	ASSERT(*pslot == va);
 
-	vtag_flushpage(va, KCONTEXT);
+	vtag_flushpage(va, (uint64_t)ksfmmup);
 	*pslot = NULL;				/* release the slot */
 }
 
--- a/usr/src/uts/sun4v/sys/cpu_module.h	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4v/sys/cpu_module.h	Tue Jun 20 07:21:09 2006 -0700
@@ -69,12 +69,12 @@
 /*
  * virtual demap flushes (tlbs & virtual tag caches)
  */
-void	vtag_flushpage(caddr_t addr, uint_t ctx);
-void	vtag_flushctx(uint_t ctx);
+void	vtag_flushpage(caddr_t addr, uint64_t sfmmup);
 void	vtag_flushall(void);
-void	vtag_flushpage_tl1(uint64_t addr, uint64_t ctx);
-void	vtag_flush_pgcnt_tl1(uint64_t addr, uint64_t ctx_pgcnt);
-void	vtag_flushctx_tl1(uint64_t ctx, uint64_t dummy);
+#pragma weak vtag_flushall_uctxs
+void	vtag_flushall_uctxs(void);
+void	vtag_flushpage_tl1(uint64_t addr, uint64_t sfmmup);
+void	vtag_flush_pgcnt_tl1(uint64_t addr, uint64_t sfmmup_pgcnt);
 void	vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2);
 void	vtag_unmap_perm_tl1(uint64_t addr, uint64_t ctx);
 
@@ -87,12 +87,6 @@
 void	vac_flushcolor_tl1(uint64_t color, uint64_t dummy);
 
 /*
- * Calculate, set optimal dtlb pagesize, for ISM and mpss, to support
- * cpus with non-fully-associative dtlbs.
- */
-extern uchar_t *ctx_pgsz_array;
-
-/*
  * flush instruction cache if needed
  */
 void	flush_instr_mem(caddr_t addr, size_t len);
--- a/usr/src/uts/sun4v/sys/machcpuvar.h	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4v/sys/machcpuvar.h	Tue Jun 20 07:21:09 2006 -0700
@@ -122,6 +122,11 @@
 	 * E$ data, which is needed for the specific cpu type.
 	 */
 	void		*cpu_private;		/* ptr to cpu private data */
+	/*
+	 * per-MMU ctxdom CPU data.
+	 */
+	uint_t		cpu_mmu_idx;
+	struct mmu_ctx	*cpu_mmu_ctxp;
 
 	ptl1_state_t	ptl1_state;
 
@@ -163,6 +168,8 @@
 typedef	struct machcpu	machcpu_t;
 
 #define	cpu_startup_thread	cpu_m.startup_thread
+#define	CPU_MMU_IDX(cp)		((cp)->cpu_m.cpu_mmu_idx)
+#define	CPU_MMU_CTXP(cp)	((cp)->cpu_m.cpu_mmu_ctxp)
 #define	NINTR_THREADS	(LOCK_LEVEL)	/* number of interrupt threads */
 
 /*
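
The two machcpu fields added above tie each CPU to an MMU context ID domain.
A rough C picture of how they relate to the per-hat sfmmu_ctxs[] array
introduced in the offsets files (only the field and macro names come from
this patch; the sfmmu_ctx_t layout is an assumption):

    #include <stdint.h>

    typedef unsigned int uint_t;

    struct mmu_ctx;                         /* per-domain ctx state, opaque */

    typedef struct sfmmu_ctx {              /* assumed sfmmu_ctxs[] element */
            uint64_t        cnum;           /* ctx id held in that domain */
    } sfmmu_ctx_t;

    struct machcpu_sketch {
            uint_t          cpu_mmu_idx;    /* index into hat's sfmmu_ctxs[] */
            struct mmu_ctx  *cpu_mmu_ctxp;  /* this CPU's ctx domain */
    };
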
--- a/usr/src/uts/sun4v/sys/machparam.h	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4v/sys/machparam.h	Tue Jun 20 07:21:09 2006 -0700
@@ -300,6 +300,7 @@
 #define	PTL1_BAD_RED				17
 #define	PTL1_BAD_HCALL_UNMAP_PERM_EINVAL	18
 #define	PTL1_BAD_HCALL_UNMAP_PERM_ENOMAP	19
+#define	PTL1_BAD_RAISE_TSBEXCP			20
 
 /*
  * Defines the max trap level allowed
--- a/usr/src/uts/sun4v/vm/mach_sfmmu_asm.s	Tue Jun 20 03:51:06 2006 -0700
+++ b/usr/src/uts/sun4v/vm/mach_sfmmu_asm.s	Tue Jun 20 07:21:09 2006 -0700
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -61,12 +60,7 @@
 
 /* ARGSUSED */
 void
-sfmmu_ctx_steal_tl1(uint64_t sctx, uint64_t rctx)
-{}
-
-/* ARGSUSED */
-void
-sfmmu_raise_tsb_exception(uint64_t sctx, uint64_t rctx)
+sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
 {}
 
 int
@@ -91,92 +85,115 @@
 #else	/* lint */
 
 /*
- * 1. If stealing ctx, flush all TLB entries whose ctx is ctx-being-stolen.
- * 2. If processor is running in the ctx-being-stolen, set the
- *    context to the resv context. That is 
- *    If processor in User-mode - pri/sec-ctx both set to ctx-being-stolen,
- *		change both pri/sec-ctx registers to resv ctx.
- *    If processor in Kernel-mode - pri-ctx is 0, sec-ctx is ctx-being-stolen,
- *		just change sec-ctx register to resv ctx. When it returns to
- *		kernel-mode, user_rtt will change pri-ctx.
+ * Invalidate either the context of a specific victim process, or the
+ * context of whatever process is currently running on this CPU.
  *
- * Note: For multiple page size TLB, no need to set page sizes for
- *       DEMAP context.
- *
- * %g1 = ctx being stolen (victim)
- * %g2 = invalid ctx to replace victim with
+ * %g1 = sfmmup whose ctx is being invalidated (victim)
+ *	 when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT.
+ * Note %g1 is the only input argument used by this xcall handler.
  */
-	ENTRY(sfmmu_ctx_steal_tl1)
-	/*
-	 * Flush TLBs.
-	 */
+
+	ENTRY(sfmmu_raise_tsb_exception)
+	!
+	! if (victim == INVALID_CONTEXT) {
+	!	if (sec-ctx > INVALID_CONTEXT)
+	!		write INVALID_CONTEXT to sec-ctx
+	!	if (pri-ctx > INVALID_CONTEXT) 
+	!		write INVALID_CONTEXT to pri-ctx
+	!
+	! } else if (current CPU tsbmiss->usfmmup != victim sfmmup) {
+	!	return
+	! } else {
+	!	if (sec-ctx > INVALID_CONTEXT)
+	!		write INVALID_CONTEXT to sec-ctx
+	!	
+	!	if (pri-ctx > INVALID_CONTEXT)
+	!		write INVALID_CONTEXT to pri-ctx
+	! }
+	!
 
-	/* flush context from the tlb via HV call */
+	sethi	%hi(ksfmmup), %g3
+	ldx	[%g3 + %lo(ksfmmup)], %g3
+	cmp	%g1, %g3
+	be,a,pn %xcc, ptl1_panic	/* can't invalidate kernel ctx */
+	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1
+
+	set	INVALID_CONTEXT, %g2
+	
+	cmp	%g1, INVALID_CONTEXT
+	bne,pt	%xcc, 1f			/* called from wrap_around? */
+	  mov	MMU_SCONTEXT, %g3
+
+	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = sec-ctx */
+	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
+	ble,pn	%xcc, 0f			/* yes, no need to change */
+	  mov	MMU_PCONTEXT, %g7
+	
+	stxa	%g2, [%g3]ASI_MMU_CTX		/* set invalid ctx */
+	membar	#Sync
+
+0:	
+	ldxa	[%g7]ASI_MMU_CTX, %g5		/* %g5 = pri-ctx */
+	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
+	ble,pn	%xcc, 6f			/* yes, no need to change */
+	  nop
+
+	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid  */
+	membar	#Sync
+
+6:	/* flushall tlb */
 	mov	%o0, %g3
 	mov	%o1, %g4
-	mov	%o2, %g5
-	mov	%o3, %g6
+	mov	%o2, %g6 
 	mov	%o5, %g7
 
-	mov	%g1, %o2	! ctx#
-	mov	%g0, %o0	! Current CPU only (use NULL)
-	mov	%g0, %o1	! Current CPU only (use NULL)
-	mov	MAP_ITLB | MAP_DTLB, %o3
-	mov	MMU_DEMAP_CTX, %o5
-	ta	FAST_TRAP
-	brnz,a,pn %o0, ptl1_panic
-	  mov	PTL1_BAD_HCALL, %g1
-
+	mov	%g0, %o0	! XXX no cpu list yet
+	mov	%g0, %o1	! XXX no cpu list yet
+	mov	MAP_ITLB | MAP_DTLB, %o2
+	mov	MMU_DEMAP_ALL, %o5
+	ta	FAST_TRAP
+	brz,pt	%o0, 5f
+	  nop
+	ba	panic_bad_hcall
+	  mov	MMU_DEMAP_ALL, %o1
+5:
 	mov	%g3, %o0
 	mov	%g4, %o1
-	mov	%g5, %o2
-	mov	%g6, %o3
+	mov	%g6, %o2
 	mov	%g7, %o5
-
-	/* fall through to the code below */
-
+	
+	ba	3f
+	  nop
+1:
 	/*
-	 * We enter here if we're just raising a TSB miss
-	 * exception, without switching MMU contexts.  In
-	 * this case, there is no need to flush the TLB.
+	 * %g1 = sfmmup
+	 * %g2 = INVALID_CONTEXT
+	 * %g3 = MMU_SCONTEXT
 	 */
-	ALTENTRY(sfmmu_raise_tsb_exception)
-	!
-	! %g1 = ctx being stolen (victim)
-	! %g2 = invalid ctx to replace victim with
-	!
-	! if (sec-ctx != victim) {
-	!	return
-	! } else {
-	!	if (pri-ctx == victim) {
-	!		write INVALID_CONTEXT to sec-ctx
-	!		write INVALID_CONTEXT to pri-ctx
-	!	} else {
-	!		write INVALID_CONTEXT to sec-ctx
-	!	}
-	! }
-	!
-	cmp	%g1, NUM_LOCKED_CTXS
-	blt,a,pn %icc, ptl1_panic		/* can't steal locked ctx */
-	  mov	PTL1_BAD_CTX_STEAL, %g1
-	set	CTXREG_CTX_MASK, %g6
-	set	MMU_SCONTEXT, %g3
-	ldxa	[%g3]ASI_MMU_CTX, %g5		/* get sec-ctx */
-	and	%g5, %g6, %g5
+	CPU_TSBMISS_AREA(%g5, %g6)		/* load cpu tsbmiss area */
+	ldx	[%g5 + TSBMISS_UHATID], %g5     /* load usfmmup */
+
 	cmp	%g5, %g1			/* is it the victim? */
-	bne,pn	%icc, 2f			/* was our sec-ctx a victim? */
+	bne,pt	%xcc, 2f			/* victim not running here? */
+	  nop
+
+	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = sec-ctx */
+	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
+	ble,pn	%xcc, 0f			/* yes, no need to change */
 	  mov	MMU_PCONTEXT, %g7
-	ldxa	[%g7]ASI_MMU_CTX, %g4		/* get pri-ctx */
-	and	%g4, %g6, %g4
-	stxa	%g2, [%g3]ASI_MMU_CTX		/* set sec-ctx to invalid ctx */
+
+	stxa	%g2, [%g3]ASI_MMU_CTX		/* set sec-ctx to invalid */
 	membar	#Sync
-	cmp	%g1, %g4			/* is it the victim? */
-	bne 	%icc, 3f			/* nope, no need to change it */
+
+0:
+	ldxa	[%g7]ASI_MMU_CTX, %g4		/* %g4 = pri-ctx */
+	cmp	%g4, INVALID_CONTEXT		/* kernel or invalid ctx? */
+	ble	%icc, 3f			/* yes, no need to change */
 	  nop
-	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid ctx */
-	/* next instruction is retry so no membar sync */
+	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid  */
+	membar	#Sync
+
 3:
-	membar	#Sync
 	/* TSB program must be cleared - walkers do not check a context. */
 	mov	%o0, %g3
 	mov	%o1, %g4
@@ -192,7 +209,7 @@
 	mov	%g7, %o5
 2:
 	retry
-	SET_SIZE(sfmmu_ctx_steal_tl1)
+	SET_SIZE(sfmmu_raise_tsb_exception)
 
 	ENTRY_NP(sfmmu_getctx_pri)
 	set	MMU_PCONTEXT, %o0
@@ -212,14 +229,13 @@
 
 	/*
 	 * Set the secondary context register for this process.
-	 * %o0 = context number for this process.
+	 * %o0 = context number
 	 */
 	ENTRY_NP(sfmmu_setctx_sec)
 	/*
 	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
 	 * But we can also get called from C with interrupts enabled. So,
-	 * we need to check first. Also, resume saves state in %o3 and %o5
-	 * so we can't use those registers here.
+	 * we need to check first.
 	 */
 
 	/* If interrupts are not disabled, then disable them */
@@ -233,10 +249,15 @@
 	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
 	flush	%o4
 
+	/*
+	 * If the routine is entered with intr enabled, then enable intr now.
+	 * Otherwise, keep intr disabled and return without enabling intr.
+	 * %g1 - old intr state
+	 */
 	btst	PSTATE_IE, %g1
-	bnz,a,pt %icc, 1f
+	bnz,a,pt %icc, 2f
 	wrpr	%g0, %g1, %pstate		/* enable interrupts */
-1:	retl
+2:	retl
 	nop
 	SET_SIZE(sfmmu_setctx_sec)
 
@@ -260,12 +281,10 @@
 	 * %o0 - hat pointer
 	 */
 	ENTRY_NP(sfmmu_load_mmustate)
-	/*
-	 * From resume we call sfmmu_load_mmustate with interrupts disabled.
-	 * But we can also get called from C with interrupts enabled. So,
-	 * we need to check first. Also, resume saves state in %o5 and we
-	 * can't use this register here.
-	 */
+
+#ifdef DEBUG
+	PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l1, %g1)
+#endif /* DEBUG */
 
 	sethi	%hi(ksfmmup), %o3
 	ldx	[%o3 + %lo(ksfmmup)], %o3
@@ -273,12 +292,6 @@
 	be,pn	%xcc, 3f			! if kernel as, do nothing
 	  nop
 
-	/* If interrupts are not disabled, then disable them */
-	rdpr	%pstate, %g1
-	btst	PSTATE_IE, %g1
-	bnz,a,pt %icc, 1f
-	wrpr	%g1, PSTATE_IE, %pstate		! disable interrupts
-1:
 	/*
 	 * We need to set up the TSB base register, tsbmiss
 	 * area, and pass the TSB information into the hypervisor
@@ -307,7 +320,14 @@
 #endif /* DEBUG */
 	CPU_ADDR(%o2, %o4)	! load CPU struct addr to %o2 using %o4
 	ldub    [%o2 + CPU_TSTAT_FLAGS], %o1	! load cpu_tstat_flag to %o1
-	lduh	[%o0 + SFMMU_CNUM], %o2
+
+	/*
+	 * %o0 = sfmmup
+	 * %o2 = returned sfmmu cnum on this CPU
+	 * %o4 = scratch
+	 */
+	SFMMU_CPU_CNUM(%o0, %o2, %o4)
+
 	mov	%o5, %o4			! preserve %o5 for resume
 	mov	%o0, %o3			! preserve %o0
 	btst	TSTAT_TLB_STATS, %o1
@@ -333,9 +353,6 @@
 	stx	%o0, [%o2 + TSBMISS_UHATID]
 	stuh	%o3, [%o2 + TSBMISS_HATFLAGS]
 
-	btst	PSTATE_IE, %g1
-	bnz,a,pt %icc, 3f
-	wrpr	%g0, %g1, %pstate		! enable interrupts
 3:	retl
 	nop
 	SET_SIZE(sfmmu_load_mmustate)
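
SFMMU_CPU_CNUM, which replaces the old global sfmmu_cnum load here and in
the demap paths, resolves a hat's context number through the current CPU's
domain index. A hedged C rendition, consistent with the machcpuvar.h sketch
above (the struct layouts and cur_cpu_mmu_idx are assumptions):

    #include <stdint.h>

    typedef struct { uint64_t cnum; } sfmmu_ctx_t;  /* assumed layout */

    typedef struct sfmmu {
            sfmmu_ctx_t sfmmu_ctxs[1];      /* one slot per MMU ctx domain */
    } sfmmu_t;

    extern unsigned int cur_cpu_mmu_idx(void);  /* stand-in for CPU_MMU_IDX */

    /* sketch of SFMMU_CPU_CNUM: cnum of this hat in this CPU's domain */
    static uint64_t
    sfmmu_cpu_cnum(sfmmu_t *sfmmup)
    {
            return (sfmmup->sfmmu_ctxs[cur_cpu_mmu_idx()].cnum);
    }
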