6311933 rdmsr/wrmsr do not need to set/pass values via memory pointers
authorkucharsk
Thu, 27 Oct 2005 14:59:45 -0700
changeset 770 0eda482eb80f
parent 769 9710ecbd3653
child 771 1c25a2120ec0
6311933 rdmsr/wrmsr do not need to set/pass values via memory pointers
usr/src/cmd/mdb/intel/kmdb/kctl/kctl_isadep.c
usr/src/uts/i86pc/os/mlsetup.c
usr/src/uts/i86pc/os/mp_startup.c
usr/src/uts/i86pc/os/startup.c
usr/src/uts/i86pc/vm/mach_i86mmu.c
usr/src/uts/intel/ia32/ml/i86_subr.s
usr/src/uts/intel/pcbe/opteron_pcbe.c
usr/src/uts/intel/pcbe/p123_pcbe.c
usr/src/uts/intel/pcbe/p4_pcbe.c
usr/src/uts/intel/sys/x86_archext.h
--- a/usr/src/cmd/mdb/intel/kmdb/kctl/kctl_isadep.c	Thu Oct 27 13:18:49 2005 -0700
+++ b/usr/src/cmd/mdb/intel/kmdb/kctl/kctl_isadep.c	Thu Oct 27 14:59:45 2005 -0700
@@ -260,19 +260,16 @@
 	 * and fail horribly if it hasn't.  We'll install a pointer to a dummy
 	 * cpu_t for use during our initialization.
 	 */
-	cpu_t *cpu = kobj_zalloc(sizeof (cpu_t), KM_TMP);
-	cpu_t *old;
+	cpu_t *old = (cpu_t *)rdmsr(MSR_AMD_GSBASE);
 
-	(void) rdmsr(MSR_AMD_GSBASE, (uint64_t *)&old);
-	wrmsr(MSR_AMD_GSBASE, (uint64_t *)&cpu);
-
+	wrmsr(MSR_AMD_GSBASE, (uint64_t)kobj_zalloc(sizeof (cpu_t), KM_TMP));
 	return (old);
 }
 
 void
 kctl_boot_tmpfini(void *old)
 {
-	wrmsr(MSR_AMD_GSBASE, (uint64_t *)&old);
+	wrmsr(MSR_AMD_GSBASE, (uint64_t)old);
 }
 
 #else
--- a/usr/src/uts/i86pc/os/mlsetup.c	Thu Oct 27 13:18:49 2005 -0700
+++ b/usr/src/uts/i86pc/os/mlsetup.c	Thu Oct 27 14:59:45 2005 -0700
@@ -188,8 +188,7 @@
 		/*
 		 * setup %gs for the kernel
 		 */
-		uint64_t addr64 = (uint64_t)&cpus[0];
-		wrmsr(MSR_AMD_GSBASE, &addr64);
+		wrmsr(MSR_AMD_GSBASE, (uint64_t)&cpus[0]);
 		/*
 		 * XX64 We should never dereference off "other gsbase" or
 		 * "fsbase".  So, we should arrange to point FSBASE and
@@ -200,9 +199,8 @@
 		 * For now, point it at 8G -- at least it should be unmapped
 		 * until some 64-bit processes run.
 		 */
-		addr64 = 0x200000000ul;
-		wrmsr(MSR_AMD_FSBASE, &addr64);
-		wrmsr(MSR_AMD_KGSBASE, &addr64);
+		wrmsr(MSR_AMD_FSBASE, 0x200000000UL);
+		wrmsr(MSR_AMD_KGSBASE, 0x200000000UL);
 	}
 
 #elif defined(__i386)
--- a/usr/src/uts/i86pc/os/mp_startup.c	Thu Oct 27 13:18:49 2005 -0700
+++ b/usr/src/uts/i86pc/os/mp_startup.c	Thu Oct 27 14:59:45 2005 -0700
@@ -135,8 +135,6 @@
 static void
 init_cpu_syscall(struct cpu *cp)
 {
-	uint64_t value;
-
 	kpreempt_disable();
 
 #if defined(__amd64)
@@ -160,19 +158,16 @@
 		/*
 		 * Program the magic registers ..
 		 */
-		value = ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) << 32;
-		wrmsr(MSR_AMD_STAR, &value);
-		value = (uintptr_t)sys_syscall;
-		wrmsr(MSR_AMD_LSTAR, &value);
-		value = (uintptr_t)sys_syscall32;
-		wrmsr(MSR_AMD_CSTAR, &value);
+		wrmsr(MSR_AMD_STAR, ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) <<
+		    32);
+		wrmsr(MSR_AMD_LSTAR, (uint64_t)(uintptr_t)sys_syscall);
+		wrmsr(MSR_AMD_CSTAR, (uint64_t)(uintptr_t)sys_syscall32);
 
 		/*
 		 * This list of flags is masked off the incoming
 		 * %rfl when we enter the kernel.
 		 */
-		value = PS_IE | PS_T;
-		wrmsr(MSR_AMD_SFMASK, &value);
+		wrmsr(MSR_AMD_SFMASK, (uint64_t)(uintptr_t)(PS_IE | PS_T));
 	}
 #endif
 
@@ -208,11 +203,8 @@
 		 * resume() sets this value to the base of the threads stack
 		 * via a context handler.
 		 */
-		value = 0;
-		wrmsr(MSR_INTC_SEP_ESP, &value);
-
-		value = (uintptr_t)sys_sysenter;
-		wrmsr(MSR_INTC_SEP_EIP, &value);
+		wrmsr(MSR_INTC_SEP_ESP, 0ULL);
+		wrmsr(MSR_INTC_SEP_EIP, (uint64_t)(uintptr_t)sys_sysenter);
 	}
 
 	kpreempt_enable();
@@ -665,11 +657,9 @@
 		 * Certain Reverse REP MOVS May Produce Unpredictable Behaviour
 		 */
 #if defined(OPTERON_ERRATUM_109)
-		uint64_t	patchlevel;
 
-		(void) rdmsr(MSR_AMD_PATCHLEVEL, &patchlevel);
 		/* workaround is to print a warning to upgrade BIOS */
-		if (patchlevel == 0)
+		if (rdmsr(MSR_AMD_PATCHLEVEL) == 0)
 			opteron_erratum_109++;
 #else
 		WARNING(cpu, 109);
@@ -695,16 +685,13 @@
 		 * sequential execution across the va hole boundary.
 		 */
 		if (lma == 0) {
-			uint64_t	efer;
-
 			/*
 			 * check LMA once: assume all cpus are in long mode
 			 * or not.
 			 */
 			lma = 1;
 
-			(void) rdmsr(MSR_AMD_EFER, &efer);
-			if (efer & AMD_EFER_LMA) {
+			if (rdmsr(MSR_AMD_EFER) & AMD_EFER_LMA) {
 				if (hole_start) {
 					hole_start -= PAGESIZE;
 				} else {
@@ -738,12 +725,9 @@
 
 		if (opteron_erratum_122 || lgrp_plat_node_cnt > 1 ||
 		    cpuid_get_ncpu_per_chip(cpu) > 1) {
-			uint64_t	hwcrval;
-
 			/* disable TLB Flush Filter */
-			(void) rdmsr(MSR_AMD_HWCR, &hwcrval);
-			hwcrval |= AMD_HWCR_FFDIS;
-			wrmsr(MSR_AMD_HWCR, &hwcrval);
+			wrmsr(MSR_AMD_HWCR, rdmsr(MSR_AMD_HWCR) |
+			    (uint64_t)(uintptr_t)AMD_HWCR_FFDIS);
 			opteron_erratum_122++;
 		}
 
@@ -765,11 +749,8 @@
 		 */
 
 		if (cpuid_get_ncpu_per_chip(cpu) > 1) {
-			uint64_t	patchlevel;
-
-			(void) rdmsr(MSR_AMD_PATCHLEVEL, &patchlevel);
 			/* workaround is to print a warning to upgrade BIOS */
-			if (patchlevel == 0)
+			if (rdmsr(MSR_AMD_PATCHLEVEL) == 0)
 				opteron_erratum_123++;
 		}
 	}
@@ -787,14 +768,11 @@
 		 */
 		if ((opteron_erratum_131 == 0) && ((lgrp_plat_node_cnt *
 		    cpuid_get_ncpu_per_chip(cpu)) >= 4)) {
-			uint64_t nbcfg;
-
 			/*
 			 * Workaround is to print a warning to upgrade
 			 * the BIOS
 			 */
-			(void) rdmsr(MSR_AMD_NB_CFG, &nbcfg);
-			if (!(nbcfg & AMD_NB_CFG_SRQ_HEARTBEAT))
+			if (!(rdmsr(MSR_AMD_NB_CFG) & AMD_NB_CFG_SRQ_HEARTBEAT))
 				opteron_erratum_131++;
 		}
 #endif
@@ -1267,20 +1245,15 @@
 static void
 cpu_sep_enable(void)
 {
-	uint64_t value;
-
 	ASSERT(x86_feature & X86_SEP);
 	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
 
-	value = KCS_SEL;
-	wrmsr(MSR_INTC_SEP_CS, &value);
+	wrmsr(MSR_INTC_SEP_CS, (uint64_t)(uintptr_t)KCS_SEL);
 }
 
 static void
 cpu_sep_disable(void)
 {
-	uint64_t value;
-
 	ASSERT(x86_feature & X86_SEP);
 	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
 
@@ -1288,28 +1261,22 @@
 	 * Setting the SYSENTER_CS_MSR register to 0 causes software executing
 	 * the sysenter or sysexit instruction to trigger a #gp fault.
 	 */
-	value = 0;
-	wrmsr(MSR_INTC_SEP_CS, &value);
+	wrmsr(MSR_INTC_SEP_CS, 0ULL);
 }
 
 static void
 cpu_asysc_enable(void)
 {
-	uint64_t value;
-
 	ASSERT(x86_feature & X86_ASYSC);
 	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
 
-	(void) rdmsr(MSR_AMD_EFER, &value);
-	value |= AMD_EFER_SCE;
-	wrmsr(MSR_AMD_EFER, &value);
+	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) |
+	    (uint64_t)(uintptr_t)AMD_EFER_SCE);
 }
 
 static void
 cpu_asysc_disable(void)
 {
-	uint64_t value;
-
 	ASSERT(x86_feature & X86_ASYSC);
 	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);
 
@@ -1317,7 +1284,6 @@
 	 * Turn off the SCE (syscall enable) bit in the EFER register. Software
 	 * executing syscall or sysret with this bit off will incur a #ud trap.
 	 */
-	(void) rdmsr(MSR_AMD_EFER, &value);
-	value &= ~AMD_EFER_SCE;
-	wrmsr(MSR_AMD_EFER, &value);
+	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) &
+	    ~((uint64_t)(uintptr_t)AMD_EFER_SCE));
 }
--- a/usr/src/uts/i86pc/os/startup.c	Thu Oct 27 13:18:49 2005 -0700
+++ b/usr/src/uts/i86pc/os/startup.c	Thu Oct 27 14:59:45 2005 -0700
@@ -2411,24 +2411,20 @@
 setup_mca()
 {
 	int 		i;
-	uint64_t	allzeros;
-	uint64_t	allones;
 	uint64_t	mca_cap;
 
 	if (!(x86_feature & X86_MCA))
 		return;
-	(void) rdmsr(REG_MCG_CAP, &mca_cap);
-	allones = 0xffffffffffffffffULL;
+	mca_cap = rdmsr(REG_MCG_CAP);
 	if (mca_cap & MCG_CAP_CTL_P)
-		(void) wrmsr(REG_MCG_CTL, &allones);
+		wrmsr(REG_MCG_CTL, -1ULL);	/* all ones */
 	mca_cnt = mca_cap & MCG_CAP_COUNT_MASK;
 	if (mca_cnt > P6_MCG_CAP_COUNT)
 		mca_cnt = P6_MCG_CAP_COUNT;
 	for (i = 1; i < mca_cnt; i++)
-		(void) wrmsr(mci_ctl[i], &allones);
-	allzeros = 0;
+		wrmsr(mci_ctl[i], -1ULL);	/* all ones */
 	for (i = 0; i < mca_cnt; i++)
-		(void) wrmsr(mci_status[i], &allzeros);
+		wrmsr(mci_status[i], 0ULL);
 	setcr4(getcr4() | CR4_MCE);
 
 }
@@ -2437,21 +2433,16 @@
 mca_exception(struct regs *rp)
 {
 	uint64_t	status, addr;
-	uint64_t	allzeros;
-	uint64_t	buf;
 	int		i, ret = 1, errcode, mserrcode;
 
-	allzeros = 0;
-	(void) rdmsr(REG_MCG_STATUS, &buf);
-	status = buf;
+	status = rdmsr(REG_MCG_STATUS);
 	if (status & MCG_STATUS_RIPV)
 		ret = 0;
 	if (status & MCG_STATUS_EIPV)
 		cmn_err(CE_WARN, "MCE at 0x%lx", rp->r_pc);
-	(void) wrmsr(REG_MCG_STATUS, &allzeros);
+	wrmsr(REG_MCG_STATUS, 0ULL);
 	for (i = 0; i < mca_cnt; i++) {
-		(void) rdmsr(mci_status[i], &buf);
-		status = buf;
+		status = rdmsr(mci_status[i]);
 		/*
 		 * If status register not valid skip this bank
 		 */
@@ -2464,8 +2455,7 @@
 			 * If mci_addr contains the address where
 			 * error occurred, display the address
 			 */
-			(void) rdmsr(mci_addr[i], &buf);
-			addr = buf;
+			addr = rdmsr(mci_addr[i]);
 			cmn_err(CE_WARN, "MCE: Bank %d: error code 0x%x:"\
 			    "addr = 0x%" PRIx64 ", model errcode = 0x%x", i,
 			    errcode, addr, mserrcode);
@@ -2474,7 +2464,7 @@
 			    "MCE: Bank %d: error code 0x%x, mserrcode = 0x%x",
 			    i, errcode, mserrcode);
 		}
-		(void) wrmsr(mci_status[i], &allzeros);
+		wrmsr(mci_status[i], 0ULL);
 	}
 	return (ret);
 }
@@ -2489,28 +2479,28 @@
 	if (!(x86_feature & X86_MTRR))
 		return;
 
-	(void) rdmsr(REG_MTRRCAP, &mtrrcap);
-	(void) rdmsr(REG_MTRRDEF, &mtrrdef);
+	mtrrcap = rdmsr(REG_MTRRCAP);
+	mtrrdef = rdmsr(REG_MTRRDEF);
 	if (mtrrcap & MTRRCAP_FIX) {
-		(void) rdmsr(REG_MTRR64K, &mtrr64k);
-		(void) rdmsr(REG_MTRR16K1, &mtrr16k1);
-		(void) rdmsr(REG_MTRR16K2, &mtrr16k2);
-		(void) rdmsr(REG_MTRR4K1, &mtrr4k1);
-		(void) rdmsr(REG_MTRR4K2, &mtrr4k2);
-		(void) rdmsr(REG_MTRR4K3, &mtrr4k3);
-		(void) rdmsr(REG_MTRR4K4, &mtrr4k4);
-		(void) rdmsr(REG_MTRR4K5, &mtrr4k5);
-		(void) rdmsr(REG_MTRR4K6, &mtrr4k6);
-		(void) rdmsr(REG_MTRR4K7, &mtrr4k7);
-		(void) rdmsr(REG_MTRR4K8, &mtrr4k8);
+		mtrr64k = rdmsr(REG_MTRR64K);
+		mtrr16k1 = rdmsr(REG_MTRR16K1);
+		mtrr16k2 = rdmsr(REG_MTRR16K2);
+		mtrr4k1 = rdmsr(REG_MTRR4K1);
+		mtrr4k2 = rdmsr(REG_MTRR4K2);
+		mtrr4k3 = rdmsr(REG_MTRR4K3);
+		mtrr4k4 = rdmsr(REG_MTRR4K4);
+		mtrr4k5 = rdmsr(REG_MTRR4K5);
+		mtrr4k6 = rdmsr(REG_MTRR4K6);
+		mtrr4k7 = rdmsr(REG_MTRR4K7);
+		mtrr4k8 = rdmsr(REG_MTRR4K8);
 	}
 	if ((vcnt = (mtrrcap & MTRRCAP_VCNTMASK)) > MAX_MTRRVAR)
 		vcnt = MAX_MTRRVAR;
 
 	for (i = 0, ecx = REG_MTRRPHYSBASE0, mtrrphys = mtrrphys_arr;
 		i <  vcnt - 1; i++, ecx += 2, mtrrphys++) {
-		(void) rdmsr(ecx, &mtrrphys->mtrrphys_base);
-		(void) rdmsr(ecx + 1, &mtrrphys->mtrrphys_mask);
+		mtrrphys->mtrrphys_base = rdmsr(ecx);
+		mtrrphys->mtrrphys_mask = rdmsr(ecx + 1);
 		if ((x86_feature & X86_PAT) && enable_relaxed_mtrr) {
 			mtrrphys->mtrrphys_mask &= ~MTRRPHYSMASK_V;
 		}
@@ -2534,7 +2524,6 @@
 void
 mtrr_sync()
 {
-	uint64_t my_mtrrdef;
 	uint_t	crvalue, cr0_orig;
 	int	vcnt, i, ecx;
 	struct	mtrrvar	*mtrrphys;
@@ -2546,33 +2535,33 @@
 	invalidate_cache();
 	setcr3(getcr3());
 
-	if (x86_feature & X86_PAT) {
-		(void) wrmsr(REG_MTRRPAT, &pat_attr_reg);
-	}
-	(void) rdmsr(REG_MTRRDEF, &my_mtrrdef);
-	my_mtrrdef &= ~MTRRDEF_E;
-	(void) wrmsr(REG_MTRRDEF, &my_mtrrdef);
+	if (x86_feature & X86_PAT)
+		wrmsr(REG_MTRRPAT, pat_attr_reg);
+
+	wrmsr(REG_MTRRDEF, rdmsr(REG_MTRRDEF) &
+	    ~((uint64_t)(uintptr_t)MTRRDEF_E));
+
 	if (mtrrcap & MTRRCAP_FIX) {
-		(void) wrmsr(REG_MTRR64K, &mtrr64k);
-		(void) wrmsr(REG_MTRR16K1, &mtrr16k1);
-		(void) wrmsr(REG_MTRR16K2, &mtrr16k2);
-		(void) wrmsr(REG_MTRR4K1, &mtrr4k1);
-		(void) wrmsr(REG_MTRR4K2, &mtrr4k2);
-		(void) wrmsr(REG_MTRR4K3, &mtrr4k3);
-		(void) wrmsr(REG_MTRR4K4, &mtrr4k4);
-		(void) wrmsr(REG_MTRR4K5, &mtrr4k5);
-		(void) wrmsr(REG_MTRR4K6, &mtrr4k6);
-		(void) wrmsr(REG_MTRR4K7, &mtrr4k7);
-		(void) wrmsr(REG_MTRR4K8, &mtrr4k8);
+		wrmsr(REG_MTRR64K, mtrr64k);
+		wrmsr(REG_MTRR16K1, mtrr16k1);
+		wrmsr(REG_MTRR16K2, mtrr16k2);
+		wrmsr(REG_MTRR4K1, mtrr4k1);
+		wrmsr(REG_MTRR4K2, mtrr4k2);
+		wrmsr(REG_MTRR4K3, mtrr4k3);
+		wrmsr(REG_MTRR4K4, mtrr4k4);
+		wrmsr(REG_MTRR4K5, mtrr4k5);
+		wrmsr(REG_MTRR4K6, mtrr4k6);
+		wrmsr(REG_MTRR4K7, mtrr4k7);
+		wrmsr(REG_MTRR4K8, mtrr4k8);
 	}
 	if ((vcnt = (mtrrcap & MTRRCAP_VCNTMASK)) > MAX_MTRRVAR)
 		vcnt = MAX_MTRRVAR;
 	for (i = 0, ecx = REG_MTRRPHYSBASE0, mtrrphys = mtrrphys_arr;
-		i <  vcnt - 1; i++, ecx += 2, mtrrphys++) {
-		(void) wrmsr(ecx, &mtrrphys->mtrrphys_base);
-		(void) wrmsr(ecx + 1, &mtrrphys->mtrrphys_mask);
+	    i <  vcnt - 1; i++, ecx += 2, mtrrphys++) {
+		wrmsr(ecx, mtrrphys->mtrrphys_base);
+		wrmsr(ecx + 1, mtrrphys->mtrrphys_mask);
 	}
-	(void) wrmsr(REG_MTRRDEF, &mtrrdef);
+	wrmsr(REG_MTRRDEF, mtrrdef);
 	setcr3(getcr3());
 	invalidate_cache();
 	setcr0(cr0_orig);
--- a/usr/src/uts/i86pc/vm/mach_i86mmu.c	Thu Oct 27 13:18:49 2005 -0700
+++ b/usr/src/uts/i86pc/vm/mach_i86mmu.c	Thu Oct 27 14:59:45 2005 -0700
@@ -578,17 +578,14 @@
 static void
 set_nxe(void)
 {
-	uint64_t efer;
-
 	if (mmu.pt_nx == 0)
 		return;
 
 	/*
 	 * AMD64 EFER is model specific register #0xc0000080 and NXE is bit 11
 	 */
-	(void) rdmsr(MSR_AMD_EFER, &efer);
-	efer |= AMD_EFER_NXE;
-	wrmsr(MSR_AMD_EFER, &efer);
+	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) |
+	    (uint64_t)(uintptr_t)AMD_EFER_NXE);
 }
 
 void (*set_nxe_func)(void) = set_nxe;
--- a/usr/src/uts/intel/ia32/ml/i86_subr.s	Thu Oct 27 13:18:49 2005 -0700
+++ b/usr/src/uts/intel/ia32/ml/i86_subr.s	Thu Oct 27 14:59:45 2005 -0700
@@ -2710,12 +2710,12 @@
 
 /*ARGSUSED*/
 uint64_t
-rdmsr(uint_t r, uint64_t *mtr)
+rdmsr(uint_t r)
 { return (0); }
 
 /*ARGSUSED*/
 void
-wrmsr(uint_t r, const uint64_t *mtr)
+wrmsr(uint_t r, const uint64_t val)
 {}
 
 void
@@ -2729,16 +2729,15 @@
 	ENTRY(rdmsr)
 	movl	%edi, %ecx
 	rdmsr
-	movl	%eax, (%rsi)
-	movl	%edx, 4(%rsi)
 	shlq	$32, %rdx
 	orq	%rdx, %rax
 	ret
 	SET_SIZE(rdmsr)
 
 	ENTRY(wrmsr)
-	movl	(%rsi), %eax
-	movl	4(%rsi), %edx
+	movq	%rsi, %rdx
+	shrq	$32, %rdx
+	movl	%esi, %eax
 	movl	%edi, %ecx
 	wrmsr
 	ret
@@ -2749,17 +2748,13 @@
 	ENTRY(rdmsr)
 	movl	4(%esp), %ecx
 	rdmsr
-	movl	8(%esp), %ecx
-	movl	%eax, (%ecx)
-	movl	%edx, 4(%ecx)
 	ret
 	SET_SIZE(rdmsr)
 
 	ENTRY(wrmsr)
-	movl	8(%esp), %ecx
-	movl	(%ecx), %eax
-	movl	4(%ecx), %edx
 	movl	4(%esp), %ecx
+	movl	8(%esp), %eax
+	movl	12(%esp), %edx
 	wrmsr
 	ret
 	SET_SIZE(wrmsr)
--- a/usr/src/uts/intel/pcbe/opteron_pcbe.c	Thu Oct 27 13:18:49 2005 -0700
+++ b/usr/src/uts/intel/pcbe/opteron_pcbe.c	Thu Oct 27 14:59:45 2005 -0700
@@ -20,7 +20,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -403,7 +403,6 @@
 						&nullcfgs[2], &nullcfgs[3] };
 	opt_pcbe_config_t	*pcfg = NULL;
 	int			i;
-	uint64_t		tmp;
 	uint32_t		curcr4 = getcr4();
 
 	/*
@@ -433,13 +432,13 @@
 	 */
 
 	for (i = 0; i < 4; i++) {
-		wrmsr(PES_BASE_ADDR + i, &cfgs[i]->opt_evsel);
-		wrmsr(PIC_BASE_ADDR + i, &cfgs[i]->opt_rawpic);
+		wrmsr(PES_BASE_ADDR + i, cfgs[i]->opt_evsel);
+		wrmsr(PIC_BASE_ADDR + i, cfgs[i]->opt_rawpic);
 	}
 
 	for (i = 0; i < 4; i++) {
-		tmp = cfgs[i]->opt_evsel | OPT_PES_ENABLE;
-		wrmsr(PES_BASE_ADDR + i, &tmp);
+		wrmsr(PES_BASE_ADDR + i, cfgs[i]->opt_evsel |
+		    (uint64_t)(uintptr_t)OPT_PES_ENABLE);
 	}
 }
 
@@ -447,10 +446,9 @@
 opt_pcbe_allstop(void)
 {
 	int		i;
-	uint64_t	tmp = 0;
 
 	for (i = 0; i < 4; i++)
-		wrmsr(PES_BASE_ADDR + i, &tmp);
+		wrmsr(PES_BASE_ADDR + i, 0ULL);
 
 	/*
 	 * Disable non-privileged access to the counter registers.
@@ -470,7 +468,7 @@
 	int64_t			diff;
 
 	for (i = 0; i < 4; i++)
-		(void) rdmsr(PIC_BASE_ADDR + i, &curpic[i]);
+		curpic[i] = rdmsr(PIC_BASE_ADDR + i);
 
 	/*
 	 * Query kernel for all configs which are co-programmed.
--- a/usr/src/uts/intel/pcbe/p123_pcbe.c	Thu Oct 27 13:18:49 2005 -0700
+++ b/usr/src/uts/intel/pcbe/p123_pcbe.c	Thu Oct 27 14:59:45 2005 -0700
@@ -20,7 +20,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -95,7 +95,7 @@
  */
 static int ptm_rdpmc_avail = 0;
 
-static const uint64_t allstopped = 0;
+#define	ALL_STOPPED	0ULL
 
 typedef struct _ptm_pcbe_config {
 	uint8_t		ptm_picno;	/* 0 for pic0 or 1 for pic1 */
@@ -573,7 +573,7 @@
 	 * 2) Either counter has requested an interrupt
 	 */
 
-	(void) rdmsr(REG_PERFEVNT0, &pes[0]);
+	pes[0] = rdmsr(REG_PERFEVNT0);
 	if (((uint32_t)pes[0] & P6_PES_EN) != P6_PES_EN)
 		return (0);
 
@@ -583,7 +583,7 @@
 	 * on this hardware other than by using unreliable heuristics.
 	 */
 
-	(void) rdmsr(REG_PERFEVNT1, &pes[1]);
+	pes[1] = rdmsr(REG_PERFEVNT1);
 	if ((uint32_t)pes[0] & P6_PES_INT)
 		ret |= 0x1;
 	if ((uint32_t)pes[1] & P6_PES_INT)
@@ -759,25 +759,23 @@
 	}
 
 	if (ptm_ver == PTM_VER_P5) {
-		uint64_t	cesr = 0;
-		wrmsr(P5_CESR, &allstopped);
-		wrmsr(P5_CTR0, &pic0->ptm_rawpic);
-		wrmsr(P5_CTR1, &pic1->ptm_rawpic);
-		cesr = pic0->ptm_ctl | pic1->ptm_ctl;
-		wrmsr(P5_CESR, &cesr);
-		(void) rdmsr(P5_CTR0, &pic0->ptm_rawpic);
-		(void) rdmsr(P5_CTR1, &pic1->ptm_rawpic);
+		wrmsr(P5_CESR, ALL_STOPPED);
+		wrmsr(P5_CTR0, pic0->ptm_rawpic);
+		wrmsr(P5_CTR1, pic1->ptm_rawpic);
+		wrmsr(P5_CESR, pic0->ptm_ctl | pic1->ptm_ctl);
+		pic0->ptm_rawpic = rdmsr(P5_CTR0);
+		pic1->ptm_rawpic = rdmsr(P5_CTR1);
 	} else {
 		uint64_t	pes;
-		wrmsr(REG_PERFEVNT0, &allstopped);
-		wrmsr(REG_PERFCTR0, &pic0->ptm_rawpic);
-		wrmsr(REG_PERFCTR1, &pic1->ptm_rawpic);
+		wrmsr(REG_PERFEVNT0, ALL_STOPPED);
+		wrmsr(REG_PERFCTR0, pic0->ptm_rawpic);
+		wrmsr(REG_PERFCTR1, pic1->ptm_rawpic);
 		pes = pic1->ptm_ctl;
 		DTRACE_PROBE1(ptm__pes1, uint64_t, pes);
-		wrmsr(REG_PERFEVNT1, &pes);
+		wrmsr(REG_PERFEVNT1, pes);
 		pes = pic0->ptm_ctl | (1 << CPC_P6_PES_EN);
 		DTRACE_PROBE1(ptm__pes0, uint64_t, pes);
-		wrmsr(REG_PERFEVNT0, &pes);
+		wrmsr(REG_PERFEVNT0, pes);
 	}
 }
 
@@ -785,9 +783,9 @@
 ptm_pcbe_allstop(void)
 {
 	if (ptm_ver == PTM_VER_P5)
-		wrmsr(P5_CESR, &allstopped);
+		wrmsr(P5_CESR, ALL_STOPPED);
 	else {
-		wrmsr(REG_PERFEVNT0, &allstopped);
+		wrmsr(REG_PERFEVNT0, ALL_STOPPED);
 		setcr4((uint32_t)getcr4() & ~CR4_PCE);
 	}
 }
@@ -826,11 +824,11 @@
 	ASSERT(pic0->ptm_picno == 0 && pic1->ptm_picno == 1);
 
 	if (ptm_ver == PTM_VER_P5) {
-		(void) rdmsr(P5_CTR0, &curpic[0]);
-		(void) rdmsr(P5_CTR1, &curpic[1]);
+		curpic[0] = rdmsr(P5_CTR0);
+		curpic[1] = rdmsr(P5_CTR1);
 	} else {
-		(void) rdmsr(REG_PERFCTR0, &curpic[0]);
-		(void) rdmsr(REG_PERFCTR1, &curpic[1]);
+		curpic[0] = rdmsr(REG_PERFCTR0);
+		curpic[1] = rdmsr(REG_PERFCTR1);
 	}
 
 	DTRACE_PROBE1(ptm__curpic0, uint64_t, curpic[0]);
--- a/usr/src/uts/intel/pcbe/p4_pcbe.c	Thu Oct 27 13:18:49 2005 -0700
+++ b/usr/src/uts/intel/pcbe/p4_pcbe.c	Thu Oct 27 14:59:45 2005 -0700
@@ -20,7 +20,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -493,7 +493,6 @@
 {
 	extern int	kcpc_hw_overflow_intr_installed;
 	uint64_t	ret = 0;
-	uint64_t	tmp;
 	int		i;
 
 	/*
@@ -502,8 +501,7 @@
 	 * safe to read the CCCR values here.
 	 */
 	for (i = 0; i < 18; i++) {
-		(void) rdmsr(p4_ctrs[i].pc_ctladdr, &tmp);
-		if (tmp & CCCR_OVF)
+		if (rdmsr(p4_ctrs[i].pc_ctladdr) & CCCR_OVF)
 			ret |= (1 << i);
 	}
 
@@ -786,7 +784,6 @@
 p4_pcbe_program(void *token)
 {
 	int			i;
-	uint64_t		escr;
 	uint64_t		cccr;
 	p4_pcbe_config_t	*cfgs[18];
 
@@ -813,6 +810,8 @@
 		int	lid = chip_plat_get_clogid(CPU); /* Logical ID of CPU */
 
 		for (i = 0; i < 18; i++) {
+			uint64_t escr;
+
 			if (cfgs[i] == NULL)
 				continue;
 			escr = (uint64_t)cfgs[i]->p4_escr;
@@ -826,8 +825,8 @@
 			if (cfgs[i]->p4_flags & P4_SIBLING_SYS)
 				escr |= (lid == 0) ? ESCR_T1_OS : ESCR_T0_OS;
 
-			wrmsr(p4_ctrs[i].pc_caddr, &cfgs[i]->p4_rawpic);
-			wrmsr(p4_escrs[cfgs[i]->p4_escr_ndx].pe_addr, &escr);
+			wrmsr(p4_ctrs[i].pc_caddr, cfgs[i]->p4_rawpic);
+			wrmsr(p4_escrs[cfgs[i]->p4_escr_ndx].pe_addr, escr);
 		}
 
 		for (i = 0; i < 18; i++) {
@@ -841,22 +840,22 @@
 			if (cfgs[i]->p4_flags & P4_PMI)
 				cccr |= (lid == 0) ?
 				    CCCR_OVF_PMI_T0 : CCCR_OVF_PMI_T1;
-			wrmsr(p4_ctrs[i].pc_ctladdr, &cccr);
+			wrmsr(p4_ctrs[i].pc_ctladdr, cccr);
 		}
 	} else {
 		for (i = 0; i < 18; i++) {
 			if (cfgs[i] == NULL)
 				continue;
-			escr = (uint64_t)cfgs[i]->p4_escr;
-			wrmsr(p4_ctrs[i].pc_caddr, &cfgs[i]->p4_rawpic);
-			wrmsr(p4_escrs[cfgs[i]->p4_escr_ndx].pe_addr, &escr);
+			wrmsr(p4_ctrs[i].pc_caddr, cfgs[i]->p4_rawpic);
+			wrmsr(p4_escrs[cfgs[i]->p4_escr_ndx].pe_addr,
+			    (uint64_t)cfgs[i]->p4_escr);
 		}
 
 		for (i = 0; i < 18; i++) {
 			if (cfgs[i] == NULL)
 				continue;
-			cccr = (uint64_t)cfgs[i]->p4_cccr;
-			wrmsr(p4_ctrs[i].pc_ctladdr, &cccr);
+			wrmsr(p4_ctrs[i].pc_ctladdr,
+			    (uint64_t)cfgs[i]->p4_cccr);
 		}
 	}
 }
@@ -865,10 +864,9 @@
 p4_pcbe_allstop(void)
 {
 	int		i;
-	uint64_t	tmp = 0;
 
 	for (i = 0; i < 18; i++)
-		wrmsr(p4_ctrs[i].pc_ctladdr, &tmp);
+		wrmsr(p4_ctrs[i].pc_ctladdr, 0ULL);
 
 	setcr4(getcr4() & ~CR4_PCE);
 }
@@ -884,7 +882,7 @@
 	int			i;
 
 	for (i = 0; i < 18; i++)
-		(void) rdmsr(p4_ctrs[i].pc_caddr, &curpic[i]);
+		curpic[i] = rdmsr(p4_ctrs[i].pc_caddr);
 
 	build_cfgs(cfgs, addrs, token);
 
--- a/usr/src/uts/intel/sys/x86_archext.h	Thu Oct 27 13:18:49 2005 -0700
+++ b/usr/src/uts/intel/sys/x86_archext.h	Thu Oct 27 14:59:45 2005 -0700
@@ -457,8 +457,8 @@
 #if defined(_KERNEL)
 
 
-extern uint64_t rdmsr(uint_t, uint64_t *);
-extern void wrmsr(uint_t, const uint64_t *);
+extern uint64_t rdmsr(uint_t);
+extern void wrmsr(uint_t, const uint64_t);
 extern void invalidate_cache(void);
 struct regs;
 extern int mca_exception(struct regs *);