changeset:	10567:f8c0a7fe6191
parent:		10566:b09132fd6cd8
child:		10568:05d6365b5963
author:		Vikram Hegde <Vikram.Hegde@Sun.COM>
date:		Mon, 14 Sep 2009 21:48:22 -0700
description:
6841893 Need to add quiesce method to amd_iommu in order for fast reboot to work.

files:
usr/src/uts/i86pc/io/amd_iommu/amd_iommu.c
usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c
usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c
usr/src/uts/i86pc/sys/amd_iommu.h
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu.c	Mon Sep 14 21:48:21 2009 -0700
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu.c	Mon Sep 14 21:48:22 2009 -0700
@@ -54,6 +54,7 @@
 static int amd_iommu_close(dev_t dev, int flag, int otyp, cred_t *credp);
 static int amd_iommu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
     cred_t *credp, int *rvalp);
+static int amd_iommu_quiesce(dev_info_t *dip);
 
 static struct cb_ops amd_iommu_cb_ops = {
 	amd_iommu_open,		/* cb_open */
@@ -87,7 +88,8 @@
 	nodev,			/* devo_reset */
 	&amd_iommu_cb_ops,	/* devo_cb_ops */
 	NULL,			/* devo_bus_ops */
-	nulldev			/* devo_power */
+	nulldev,		/* devo_power */
+	amd_iommu_quiesce,	/* devo_quiesce */
 };
 
 static struct modldrv modldrv = {
@@ -442,3 +444,26 @@
 
 	return (ENOTTY);
 }
+
+static int
+amd_iommu_quiesce(dev_info_t *dip)
+{
+	int instance = ddi_get_instance(dip);
+	struct amd_iommu_state *statep;
+	const char *f = "amd_iommu_quiesce";
+
+	statep = ddi_get_soft_state(amd_iommu_statep, instance);
+	if (statep == NULL) {
+		cmn_err(CE_WARN, "%s: cannot get soft state: instance %d",
+		    f, instance);
+		return (DDI_FAILURE);
+	}
+
+	if (amd_iommu_teardown(dip, statep, AMD_IOMMU_QUIESCE) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: Unable to quiesce AMD IOMMU "
+		    "%s%d", f, ddi_driver_name(dip), instance);
+		return (DDI_FAILURE);
+	}
+
+	return (DDI_SUCCESS);
+}
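
The quiesce(9E) entry point added above runs late in a fast reboot, with interrupts disabled and a single CPU running, so it must not block, take mutexes, or allocate or free memory; it may only reset the hardware through register accesses. The following is a minimal sketch of that contract for a hypothetical driver (the mydrv_* names and the single "stop" register are illustrative, not part of this changeset). Drivers whose hardware needs no quiescing can instead set devo_quiesce to the stock ddi_quiesce_not_needed(9F).

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/* Hypothetical per-instance soft state; a real driver defines its own. */
typedef struct mydrv_state {
	volatile uint64_t *ms_regs;	/* mapped device registers */
} mydrv_state_t;

static void *mydrv_statep;		/* set up via ddi_soft_state_init(9F) */

/*
 * quiesce(9E): no mutexes, no blocking DDI calls, no kmem_free();
 * silence DMA and interrupt sources with register writes and return.
 */
static int
mydrv_quiesce(dev_info_t *dip)
{
	mydrv_state_t *statep;

	statep = ddi_get_soft_state(mydrv_statep, ddi_get_instance(dip));
	if (statep == NULL)
		return (DDI_FAILURE);

	statep->ms_regs[0] = 0;	/* hypothetical "stop everything" register */

	return (DDI_SUCCESS);
}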
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c	Mon Sep 14 21:48:21 2009 -0700
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c	Mon Sep 14 21:48:22 2009 -0700
@@ -35,7 +35,7 @@
 #include "amd_iommu_acpi.h"
 #include "amd_iommu_page_tables.h"
 
-static int amd_iommu_fini(amd_iommu_t *iommu);
+static int amd_iommu_fini(amd_iommu_t *iommu, int type);
 static void amd_iommu_teardown_interrupts(amd_iommu_t *iommu);
 static void amd_iommu_stop(amd_iommu_t *iommu);
 
@@ -481,7 +481,7 @@
 }
 
 static void
-amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu)
+amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu, int type)
 {
 	dev_info_t *dip = iommu->aiomt_dip;
 	int instance = ddi_get_instance(dip);
@@ -493,12 +493,22 @@
 	    AMD_IOMMU_EVENTBASE, 0);
 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
 	    AMD_IOMMU_EVENTLEN, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
+	    AMD_IOMMU_EVENTHEADPTR, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
+	    AMD_IOMMU_EVENTTAILPTR, 0);
+
 
 	iommu->aiomt_cmdbuf = NULL;
 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
 	    AMD_IOMMU_COMBASE, 0);
 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
 	    AMD_IOMMU_COMLEN, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
+	    AMD_IOMMU_CMDHEADPTR, 0);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
+	    AMD_IOMMU_CMDTAILPTR, 0);
+
 
 	iommu->aiomt_devtbl = NULL;
 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
@@ -506,7 +516,7 @@
 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
 	    AMD_IOMMU_DEVTABSIZE, 0);
 
-	if (iommu->aiomt_dmahdl == NULL)
+	if (iommu->aiomt_dmahdl == NULL || type == AMD_IOMMU_QUIESCE)
 		return;
 
 	/* Unbind the handle */
@@ -1050,7 +1060,7 @@
 		    "control regs. Skipping IOMMU idx=%d", f, driver,
 		    instance, idx);
 		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
+		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
 		return (NULL);
 	}
 
@@ -1094,13 +1104,13 @@
 	 */
 	if (amd_iommu_setup_tables_and_buffers(iommu) != DDI_SUCCESS) {
 		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
+		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
 		return (NULL);
 	}
 
 	if (amd_iommu_setup_exclusion(iommu) != DDI_SUCCESS) {
 		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
+		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
 		return (NULL);
 	}
 
@@ -1108,7 +1118,7 @@
 
 	if (amd_iommu_setup_interrupts(iommu) != DDI_SUCCESS) {
 		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
+		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
 		return (NULL);
 	}
 
@@ -1125,20 +1135,20 @@
 	 */
 	if (amd_iommu_setup_passthru(iommu) != DDI_SUCCESS) {
 		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
+		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
 		return (NULL);
 	}
 
 	if (amd_iommu_start(iommu) != DDI_SUCCESS) {
 		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
+		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
 		return (NULL);
 	}
 
 	/* xxx register/start race  */
 	if (amd_iommu_register(iommu) != DDI_SUCCESS) {
 		mutex_exit(&iommu->aiomt_mutex);
-		(void) amd_iommu_fini(iommu);
+		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
 		return (NULL);
 	}
 
@@ -1151,7 +1161,7 @@
 }
 
 static int
-amd_iommu_fini(amd_iommu_t *iommu)
+amd_iommu_fini(amd_iommu_t *iommu, int type)
 {
 	int idx = iommu->aiomt_idx;
 	dev_info_t *dip = iommu->aiomt_dip;
@@ -1159,17 +1169,28 @@
 	const char *driver = ddi_driver_name(dip);
 	const char *f = "amd_iommu_fini";
 
-	mutex_enter(&iommu->aiomt_mutex);
-	if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
-		cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
-		    "idx = %d", f, driver, instance, idx);
-		return (DDI_FAILURE);
+	if (type == AMD_IOMMU_TEARDOWN) {
+		mutex_enter(&iommu->aiomt_mutex);
+		if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
+			cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
+			    "idx = %d", f, driver, instance, idx);
+			return (DDI_FAILURE);
+		}
 	}
+
 	amd_iommu_stop(iommu);
-	amd_iommu_fini_page_tables(iommu);
-	amd_iommu_teardown_interrupts(iommu);
-	amd_iommu_teardown_exclusion(iommu);
-	amd_iommu_teardown_tables_and_buffers(iommu);
+
+	if (type == AMD_IOMMU_TEARDOWN) {
+		amd_iommu_fini_page_tables(iommu);
+		amd_iommu_teardown_interrupts(iommu);
+		amd_iommu_teardown_exclusion(iommu);
+	}
+
+	amd_iommu_teardown_tables_and_buffers(iommu, type);
+
+	if (type == AMD_IOMMU_QUIESCE)
+		return (DDI_SUCCESS);
+
 	if (iommu->aiomt_va != NULL) {
 		hat_unload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
 		    iommu->aiomt_reg_size, HAT_UNLOAD_UNLOCK);
@@ -1246,7 +1267,7 @@
 		/* check if cap ID is secure device cap id */
 		if (id != PCI_CAP_ID_SECURE_DEV) {
 			if (amd_iommu_debug) {
-				cmn_err(CE_WARN,
+				cmn_err(CE_NOTE,
 				    "%s: %s%d: skipping IOMMU: idx(0x%x) "
 				    "cap ID (0x%x) != secure dev capid (0x%x)",
 				    f, driver, instance, idx, id,
@@ -1299,20 +1320,21 @@
 }
 
 int
-amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep)
+amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep, int type)
 {
 	int instance = ddi_get_instance(dip);
 	const char *driver = ddi_driver_name(dip);
-	amd_iommu_t *iommu;
+	amd_iommu_t *iommu, *next_iommu;
 	int teardown;
 	int error = DDI_SUCCESS;
 	const char *f = "amd_iommu_teardown";
 
 	teardown = 0;
 	for (iommu = statep->aioms_iommu_start; iommu;
-	    iommu = iommu->aiomt_next) {
+	    iommu = next_iommu) {
 		ASSERT(statep->aioms_nunits > 0);
-		if (amd_iommu_fini(iommu) != DDI_SUCCESS) {
+		next_iommu = iommu->aiomt_next;
+		if (amd_iommu_fini(iommu, type) != DDI_SUCCESS) {
 			error = DDI_FAILURE;
 			continue;
 		}
@@ -1394,7 +1416,7 @@
 	mutex_enter(&amd_iommu_pgtable_lock);
 
 	if (amd_iommu_debug == AMD_IOMMU_DEBUG_PAGE_TABLES) {
-		cmn_err(CE_WARN, "%s: %s%d: idx=%d Attempting to get cookies "
+		cmn_err(CE_NOTE, "%s: %s%d: idx=%d Attempting to get cookies "
 		    "from handle for device %s",
 		    f, driver, instance, idx, path);
 	}
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c	Mon Sep 14 21:48:21 2009 -0700
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c	Mon Sep 14 21:48:22 2009 -0700
@@ -510,14 +510,14 @@
 {
 	uint64_t *devtbl_entry;
 	amd_iommu_cmdargs_t cmdargs = {0};
-	int error;
+	int error, flags;
 	dev_info_t *idip = iommu->aiomt_dip;
 	const char *driver = ddi_driver_name(idip);
 	int instance = ddi_get_instance(idip);
 	const char *f = "amd_iommu_set_devtbl_entry";
 
 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
-		cmn_err(CE_WARN, "%s: attempting to set devtbl entry for %s",
+		cmn_err(CE_NOTE, "%s: attempting to set devtbl entry for %s",
 		    f, path);
 	}
 
@@ -536,10 +536,39 @@
 	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
 
 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
-		cmn_err(CE_WARN, "%s: deviceid=%u devtbl entry (%p) for %s",
+		cmn_err(CE_NOTE, "%s: deviceid=%u devtbl entry (%p) for %s",
 		    f, deviceid, (void *)(uintptr_t)(*devtbl_entry), path);
 	}
 
+	/*
+	 * Flush the IOMMU's internal caches; this is needed if we came
+	 * up via a fast reboot, as stale entries may still be cached.
+	 */
+	cmdargs.ca_deviceid = deviceid;
+	error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
+	    &cmdargs, 0, 0);
+	if (error != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: idx=%d: deviceid=%d"
+		    "Failed to invalidate domain in IOMMU HW cache",
+		    f, iommu->aiomt_idx, deviceid);
+		return (error);
+	}
+
+	cmdargs.ca_domainid = (uint16_t)domainid;
+	cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
+	flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
+	    AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
+
+	error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
+	    &cmdargs, flags, 0);
+	if (error != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: idx=%d: domainid=%d"
+		    "Failed to invalidate translations in IOMMU HW cache",
+		    f, iommu->aiomt_idx, cmdargs.ca_domainid);
+		return (error);
+	}
+
+	/* Initialize device table entry */
 	if (init_devtbl(iommu, devtbl_entry, domainid, dp)) {
 		cmdargs.ca_deviceid = deviceid;
 		error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
@@ -582,7 +611,7 @@
 	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
 
 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
-		cmn_err(CE_WARN, "%s: deviceid=%u devtbl entry (%p) for %s",
+		cmn_err(CE_NOTE, "%s: deviceid=%u devtbl entry (%p) for %s",
 		    f, deviceid, (void *)(uintptr_t)(*devtbl_entry), path);
 	}
 
@@ -1548,7 +1577,7 @@
 	for (pfn = pfn_start; pfn <= pfn_end; pfn++, pg++) {
 
 		if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
-			cmn_err(CE_WARN, "%s: attempting to create page tables "
+			cmn_err(CE_NOTE, "%s: attempting to create page tables "
 			    "for pfn = %p, va = %p, path = %s",
 			    f, (void *)(uintptr_t)(pfn << MMU_PAGESHIFT),
 			    (void *)(uintptr_t)(pg << MMU_PAGESHIFT), path);
@@ -1568,7 +1597,7 @@
 		}
 
 		if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
-			cmn_err(CE_WARN, "%s: successfuly created page tables "
+			cmn_err(CE_NOTE, "%s: successfuly created page tables "
 			    "for pfn = %p, vapg = %p, path = %s",
 			    f, (void *)(uintptr_t)pfn,
 			    (void *)(uintptr_t)pg, path);
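
The new block in amd_iommu_set_devtbl_entry() flushes the IOMMU's cached copy of the device table entry and then invalidates every cached translation for the domain before the new entry is installed. This matters after a fast reboot, where the IOMMU is not reset and may still hold entries programmed by the previous kernel. The address 0x7FFFFFFFFFFFF000 combined with the S (size) flag of an INVALIDATE_IOMMU_PAGES command is the AMD IOMMU specification's encoding for invalidating all pages in a domain. The sketch below restates just that flush sequence as a stand-alone helper, using only names visible in the hunk; flush_iommu_caches() itself is hypothetical, it assumes the driver's internal headers, and error reporting is trimmed for brevity.

/* Assumes the amd_iommu driver's internal headers are included. */
static int
flush_iommu_caches(amd_iommu_t *iommu, uint16_t deviceid, uint16_t domainid)
{
	amd_iommu_cmdargs_t cmdargs = {0};
	int flags;

	/* Drop the cached device table entry for this device. */
	cmdargs.ca_deviceid = deviceid;
	if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
	    &cmdargs, 0, 0) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Invalidate every cached translation for the domain: address
	 * 0x7FFFFFFFFFFFF000 with the S bit set means "all pages".
	 */
	cmdargs.ca_domainid = domainid;
	cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
	flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
	    AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;

	return (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
	    &cmdargs, flags, 0));
}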
--- a/usr/src/uts/i86pc/sys/amd_iommu.h	Mon Sep 14 21:48:21 2009 -0700
+++ b/usr/src/uts/i86pc/sys/amd_iommu.h	Mon Sep 14 21:48:22 2009 -0700
@@ -43,8 +43,11 @@
 	int aioms_nunits;			/* # of IOMMUs in function */
 } amd_iommu_state_t;
 
+#define	AMD_IOMMU_QUIESCE	(0)
+#define	AMD_IOMMU_TEARDOWN	(1)
+
 int amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep);
-int amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep);
+int amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep, int type);
 int amd_iommu_lookup_src_bdf(uint16_t bdf, uint16_t *src_bdfp);
 
 #endif	/* _KERNEL */
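
With the header change, callers select one of the two modes: the quiesce entry point added in amd_iommu.c passes AMD_IOMMU_QUIESCE, and presumably the existing detach path keeps its full-cleanup behavior by passing AMD_IOMMU_TEARDOWN, roughly as in this hypothetical wrapper (not part of this changeset):

/* Hypothetical detach-time wrapper, shown only to contrast the two modes. */
static int
mydrv_full_teardown(dev_info_t *dip, amd_iommu_state_t *statep)
{
	if (amd_iommu_teardown(dip, statep, AMD_IOMMU_TEARDOWN) != DDI_SUCCESS)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}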