6907671 oce driver should make MAC_PROP_ADV_10GFDX_CAP property as MAC_PROP_PERM_RW
6916252 LSO not working with S11 oce driver
6916255 Enhance the RX/TX handling in the oce driver
6916259 Code cleanup (remove redundant code) in the oce driver
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_buf.c Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_buf.c Fri Feb 19 18:04:10 2010 -0800
@@ -74,11 +74,14 @@
ASSERT(size > 0);
- dbuf = kmem_zalloc(sizeof (oce_dma_buf_t), KM_SLEEP);
+ dbuf = kmem_zalloc(sizeof (oce_dma_buf_t), KM_NOSLEEP);
+ if (dbuf == NULL) {
+ return (NULL);
+ }
/* allocate dma handle */
ret = ddi_dma_alloc_handle(dev->dip, &oce_dma_buf_attr,
- DDI_DMA_SLEEP, NULL, &dbuf->dma_handle);
+ DDI_DMA_DONTWAIT, NULL, &dbuf->dma_handle);
if (ret != DDI_SUCCESS) {
oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
"Failed to allocate DMA handle");
@@ -86,7 +89,7 @@
}
/* allocate the DMA-able memory */
ret = ddi_dma_mem_alloc(dbuf->dma_handle, size, &oce_dma_buf_accattr,
- flags, DDI_DMA_SLEEP, NULL, &dbuf->base,
+ flags, DDI_DMA_DONTWAIT, NULL, &dbuf->base,
&actual_len, &dbuf->acc_handle);
if (ret != DDI_SUCCESS) {
oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
@@ -98,7 +101,7 @@
ret = ddi_dma_addr_bind_handle(dbuf->dma_handle,
(struct as *)0, dbuf->base, actual_len,
DDI_DMA_RDWR | flags,
- DDI_DMA_SLEEP, NULL, &cookie, &count);
+ DDI_DMA_DONTWAIT, NULL, &cookie, &count);
if (ret != DDI_DMA_MAPPED) {
oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
"Failed to bind dma handle");
@@ -162,7 +165,10 @@
uint32_t size;
/* allocate the ring buffer */
- ring = kmem_zalloc(sizeof (oce_ring_buffer_t), KM_SLEEP);
+ ring = kmem_zalloc(sizeof (oce_ring_buffer_t), KM_NOSLEEP);
+ if (ring == NULL) {
+ return (NULL);
+ }
/* get the dbuf defining the ring */
size = num_items * item_size;
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_gld.c Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_gld.c Fri Feb 19 18:04:10 2010 -0800
@@ -36,6 +36,7 @@
mac_priv_prop_t oce_priv_props[] = {
{"_tx_ring_size", MAC_PROP_PERM_READ},
{"_tx_bcopy_limit", MAC_PROP_PERM_RW},
+ {"_rx_bcopy_limit", MAC_PROP_PERM_RW},
{"_rx_ring_size", MAC_PROP_PERM_READ},
};
uint32_t oce_num_props = sizeof (oce_priv_props) / sizeof (mac_priv_prop_t);
@@ -95,8 +96,6 @@
dev->state |= STATE_MAC_STARTED;
mutex_exit(&dev->dev_lock);
- /* enable interrupts */
- oce_ei(dev);
return (DDI_SUCCESS);
}
@@ -107,32 +106,16 @@
int qidx = 0;
int ret;
- ret = oce_hw_init(dev);
- if (ret != DDI_SUCCESS) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Hardware initialization failed with %d", ret);
- return (ret);
- }
-
- ret = oce_chip_hw_init(dev);
- if (ret != DDI_SUCCESS) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Chip initialization failed: %d", ret);
- oce_hw_fini(dev);
- return (ret);
- }
+ ret = oce_alloc_intr(dev);
+ if (ret != DDI_SUCCESS)
+ goto start_fail;
ret = oce_setup_handlers(dev);
if (ret != DDI_SUCCESS) {
oce_log(dev, CE_WARN, MOD_CONFIG,
"Interrupt handler setup failed with %d", ret);
- oce_chip_hw_fini(dev);
- oce_hw_fini(dev);
- return (ret);
+ (void) oce_teardown_intr(dev);
+ goto start_fail;
}
-
- (void) oce_start_wq(dev->wq[0]);
- (void) oce_start_rq(dev->rq[0]);
-
/* get link status */
(void) oce_get_link_status(dev, &dev->link);
@@ -147,6 +130,12 @@
dev->link.mac_duplex, dev->link.physical_port);
mac_link_update(dev->mac_handle, LINK_STATE_UP);
}
+
+ (void) oce_start_wq(dev->wq[0]);
+ (void) oce_start_rq(dev->rq[0]);
+ (void) oce_start_mq(dev->mq);
+ /* enable interrupts */
+ oce_ei(dev);
/* arm the eqs */
for (qidx = 0; qidx < dev->neqs; qidx++) {
oce_arm_eq(dev, dev->eq[qidx]->eq_id, 0, B_TRUE, B_FALSE);
@@ -154,6 +143,8 @@
/* update state */
return (DDI_SUCCESS);
+start_fail:
+ return (DDI_FAILURE);
} /* oce_start */
@@ -163,40 +154,34 @@
struct oce_dev *dev = arg;
/* disable interrupts */
- oce_di(dev);
mutex_enter(&dev->dev_lock);
-
- dev->state |= STATE_MAC_STOPPING;
-
if (dev->suspended) {
mutex_exit(&dev->dev_lock);
return;
}
-
+ dev->state |= STATE_MAC_STOPPING;
oce_stop(dev);
-
dev->state &= ~(STATE_MAC_STOPPING | STATE_MAC_STARTED);
-
mutex_exit(&dev->dev_lock);
}
-
+/* called with Tx/Rx comp locks held */
void
oce_stop(struct oce_dev *dev)
{
+ /* disable interrupts */
+ oce_di(dev);
+ oce_remove_handler(dev);
+ (void) oce_teardown_intr(dev);
+ mutex_enter(&dev->wq[0]->tx_lock);
+ mutex_enter(&dev->rq[0]->rx_lock);
+ mutex_enter(&dev->mq->lock);
/* complete the pending Tx */
- oce_stop_wq(dev->wq[0]);
-
- oce_chip_hw_fini(dev);
-
- OCE_MSDELAY(200);
- oce_stop_mq(dev->mq);
- oce_stop_rq(dev->rq[0]);
-
- /* remove interrupt handlers */
- oce_remove_handler(dev);
- /* release hw resources */
- oce_hw_fini(dev);
+ oce_clean_wq(dev->wq[0]);
+ /* Release all the locks */
+ mutex_exit(&dev->mq->lock);
+ mutex_exit(&dev->rq[0]->rx_lock);
+ mutex_exit(&dev->wq[0]->tx_lock);
} /* oce_stop */
@@ -206,8 +191,8 @@
struct oce_dev *dev = (struct oce_dev *)arg;
struct ether_addr *mca_drv_list;
- struct ether_addr *mca_hw_list;
- int new_mcnt = 0;
+ struct ether_addr mca_hw_list[OCE_MAX_MCA];
+ uint16_t new_mcnt = 0;
int ret;
int i;
@@ -215,31 +200,25 @@
if ((mca[0] & 0x1) == 0) {
return (EINVAL);
}
-
/* Allocate the local array for holding the addresses temporarily */
- mca_hw_list = kmem_zalloc(OCE_MAX_MCA * sizeof (struct ether_addr),
- KM_NOSLEEP);
+ bzero(&mca_hw_list, sizeof (mca_hw_list));
+ mca_drv_list = &dev->multi_cast[0];
- if (mca_hw_list == NULL)
- return (ENOMEM);
-
- mca_drv_list = &dev->multi_cast[0];
+ DEV_LOCK(dev);
if (add) {
/* check if we exceeded hw max supported */
- if (dev->num_mca >= OCE_MAX_MCA) {
- return (ENOENT);
+ if (dev->num_mca < OCE_MAX_MCA) {
+ /* copy entire dev mca to the mbx */
+ bcopy((void*)mca_drv_list,
+ (void*)mca_hw_list,
+ (dev->num_mca * sizeof (struct ether_addr)));
+ /* Append the new one to local list */
+ bcopy(mca, &mca_hw_list[dev->num_mca],
+ sizeof (struct ether_addr));
}
- /* copy entire dev mca to the mbx */
- bcopy((void*)mca_drv_list,
- (void *)mca_hw_list,
- (dev->num_mca * sizeof (struct ether_addr)));
- /* Append the new one to local list */
- bcopy(mca, &mca_hw_list[dev->num_mca],
- sizeof (struct ether_addr));
new_mcnt = dev->num_mca + 1;
-
} else {
- struct ether_addr *hwlistp = mca_hw_list;
+ struct ether_addr *hwlistp = &mca_hw_list[0];
for (i = 0; i < dev->num_mca; i++) {
/* copy only if it does not match */
if (bcmp((mca_drv_list + i), mca, ETHERADDRL)) {
@@ -251,27 +230,29 @@
new_mcnt = dev->num_mca - 1;
}
- mutex_enter(&dev->dev_lock);
if (dev->suspended) {
- mutex_exit(&dev->dev_lock);
goto finish;
}
- mutex_exit(&dev->dev_lock);
-
- ret = oce_set_multicast_table(dev, mca_hw_list, new_mcnt, B_FALSE);
+ if (new_mcnt == 0 || new_mcnt > OCE_MAX_MCA) {
+ ret = oce_set_multicast_table(dev, dev->if_id, NULL, 0, B_TRUE);
+ } else {
+ ret = oce_set_multicast_table(dev, dev->if_id,
+ &mca_hw_list[0], new_mcnt, B_FALSE);
+ }
if (ret != 0) {
- kmem_free(mca_hw_list,
- OCE_MAX_MCA * sizeof (struct ether_addr));
+ DEV_UNLOCK(dev);
return (EIO);
}
/*
* Copy the local structure to dev structure
*/
finish:
- bcopy(mca_hw_list, mca_drv_list,
- new_mcnt * sizeof (struct ether_addr));
+ if (new_mcnt && new_mcnt <= OCE_MAX_MCA) {
+ bcopy(mca_hw_list, mca_drv_list,
+ new_mcnt * sizeof (struct ether_addr));
+ }
dev->num_mca = (uint16_t)new_mcnt;
- kmem_free(mca_hw_list, OCE_MAX_MCA * sizeof (struct ether_addr));
+ DEV_UNLOCK(dev);
return (0);
} /* oce_m_multicast */
@@ -287,19 +268,21 @@
DEV_UNLOCK(dev);
return (DDI_SUCCESS);
}
- DEV_UNLOCK(dev);
/* Delete previous one and add new one */
- ret = oce_del_mac(dev, &dev->pmac_id);
+ ret = oce_del_mac(dev, dev->if_id, &dev->pmac_id);
if (ret != DDI_SUCCESS) {
+ DEV_UNLOCK(dev);
return (EIO);
}
/* Set the New MAC addr earlier is no longer valid */
- ret = oce_add_mac(dev, uca, &dev->pmac_id);
+ ret = oce_add_mac(dev, dev->if_id, uca, &dev->pmac_id);
if (ret != DDI_SUCCESS) {
+ DEV_UNLOCK(dev);
return (EIO);
}
+ DEV_UNLOCK(dev);
return (ret);
} /* oce_m_unicast */
@@ -309,22 +292,27 @@
struct oce_dev *dev = arg;
mblk_t *nxt_pkt;
mblk_t *rmp = NULL;
+ struct oce_wq *wq;
DEV_LOCK(dev);
- if (dev->suspended) {
+ if (dev->suspended || !(dev->state & STATE_MAC_STARTED)) {
DEV_UNLOCK(dev);
freemsg(mp);
return (NULL);
}
DEV_UNLOCK(dev);
+ wq = dev->wq[0];
while (mp != NULL) {
/* Save the Pointer since mp will be freed in case of copy */
nxt_pkt = mp->b_next;
mp->b_next = NULL;
/* Hardcode wq since we have only one */
- rmp = oce_send_packet(dev->wq[0], mp);
+ rmp = oce_send_packet(wq, mp);
if (rmp != NULL) {
+ /* reschedule Tx */
+ wq->resched = B_TRUE;
+ oce_arm_cq(dev, wq->cq->cq_id, 0, B_TRUE);
/* restore the chain */
rmp->b_next = nxt_pkt;
break;
@@ -372,6 +360,7 @@
struct oce_dev *dev = arg;
int ret = 0;
+ DEV_LOCK(dev);
switch (id) {
case MAC_PROP_MTU: {
uint32_t mtu;
@@ -451,6 +440,7 @@
break;
} /* switch id */
+ DEV_UNLOCK(dev);
return (ret);
} /* oce_m_setprop */
@@ -461,6 +451,8 @@
struct oce_dev *dev = arg;
uint32_t ret = 0;
+ *perm = MAC_PROP_PERM_READ;
+
switch (id) {
case MAC_PROP_AUTONEG:
case MAC_PROP_EN_AUTONEG:
@@ -478,14 +470,16 @@
case MAC_PROP_EN_10HDX_CAP:
case MAC_PROP_ADV_100T4_CAP:
case MAC_PROP_EN_100T4_CAP: {
- *perm = MAC_PROP_PERM_READ;
*(uint8_t *)val = 0x0;
break;
}
- case MAC_PROP_ADV_10GFDX_CAP:
+ case MAC_PROP_ADV_10GFDX_CAP: {
+ *(uint8_t *)val = 0x01;
+ break;
+ }
+
case MAC_PROP_EN_10GFDX_CAP: {
- *perm = MAC_PROP_PERM_READ;
*(uint8_t *)val = 0x01;
break;
}
@@ -566,6 +560,7 @@
break;
}
default:
+ ret = ENOTSUP;
break;
} /* switch id */
return (ret);
@@ -625,17 +620,18 @@
DEV_UNLOCK(dev);
return (ret);
}
- dev->promisc = enable;
if (dev->suspended) {
+ /* remember the setting */
+ dev->promisc = enable;
DEV_UNLOCK(dev);
return (ret);
}
+ ret = oce_set_promiscuous(dev, enable);
+ if (ret == DDI_SUCCESS)
+ dev->promisc = enable;
DEV_UNLOCK(dev);
-
- ret = oce_set_promiscuous(dev, enable);
-
return (ret);
} /* oce_m_promiscuous */
@@ -679,8 +675,18 @@
if (strcmp(name, "_tx_bcopy_limit") == 0) {
(void) ddi_strtol(val, (char **)NULL, 0, &result);
if (result <= OCE_WQ_BUF_SIZE) {
- if (result != dev->bcopy_limit)
- dev->bcopy_limit = (uint32_t)result;
+ if (result != dev->tx_bcopy_limit)
+ dev->tx_bcopy_limit = (uint32_t)result;
+ ret = 0;
+ } else {
+ ret = EINVAL;
+ }
+ }
+ if (strcmp(name, "_rx_bcopy_limit") == 0) {
+ (void) ddi_strtol(val, (char **)NULL, 0, &result);
+ if (result <= OCE_RQ_BUF_SIZE) {
+ if (result != dev->rx_bcopy_limit)
+ dev->rx_bcopy_limit = (uint32_t)result;
ret = 0;
} else {
ret = EINVAL;
@@ -722,7 +728,13 @@
}
if (strcmp(name, "_tx_bcopy_limit") == 0) {
- value = dev->bcopy_limit;
+ value = dev->tx_bcopy_limit;
+ ret = 0;
+ goto done;
+ }
+
+ if (strcmp(name, "_rx_bcopy_limit") == 0) {
+ value = dev->rx_bcopy_limit;
ret = 0;
goto done;
}
@@ -735,7 +747,7 @@
}
done:
- if (ret != 0) {
+ if (ret == 0) {
(void) snprintf(val, size, "%d", value);
}
return (ret);
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_hw.c Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_hw.c Fri Feb 19 18:04:10 2010 -0800
@@ -43,14 +43,8 @@
extern int oce_destroy_q(struct oce_dev *dev, struct oce_mbx *mbx,
size_t req_size, enum qtype qtype);
-/*
- * function to map the device memory
- *
- * dev - handle to device private data structure
- *
- */
-int
-oce_pci_init(struct oce_dev *dev)
+static int
+oce_map_regs(struct oce_dev *dev)
{
int ret = 0;
off_t bar_size = 0;
@@ -126,7 +120,46 @@
ddi_regs_map_free(&dev->dev_cfg_handle);
return (DDI_FAILURE);
}
+ return (DDI_SUCCESS);
+}
+static void
+oce_unmap_regs(struct oce_dev *dev)
+{
+ ASSERT(NULL != dev);
+ ASSERT(NULL != dev->dip);
+
+ ddi_regs_map_free(&dev->db_handle);
+ ddi_regs_map_free(&dev->csr_handle);
+ ddi_regs_map_free(&dev->dev_cfg_handle);
+
+}
+
+
+
+
+
+/*
+ * function to map the device memory
+ *
+ * dev - handle to device private data structure
+ *
+ */
+int
+oce_pci_init(struct oce_dev *dev)
+{
+ int ret = 0;
+
+ ret = pci_config_setup(dev->dip, &dev->pci_cfg_handle);
+ if (ret != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+
+ ret = oce_map_regs(dev);
+
+ if (ret != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
dev->fn = OCE_PCI_FUNC(dev);
ret = oce_fm_check_acc_handle(dev, dev->dev_cfg_handle);
@@ -147,309 +180,10 @@
void
oce_pci_fini(struct oce_dev *dev)
{
- ASSERT(NULL != dev);
- ASSERT(NULL != dev->dip);
-
- ddi_regs_map_free(&dev->db_handle);
- ddi_regs_map_free(&dev->csr_handle);
- ddi_regs_map_free(&dev->dev_cfg_handle);
+ oce_unmap_regs(dev);
+ pci_config_teardown(&dev->pci_cfg_handle);
} /* oce_pci_fini */
-/*
- * function to initailise the hardware. This includes creation of queues,
- * interfaces and associated buffers for data movement
- *
- * dev - software handle to the device
- *
- */
-int
-oce_hw_init(struct oce_dev *dev)
-{
- int ret = DDI_SUCCESS;
-
- /* create an interface for the device with out mac */
- ret = oce_if_create(dev, OCE_DEFAULT_IF_CAP, OCE_DEFAULT_IF_CAP_EN,
- 0, &dev->mac_addr[0], (uint32_t *)&dev->if_id);
- if (ret != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Interface creation failed: 0x%x", ret);
- dev->if_id = OCE_INVAL_IF_ID;
- goto init_fail;
- }
-
- dev->if_cap_flags = OCE_DEFAULT_IF_CAP_EN;
-
- /* Enable VLAN Promisc on HW */
- ret = oce_config_vlan(dev, (uint8_t)dev->if_id, NULL, 0,
- B_TRUE, B_TRUE);
- if (ret != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Config vlan failed: %d", ret);
- goto init_fail;
-
- }
-
- /* set default flow control */
- ret = oce_set_flow_control(dev, dev->flow_control);
- if (ret != 0) {
- oce_log(dev, CE_NOTE, MOD_CONFIG,
- "Set flow control failed: %d", ret);
- }
-
- /* set to promiscuous mode */
- ret = oce_set_promiscuous(dev, dev->promisc);
-
- if (ret != 0) {
- oce_log(dev, CE_NOTE, MOD_CONFIG,
- "Set Promisc failed: %d", ret);
- }
- /* this could happen if the driver is resuming after suspend */
- if (dev->num_mca > 0) {
- ret = oce_set_multicast_table(dev, dev->multi_cast,
- dev->num_mca, B_FALSE);
- if (ret != 0) {
- oce_log(dev, CE_NOTE, MOD_CONFIG,
- "Set Multicast failed: %d", ret);
- }
- }
-
- /* we are done. Now return */
- return (DDI_SUCCESS);
-
-init_fail:
- oce_hw_fini(dev);
- return (DDI_FAILURE);
-} /* oce_hw_init */
-
-/*
- * function to return resources allocated in oce_hw_init
- *
- * dev - software handle to the device
- *
- */
-void
-oce_hw_fini(struct oce_dev *dev)
-{
- int i;
-
- /* release OS resources */
- if (dev->mq != NULL) {
- (void) oce_mq_del(dev, dev->mq);
- dev->mq = NULL;
- }
-
- if (dev->wq[0] != NULL) {
- (void) oce_wq_del(dev, dev->wq[0]);
- dev->wq[0] = NULL;
- }
- for (i = 0; i < dev->num_vectors; i++) {
- if (dev->eq[i] != NULL) {
- if (oce_eq_del(dev, dev->eq[i])) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "eq[%d] del failed", i);
- }
- dev->eq[i] = NULL;
- }
- }
- if (dev->if_id >= 0) {
- (void) oce_if_del(dev, dev->if_id);
- }
-
- if (dev->rq[0] != NULL) {
- (void) oce_rq_del(dev, dev->rq[0]);
- dev->rq[0] = NULL;
- }
-} /* oce_hw_fini */
-
-int
-oce_chip_hw_init(struct oce_dev *dev)
-{
- struct oce_wq *wq;
- struct oce_rq *rq;
- struct oce_eq *eq;
- struct oce_mq *mq;
- int i = 0;
-
- /*
- * create Event Queues. One event queue per available vector. In
- * case of INTX, only one vector is available and will handle
- * event notification for Write Queue (WQ), Receive Queue (RQ) and
- * Mbox Queue (MQ).
- *
- * The EQ is not directly used by the WQ, RQ and MQ. The WQ, RQ and
- * MQ is composed of a Completion Queue (CQ) that is created per
- * queue and is dependent on the queue type. The EQ passed is
- * associated with the CQ at the time of creation.
- *
- * In the case of MSIX, there will be one EQ for the RQ and one EQ
- * shared between the WQ and MQ.
- */
- for (i = 0; i < dev->num_vectors; i++) {
- eq = oce_eq_create(dev, EQ_LEN_1024, EQE_SIZE_4, 0);
- if (eq == NULL) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "EQ creation(%d) failed ", i);
- goto chip_fail;
- }
- /* Save the eq pointer */
- dev->eq[eq->eq_id % OCE_MAX_EQ] = eq;
- }
-
- /*
- * create the Write Queue (WQ). The WQ is the low level sructure for
- * queueing send packets. It maintains a ring buffer to queue packets
- * to be sent out on the wire and return the context to the host
- * when there is a send complete event.
- *
- * The WQ uses a Completion Queue (CQ) with an associated EQ for
- * handling send completion events.
- */
- wq = oce_wq_create(dev, dev->eq[0],
- dev->tx_ring_size, NIC_WQ_TYPE_STANDARD);
- if (wq == NULL) {
- oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
- "WQ creation failed ");
- goto chip_fail;
- }
- /* store the WQ pointer */
- dev->wq[0] = wq;
-
- /*
- * create the Receive Queue (RQ). The RQ is the low level structure
- * for receiving data from the wire, It implements a ring buffer
- * that allows the adpater to DMA data onto host buffers.
- *
- * The RQ uses a Completion Queue (CQ) with an associated EQ for
- * handling recieve events when packets are received by the adapter
- */
- rq = oce_rq_create(dev,
- ((dev->num_vectors > 1) ? dev->eq[1] : dev->eq[0]),
- dev->rx_ring_size,
- OCE_RQ_BUF_SIZE, OCE_RQ_MAX_FRAME_SZ,
- dev->if_id, B_FALSE);
- if (rq == NULL) {
- oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
- "RQ creation failed ");
- goto chip_fail;
- }
- dev->rq[0] = rq;
-
- /*
- * create the Mailbox Queue (MQ). Only one per adapter instance can
- * be created. The MQ is used for receiving asynchronous adapter
- * events, like link status updates.
- *
- * The MQ uses an Asynchronous CQ (ACQ) with an associated EQ for
- * handling asynchronous event notification to the host.
- */
- mq = oce_mq_create(dev, dev->eq[0], 64);
- if (mq == NULL) {
- oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
- "MQ creation failed ");
- goto chip_fail;
- }
- dev->mq = mq;
-
- return (DDI_SUCCESS);
-chip_fail:
- oce_chip_hw_fini(dev);
- return (DDI_FAILURE);
-} /* oce_chip_hw_init */
-
-
-void
-oce_chip_hw_fini(struct oce_dev *dev)
-{
- struct oce_mbx mbx;
- struct mbx_destroy_common_mq *mq_cmd;
- struct mbx_delete_nic_rq *rq_cmd;
- struct mbx_delete_nic_wq *wq_cmd;
- struct mbx_destroy_common_cq *cq_cmd;
- struct oce_mq *mq = dev->mq;
- struct oce_rq *rq = dev->rq[0];
- struct oce_wq *wq = dev->wq[0];
- struct oce_eq *eq = NULL;
- struct mbx_destroy_common_eq *eq_cmd;
- int i;
-
- if (mq != NULL) {
-
- /* send a command to delete the MQ */
- bzero(&mbx, sizeof (struct oce_mbx));
- mq_cmd = (struct mbx_destroy_common_mq *)&mbx.payload;
-
- mq_cmd->params.req.id = mq->mq_id;
- (void) oce_destroy_q(dev, &mbx,
- sizeof (struct mbx_destroy_common_cq), QTYPE_MQ);
-
- /* send a command to delete the MQ_CQ */
- bzero(&mbx, sizeof (struct oce_mbx));
- cq_cmd = (struct mbx_destroy_common_cq *)&mbx.payload;
-
- cq_cmd->params.req.id = mq->cq->cq_id;
- (void) oce_destroy_q(dev, &mbx,
- sizeof (struct mbx_destroy_common_cq), QTYPE_CQ);
- mq->ring->pidx = mq->ring->cidx = 0;
- }
-
- if (rq != NULL) {
- /* send a command to delete the RQ */
- bzero(&mbx, sizeof (struct oce_mbx));
-
- rq_cmd = (struct mbx_delete_nic_rq *)&mbx.payload;
- rq_cmd->params.req.rq_id = rq->rq_id;
-
- (void) oce_destroy_q(dev, &mbx,
- sizeof (struct mbx_delete_nic_rq), QTYPE_RQ);
-
- rq->ring->cidx = rq->ring->pidx = 0;
-
- /* send a command to delete the RQ_CQ */
- bzero(&mbx, sizeof (struct oce_mbx));
- cq_cmd = (struct mbx_destroy_common_cq *)&mbx.payload;
-
- cq_cmd->params.req.id = rq->cq->cq_id;
- (void) oce_destroy_q(dev, &mbx,
- sizeof (struct mbx_destroy_common_cq), QTYPE_CQ);
- rq->cq->ring->pidx = rq->cq->ring->cidx = 0;
- }
-
- if (wq != NULL) {
- /* send a command to delete the WQ */
- bzero(&mbx, sizeof (struct oce_mbx));
-
- /* now fill the command */
- wq_cmd = (struct mbx_delete_nic_wq *)&mbx.payload;
- wq_cmd->params.req.wq_id = wq->wq_id;
- (void) oce_destroy_q(dev, &mbx,
- sizeof (struct mbx_delete_nic_wq), QTYPE_WQ);
-
- wq->ring->pidx = wq->ring->cidx = 0;
-
- /* send a command to delete the WQ_CQ */
- bzero(&mbx, sizeof (struct oce_mbx));
- cq_cmd = (struct mbx_destroy_common_cq *)&mbx.payload;
- cq_cmd->params.req.id = wq->cq->cq_id;
- (void) oce_destroy_q(dev, &mbx,
- sizeof (struct mbx_destroy_common_cq), QTYPE_CQ);
- wq->cq->ring->pidx = wq->cq->ring->cidx = 0;
- }
-
- for (i = 0; i < dev->num_vectors; i++) {
- eq = dev->eq[i];
- if (eq != NULL) {
- bzero(&mbx, sizeof (struct oce_mbx));
- /* send a command to delete the EQ */
- eq_cmd = (struct mbx_destroy_common_eq *)&mbx.payload;
-
- eq_cmd->params.req.id = eq->eq_id;
-
- (void) oce_destroy_q(dev, &mbx,
- sizeof (struct mbx_destroy_common_eq), QTYPE_EQ);
- eq->ring->pidx = eq->ring->cidx = 0;
- }
- }
-}
/*
* function to check if a reset is required
@@ -470,11 +204,7 @@
if (post_status.bits.stage == POST_STAGE_ARMFW_READY) {
return (B_FALSE);
- } else if ((post_status.bits.stage <= POST_STAGE_AWAITING_HOST_RDY) ||
- post_status.bits.stage == POST_STAGE_ARMFW_UE) {
- return (B_TRUE);
}
-
return (B_TRUE);
} /* oce_is_reset_pci */
@@ -554,27 +284,17 @@
}
post_status.dw0 = OCE_CSR_READ32(dev, MPU_EP_SEMAPHORE);
- if (post_status.bits.error) break;
+ if (post_status.bits.error) {
+ oce_log(dev, CE_WARN, MOD_CONFIG,
+ "0x%x POST ERROR!!", post_status.dw0);
+ return (DDI_FAILURE);
+ }
if (post_status.bits.stage == POST_STAGE_ARMFW_READY)
- break;
+ return (DDI_SUCCESS);
drv_usecwait(100);
}
-
- if (post_status.bits.error) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "0x%x POST ERROR!!", post_status.dw0);
- return (DDI_FAILURE);
- } else if (post_status.bits.stage == POST_STAGE_ARMFW_READY) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "0x%x POST SUCCESSFUL",
- post_status.dw0);
- return (DDI_SUCCESS);
- } else {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "0x%x POST timedout", post_status.dw0);
- return (DDI_FAILURE);
- }
+ return (DDI_FAILURE);
} /* oce_POST */
/*
* function to modify register access attributes corresponding to the
@@ -594,3 +314,170 @@
reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
}
} /* oce_set_fma_flags */
+
+
+int
+oce_create_nw_interface(struct oce_dev *dev)
+{
+ int ret;
+
+ /* create an interface for the device with out mac */
+ ret = oce_if_create(dev, OCE_DEFAULT_IF_CAP, OCE_DEFAULT_IF_CAP_EN,
+ 0, &dev->mac_addr[0], (uint32_t *)&dev->if_id);
+ if (ret != 0) {
+ oce_log(dev, CE_WARN, MOD_CONFIG,
+ "Interface creation failed: 0x%x", ret);
+ return (ret);
+ }
+ atomic_inc_32(&dev->nifs);
+
+ dev->if_cap_flags = OCE_DEFAULT_IF_CAP_EN;
+
+ /* Enable VLAN Promisc on HW */
+ ret = oce_config_vlan(dev, (uint8_t)dev->if_id, NULL, 0,
+ B_TRUE, B_TRUE);
+ if (ret != 0) {
+ oce_log(dev, CE_WARN, MOD_CONFIG,
+ "Config vlan failed: %d", ret);
+ oce_delete_nw_interface(dev);
+ return (ret);
+
+ }
+
+ /* set default flow control */
+ ret = oce_set_flow_control(dev, dev->flow_control);
+ if (ret != 0) {
+ oce_log(dev, CE_NOTE, MOD_CONFIG,
+ "Set flow control failed: %d", ret);
+ }
+ ret = oce_set_promiscuous(dev, dev->promisc);
+
+ if (ret != 0) {
+ oce_log(dev, CE_NOTE, MOD_CONFIG,
+ "Set Promisc failed: %d", ret);
+ }
+#if 0
+ /* this could happen if the driver is resuming after suspend */
+ if (dev->num_mca > 0) {
+ ret = oce_set_multicast_table(dev, dev->multi_cast,
+ dev->num_mca);
+ if (ret != 0) {
+ oce_log(dev, CE_NOTE, MOD_CONFIG,
+ "Set Multicast failed: %d", ret);
+ }
+ }
+#endif
+
+ return (0);
+}
+
+void
+oce_delete_nw_interface(struct oce_dev *dev) {
+
+ /* currently only single interface is implemented */
+ if (dev->nifs > 0) {
+ (void) oce_if_del(dev, dev->if_id);
+ atomic_dec_32(&dev->nifs);
+ }
+}
+
+
+int
+oce_setup_adapter(struct oce_dev *dev)
+{
+ int ret;
+ ret = oce_create_nw_interface(dev);
+ if (ret != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+ ret = oce_create_queues(dev);
+ if (ret != DDI_SUCCESS) {
+ oce_delete_nw_interface(dev);
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+}
+
+void
+oce_unsetup_adapter(struct oce_dev *dev)
+{
+ oce_delete_queues(dev);
+ oce_delete_nw_interface(dev);
+}
+
+int
+oce_hw_init(struct oce_dev *dev)
+{
+ int ret;
+ struct mac_address_format mac_addr;
+
+ ret = oce_POST(dev);
+ if (ret != DDI_SUCCESS) {
+ oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
+ "!!!HW POST1 FAILED");
+ /* ADD FM FAULT */
+ return (DDI_FAILURE);
+ }
+ /* create bootstrap mailbox */
+ dev->bmbx = oce_alloc_dma_buffer(dev,
+ sizeof (struct oce_bmbx), DDI_DMA_CONSISTENT);
+ if (dev->bmbx == NULL) {
+ oce_log(dev, CE_WARN, MOD_CONFIG,
+ "Failed to allocate bmbx: size = %u",
+ (uint32_t)sizeof (struct oce_bmbx));
+ return (DDI_FAILURE);
+ }
+
+ ret = oce_reset_fun(dev);
+ if (ret != 0) {
+ oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
+ "!!!FUNCTION RESET FAILED");
+ goto init_fail;
+ }
+
+ /* reset the Endianess of BMBX */
+ ret = oce_mbox_init(dev);
+ if (ret != 0) {
+ oce_log(dev, CE_WARN, MOD_CONFIG,
+ "Mailbox initialization Failed with %d", ret);
+ goto init_fail;
+ }
+
+ /* read the firmware version */
+ ret = oce_get_fw_version(dev);
+ if (ret != 0) {
+ oce_log(dev, CE_WARN, MOD_CONFIG,
+ "Firmware version read failed with %d", ret);
+ goto init_fail;
+ }
+
+ /* read the fw config */
+ ret = oce_get_fw_config(dev);
+ if (ret != 0) {
+ oce_log(dev, CE_WARN, MOD_CONFIG,
+ "Firmware configuration read failed with %d", ret);
+ goto init_fail;
+ }
+
+ /* read the Factory MAC address */
+ ret = oce_read_mac_addr(dev, 0, 1,
+ MAC_ADDRESS_TYPE_NETWORK, &mac_addr);
+ if (ret != 0) {
+ oce_log(dev, CE_WARN, MOD_CONFIG,
+ "MAC address read failed with %d", ret);
+ goto init_fail;
+ }
+ bcopy(&mac_addr.mac_addr[0], &dev->mac_addr[0], ETHERADDRL);
+ return (DDI_SUCCESS);
+init_fail:
+ oce_hw_fini(dev);
+ return (DDI_FAILURE);
+}
+void
+oce_hw_fini(struct oce_dev *dev)
+{
+ if (dev->bmbx != NULL) {
+ oce_free_dma_buffer(dev, dev->bmbx);
+ dev->bmbx = NULL;
+ }
+}
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_intr.c Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_intr.c Fri Feb 19 18:04:10 2010 -0800
@@ -53,17 +53,38 @@
oce_setup_intr(struct oce_dev *dev)
{
int ret;
+ int intr_types = 0;
- if (dev->intr_types & DDI_INTR_TYPE_MSIX) {
- ret = oce_setup_msix(dev);
- if (ret == 0)
- return (ret);
+ /* get supported intr types */
+ ret = ddi_intr_get_supported_types(dev->dip, &intr_types);
+ if (ret != DDI_SUCCESS) {
+ oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
+ "Failed to retrieve intr types ");
+ return (DDI_FAILURE);
+ }
+
+ if (intr_types & DDI_INTR_TYPE_MSIX) {
+ dev->intr_types = DDI_INTR_TYPE_MSIX;
+ dev->num_vectors = 2;
+ return (DDI_SUCCESS);
}
- if (dev->intr_types & DDI_INTR_TYPE_FIXED) {
- ret = oce_setup_intx(dev);
- if (ret == 0)
- return (ret);
+ if (intr_types & DDI_INTR_TYPE_FIXED) {
+ dev->intr_types = DDI_INTR_TYPE_FIXED;
+ dev->num_vectors = 1;
+ return (DDI_SUCCESS);
+ }
+ return (DDI_FAILURE);
+}
+
+int
+oce_alloc_intr(struct oce_dev *dev)
+{
+ if (dev->intr_types == DDI_INTR_TYPE_MSIX) {
+ return (oce_setup_msix(dev));
+ }
+ if (dev->intr_types == DDI_INTR_TYPE_FIXED) {
+ return (oce_setup_intx(dev));
}
return (DDI_FAILURE);
@@ -192,8 +213,6 @@
int i;
int ret;
- oce_chip_di(dev);
-
if (dev->intr_cap & DDI_INTR_FLAG_BLOCK) {
(void) ddi_intr_block_disable(dev->htable, dev->num_vectors);
} else {
@@ -205,6 +224,7 @@
}
}
}
+ oce_chip_di(dev);
} /* oce_di */
/*
@@ -237,21 +257,21 @@
return (DDI_FAILURE);
}
- if (navail < OCE_NUM_USED_VECTORS)
+ if (navail < dev->num_vectors)
return (DDI_FAILURE);
- dev->num_vectors = OCE_NUM_USED_VECTORS;
- dev->intr_types = DDI_INTR_TYPE_MSIX;
-
/* allocate htable */
dev->htable = kmem_zalloc(dev->num_vectors *
- sizeof (ddi_intr_handle_t), KM_SLEEP);
+ sizeof (ddi_intr_handle_t), KM_NOSLEEP);
+
+ if (dev->htable == NULL)
+ return (DDI_FAILURE);
/* allocate interrupt handlers */
ret = ddi_intr_alloc(dev->dip, dev->htable, DDI_INTR_TYPE_MSIX,
0, dev->num_vectors, &navail, DDI_INTR_ALLOC_NORMAL);
- if (ret != DDI_SUCCESS || navail < OCE_NUM_USED_VECTORS) {
+ if (ret != DDI_SUCCESS || navail < dev->num_vectors) {
oce_log(dev, CE_WARN, MOD_CONFIG,
"Alloc intr failed: %d %d",
navail, ret);
@@ -324,9 +344,6 @@
int ret;
int i;
- if (!(dev->intr_types & DDI_INTR_TYPE_MSIX)) {
- return (DDI_FAILURE);
- }
for (i = 0; i < dev->neqs; i++) {
ret = ddi_intr_add_handler(dev->htable[i], oce_isr,
(caddr_t)dev->eq[i], NULL);
@@ -355,7 +372,7 @@
{
int nvec;
- for (nvec = 0; nvec < dev->neqs; nvec++) {
+ for (nvec = 0; nvec < dev->num_vectors; nvec++) {
(void) ddi_intr_remove_handler(dev->htable[nvec]);
}
} /* oce_del_msix_handlers */
@@ -460,7 +477,6 @@
return (DDI_FAILURE);
dev->num_vectors = navail;
- dev->intr_types = DDI_INTR_TYPE_FIXED;
/* allocate htable */
dev->htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_main.c Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_main.c Fri Feb 19 18:04:10 2010 -0800
@@ -35,12 +35,11 @@
#define ATTACH_DEV_INIT 0x1
#define ATTACH_FM_INIT 0x2
-#define ATTACH_SETUP_INTR 0x4
-#define ATTACH_LOCK_INIT 0x8
-#define ATTACH_PCI_INIT 0x10
-#define ATTACH_BOOTSTRAP_INIT 0x20
-#define ATTACH_HW_INIT 0x40
-#define ATTACH_ADD_HANDLERS 0x80
+#define ATTACH_LOCK_INIT 0x4
+#define ATTACH_PCI_INIT 0x8
+#define ATTACH_HW_INIT 0x10
+#define ATTACH_SETUP_TXRX 0x20
+#define ATTACH_SETUP_ADAP 0x40
#define ATTACH_STAT_INIT 0x100
#define ATTACH_MAC_REG 0x200
@@ -54,7 +53,8 @@
/* driver properties */
static const char mtu_prop_name[] = "oce_default_mtu";
static const char tx_ring_size_name[] = "oce_tx_ring_size";
-static const char bcopy_limit_name[] = "oce_bcopy_limit";
+static const char tx_bcopy_limit_name[] = "oce_tx_bcopy_limit";
+static const char rx_bcopy_limit_name[] = "oce_rx_bcopy_limit";
static const char fm_cap_name[] = "oce_fm_capability";
static const char log_level_name[] = "oce_log_level";
static const char lso_capable_name[] = "oce_lso_capable";
@@ -69,7 +69,6 @@
static void oce_init_locks(struct oce_dev *dev);
static void oce_destroy_locks(struct oce_dev *dev);
static void oce_get_params(struct oce_dev *dev);
-int oce_reset_fun(struct oce_dev *dev);
static struct cb_ops oce_cb_ops = {
nulldev, /* cb_open */
@@ -182,10 +181,8 @@
oce_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
int ret = 0;
- int intr_types;
struct oce_dev *dev = NULL;
mac_register_t *mac;
- struct mac_address_format mac_addr;
switch (cmd) {
case DDI_RESUME:
@@ -199,22 +196,12 @@
oce_log(dev, CE_CONT, MOD_CONFIG, "!%s, %s",
oce_desc_string, oce_version);
-
- /* get supported intr types */
- ret = ddi_intr_get_supported_types(dip, &intr_types);
- if (ret != DDI_SUCCESS) {
- oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
- "Failed to retrieve intr types ");
- return (DDI_FAILURE);
- }
-
/* allocate dev */
dev = kmem_zalloc(sizeof (struct oce_dev), KM_SLEEP);
/* populate the dev structure */
dev->dip = dip;
dev->dev_id = ddi_get_instance(dip);
- dev->intr_types = intr_types;
dev->suspended = B_FALSE;
/* get the parameters */
@@ -230,14 +217,13 @@
oce_fm_init(dev);
dev->attach_state |= ATTACH_FM_INIT;
-
ret = oce_setup_intr(dev);
if (ret != DDI_SUCCESS) {
oce_log(dev, CE_WARN, MOD_CONFIG,
"Interrupt setup failed with %d", ret);
goto attach_fail;
+
}
- dev->attach_state |= ATTACH_SETUP_INTR;
/* initialize locks */
oce_init_locks(dev);
@@ -252,60 +238,31 @@
}
dev->attach_state |= ATTACH_PCI_INIT;
- /* check if reset if required */
- if (oce_is_reset_pci(dev)) {
- ret = oce_pci_soft_reset(dev);
- if (ret) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Device Reset failed: %d", ret);
- goto attach_fail;
- }
- }
-
- /* create bootstrap mailbox */
- dev->bmbx = oce_alloc_dma_buffer(dev,
- sizeof (struct oce_bmbx), DDI_DMA_CONSISTENT);
- if (dev->bmbx == NULL) {
+ /* HW init */
+ ret = oce_hw_init(dev);
+ if (ret != DDI_SUCCESS) {
oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to allocate bmbx: size = %u",
- (uint32_t)sizeof (struct oce_bmbx));
+ "HW initialization failed with %d", ret);
goto attach_fail;
}
- dev->attach_state |= ATTACH_BOOTSTRAP_INIT;
+ dev->attach_state |= ATTACH_HW_INIT;
- /* initialize the BMBX */
- ret = oce_mbox_init(dev);
- if (ret != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Mailbox initialization Failed with %d", ret);
- goto attach_fail;
- }
-
- /* read the firmware version */
- ret = oce_get_fw_version(dev);
- if (ret != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Firmaware version read failed with %d", ret);
+ ret = oce_init_txrx(dev);
+ if (ret != DDI_SUCCESS) {
+ oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
+ "Failed to init rings");
goto attach_fail;
}
+ dev->attach_state |= ATTACH_SETUP_TXRX;
- /* read the fw config */
- ret = oce_get_fw_config(dev);
- if (ret != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Firmware configuration read failed with %d", ret);
+ ret = oce_setup_adapter(dev);
+ if (ret != DDI_SUCCESS) {
+ oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
+ "Failed to setup adapter");
goto attach_fail;
}
+ dev->attach_state |= ATTACH_SETUP_ADAP;
- /* read the MAC address */
- ret = oce_read_mac_addr(dev, dev->if_id, 1,
- MAC_ADDRESS_TYPE_NETWORK, &mac_addr);
- if (ret != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "MAC address read failed with %d", ret);
- goto attach_fail;
- }
- bcopy(&mac_addr.mac_addr[0], &dev->mac_addr[0], ETHERADDRL);
ret = oce_stat_init(dev);
if (ret != DDI_SUCCESS) {
@@ -371,24 +328,52 @@
static int
oce_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
- struct oce_dev *dev = ddi_get_driver_private(dip);
+ struct oce_dev *dev;
+ int pcnt = 0;
- ASSERT(dev != NULL);
-
+ dev = ddi_get_driver_private(dip);
+ if (dev == NULL) {
+ return (DDI_FAILURE);
+ }
oce_log(dev, CE_NOTE, MOD_CONFIG,
"Detaching driver: cmd = 0x%x", cmd);
switch (cmd) {
- case DDI_DETACH:
- oce_unconfigure(dev);
- break;
-
+ default:
+ return (DDI_FAILURE);
case DDI_SUSPEND:
return (oce_suspend(dip));
+ case DDI_DETACH:
+ break;
+ } /* switch cmd */
- default:
+ /* Fail detach if MAC unregister is unsuccessful */
+ if (mac_unregister(dev->mac_handle) != 0) {
+ oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
+ "Failed to unregister MAC ");
return (DDI_FAILURE);
- } /* switch cmd */
+ }
+ dev->attach_state &= ~ATTACH_MAC_REG;
+
+ /* check if the detach is called without stopping */
+ DEV_LOCK(dev);
+ if (dev->state & STATE_MAC_STARTED) {
+ dev->state &= ~STATE_MAC_STARTED;
+ oce_stop(dev);
+ DEV_UNLOCK(dev);
+ } else
+ DEV_UNLOCK(dev);
+
+ /*
+ * Wait for Packets sent up to be freed
+ */
+ if ((pcnt = oce_rx_pending(dev)) != 0) {
+ oce_log(dev, CE_WARN, MOD_CONFIG,
+ "%d Pending Buffers Detach failed", pcnt);
+ return (DDI_FAILURE);
+ }
+ oce_unconfigure(dev);
+
return (DDI_SUCCESS);
} /* oce_detach */
@@ -405,7 +390,7 @@
return (DDI_SUCCESS);
}
- oce_di(dev);
+ oce_chip_di(dev);
ret = oce_reset_fun(dev);
@@ -423,6 +408,7 @@
/* stop the adapter */
if (dev->state & STATE_MAC_STARTED) {
oce_stop(dev);
+ oce_unsetup_adapter(dev);
}
dev->state &= ~STATE_MAC_STARTED;
mutex_exit(&dev->dev_lock);
@@ -443,6 +429,11 @@
return (DDI_SUCCESS);
}
if (dev->state & STATE_MAC_STARTED) {
+ ret = oce_setup_adapter(dev);
+ if (ret != DDI_SUCCESS) {
+ mutex_exit(&dev->dev_lock);
+ return (DDI_FAILURE);
+ }
ret = oce_start(dev);
if (ret != DDI_SUCCESS) {
mutex_exit(&dev->dev_lock);
@@ -483,8 +474,16 @@
if (state & ATTACH_STAT_INIT) {
oce_stat_fini(dev);
}
- if (state & ATTACH_BOOTSTRAP_INIT) {
- oce_free_dma_buffer(dev, dev->bmbx);
+ if (state & ATTACH_SETUP_ADAP) {
+ oce_unsetup_adapter(dev);
+ }
+
+ if (state & ATTACH_SETUP_TXRX) {
+ oce_fini_txrx(dev);
+ }
+
+ if (state & ATTACH_HW_INIT) {
+ oce_hw_fini(dev);
}
if (state & ATTACH_PCI_INIT) {
oce_pci_fini(dev);
@@ -492,9 +491,6 @@
if (state & ATTACH_LOCK_INIT) {
oce_destroy_locks(dev);
}
- if (state & ATTACH_SETUP_INTR) {
- (void) oce_teardown_intr(dev);
- }
if (state & ATTACH_FM_INIT) {
oce_fm_fini(dev);
}
@@ -527,12 +523,15 @@
DDI_PROP_DONTPASS, (char *)tx_ring_size_name,
OCE_DEFAULT_TX_RING_SIZE);
- dev->bcopy_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dev->dip,
- DDI_PROP_DONTPASS, (char *)bcopy_limit_name,
- OCE_DEFAULT_BCOPY_LIMIT);
+ dev->tx_bcopy_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dev->dip,
+ DDI_PROP_DONTPASS, (char *)tx_bcopy_limit_name,
+ OCE_DEFAULT_TX_BCOPY_LIMIT);
+ dev->rx_bcopy_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dev->dip,
+ DDI_PROP_DONTPASS, (char *)rx_bcopy_limit_name,
+ OCE_DEFAULT_RX_BCOPY_LIMIT);
dev->lso_capable = (boolean_t)ddi_prop_get_int(DDI_DEV_T_ANY, dev->dip,
- DDI_PROP_DONTPASS, (char *)lso_capable_name, 0);
+ DDI_PROP_DONTPASS, (char *)lso_capable_name, 1);
dev->fm_caps = ddi_prop_get_int(DDI_DEV_T_ANY, dev->dip,
DDI_PROP_DONTPASS, (char *)fm_cap_name, OCE_FM_CAPABILITY);
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_mbx.c Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_mbx.c Fri Feb 19 18:04:10 2010 -0800
@@ -129,11 +129,13 @@
int
oce_mbox_wait(struct oce_dev *dev, uint32_t tmo_sec)
{
- clock_t tmo = (tmo_sec > 0) ? drv_usectohz(tmo_sec * 1000000) :
- drv_usectohz(DEFAULT_MQ_MBOX_TIMEOUT);
+ clock_t tmo;
clock_t now, tstamp;
pd_mpu_mbox_db_t mbox_db;
+ tmo = (tmo_sec > 0) ? drv_usectohz(tmo_sec * 1000000) :
+ drv_usectohz(DEFAULT_MQ_MBOX_TIMEOUT);
+
tstamp = ddi_get_lbolt();
do {
now = ddi_get_lbolt();
@@ -167,6 +169,7 @@
/* write 30 bits of address hi dword */
pa = (uint32_t)(DBUF_PA(dev->bmbx) >> 34);
+ bzero(&mbox_db, sizeof (pd_mpu_mbox_db_t));
mbox_db.bits.ready = 0;
mbox_db.bits.hi = 1;
mbox_db.bits.address = pa;
@@ -183,6 +186,11 @@
/* wait for mbox ready */
ret = oce_mbox_wait(dev, tmo_sec);
if (ret != 0) {
+ oce_log(dev, CE_NOTE, MOD_CONFIG,
+ "BMBX TIMED OUT PROGRAMMING HI ADDR: %d", ret);
+ /* if mbx times out, hw is in invalid state */
+ ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
+ oce_fm_ereport(dev, DDI_FM_DEVICE_INVAL_STATE);
return (ret);
}
@@ -199,7 +207,7 @@
ret = oce_mbox_wait(dev, tmo_sec);
if (ret != 0) {
oce_log(dev, CE_NOTE, MOD_CONFIG,
- "bmbx timed out: %d", ret);
+ "BMBX TIMED OUT PROGRAMMING LO ADDR: %d", ret);
/* if mbx times out, hw is in invalid state */
ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
oce_fm_ereport(dev, DDI_FM_DEVICE_INVAL_STATE);
@@ -335,15 +343,7 @@
/* now post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to get firmware version:"
- "CMD COMPLETION STATUS:(%d)"
- "MBX COMMAND COMPLETION STATUS:(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS:(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
+ if (ret != 0) {
return (ret);
}
bcopy(fwcmd->params.rsp.fw_ver_str, dev->fw_version, 32);
@@ -385,7 +385,7 @@
/* fill rest of mbx */
mbx->u0.s.embedded = 1;
mbx->payload_length = sizeof (struct ioctl_common_function_reset);
- DW_SWAP(u32ptr(&mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
+ DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
return (oce_mbox_dispatch(dev, 0));
} /* oce_reset_fun */
@@ -404,7 +404,7 @@
* return 0 on success, EIO on failure
*/
int
-oce_read_mac_addr(struct oce_dev *dev, uint16_t if_id, uint8_t perm,
+oce_read_mac_addr(struct oce_dev *dev, uint32_t if_id, uint8_t perm,
uint8_t type, struct mac_address_format *mac)
{
struct oce_mbx mbx;
@@ -423,7 +423,7 @@
/* fill the command */
fwcmd->params.req.permanent = perm;
if (perm)
- fwcmd->params.req.if_id = if_id;
+ fwcmd->params.req.if_id = (uint16_t)if_id;
else
fwcmd->params.req.if_id = 0;
fwcmd->params.req.type = type;
@@ -435,15 +435,7 @@
/* now post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to read MAC:"
- "CMD COMPLETION STATUS:(%d)"
- "MBX COMMAND COMPLETION STATUS:(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS:(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
+ if (ret != 0) {
return (ret);
}
@@ -522,18 +514,12 @@
/* now post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to create interface:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
+ if (ret != 0) {
return (ret);
}
+
+
/* get response */
*if_id = LE_32(fwcmd->params.rsp.if_id);
oce_log(dev, CE_NOTE, MOD_CONFIG,
@@ -582,18 +568,7 @@
/* post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to delete the interface:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
- return (ret);
- }
- return (0);
+ return (ret);
} /* oce_if_del */
/*
@@ -628,15 +603,8 @@
/* post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to get the link status:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
+
+ if (ret != 0) {
return (ret);
}
@@ -680,19 +648,8 @@
/* post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to set rx filter:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
- return (ret);
- }
- return (0);
+ return (ret);
} /* oce_set_rx_filter */
/*
@@ -706,8 +663,8 @@
* return 0 on success, EIO on failure
*/
int
-oce_set_multicast_table(struct oce_dev *dev, struct ether_addr *mca_table,
- uint8_t mca_cnt, boolean_t enable_promisc)
+oce_set_multicast_table(struct oce_dev *dev, uint32_t if_id,
+struct ether_addr *mca_table, uint16_t mca_cnt, boolean_t promisc)
{
struct oce_mbx mbx;
struct mbx_set_common_iface_multicast *fwcmd;
@@ -724,10 +681,13 @@
sizeof (struct mbx_set_common_iface_multicast));
/* fill the command */
- bcopy(mca_table, &fwcmd->params.req.mac[0], mca_cnt * ETHERADDRL);
- fwcmd->params.req.if_id = (uint8_t)dev->if_id;
+ fwcmd->params.req.if_id = (uint8_t)if_id;
+ if (mca_table != NULL) {
+ bcopy(mca_table, &fwcmd->params.req.mac[0],
+ mca_cnt * ETHERADDRL);
+ }
fwcmd->params.req.num_mac = LE_16(mca_cnt);
- fwcmd->params.req.promiscuous = (uint8_t)enable_promisc;
+ fwcmd->params.req.promiscuous = (uint8_t)promisc;
/* fill rest of mbx */
mbx.u0.s.embedded = B_TRUE;
@@ -737,19 +697,8 @@
/* post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- /* Check command req and mbx status */
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to set multicast table:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
- return (ret);
- }
- return (0);
+
+ return (ret);
} /* oce_set_multicast_table */
/*
@@ -782,16 +731,8 @@
/* now post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- /* Check the mailbox return status and mbx command response status */
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to get firmware configuration:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
+
+ if (ret != 0) {
return (ret);
}
@@ -843,15 +784,7 @@
/* now post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
/* Check the mailbox status and command completion status */
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to get hardware status:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
+ if (ret != 0) {
return (ret);
}
@@ -894,20 +827,8 @@
/* post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- /* Check the mailbox status and command completion status */
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to set interrupt vectors:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
- return (ret);
- }
- return (0);
+ return (ret);
} /* oce_num_intr_vectors_set */
/*
@@ -949,20 +870,7 @@
/* post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- /* Check the command completion and mbx response status */
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to set flow control:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
- return (ret);
- }
-
- return (0);
+ return (ret);
} /* oce_set_flow_control */
/*
@@ -1006,16 +914,7 @@
/* post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- /* Check the command completion and mbx response status */
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to get flow control value:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
+ if (ret != 0) {
return (ret);
}
@@ -1072,19 +971,7 @@
/* post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- /* Check the command completion and mbx response status */
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to change promiscuous setting:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
- return (ret);
- }
- return (0);
+ return (ret);
}
/*
@@ -1096,7 +983,8 @@
* return 0 on success, EIO on failure
*/
int
-oce_add_mac(struct oce_dev *dev, const uint8_t *mac, uint32_t *pmac_id)
+oce_add_mac(struct oce_dev *dev, uint32_t if_id,
+ const uint8_t *mac, uint32_t *pmac_id)
{
struct oce_mbx mbx;
struct mbx_add_common_iface_mac *fwcmd;
@@ -1104,7 +992,7 @@
bzero(&mbx, sizeof (struct oce_mbx));
fwcmd = (struct mbx_add_common_iface_mac *)&mbx.payload;
- fwcmd->params.req.if_id = LE_32(dev->if_id);
+ fwcmd->params.req.if_id = LE_32(if_id);
bcopy(mac, &fwcmd->params.req.mac_address[0], ETHERADDRL);
/* initialize the ioctl header */
@@ -1122,18 +1010,10 @@
/* post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- /* Check the command completion and mbx response status */
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to add MAC:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
+ if (ret != 0) {
return (ret);
}
+
*pmac_id = LE_32(fwcmd->params.rsp.pmac_id);
return (0);
}
@@ -1147,7 +1027,7 @@
* return 0 on success, EIO on failure
*/
int
-oce_del_mac(struct oce_dev *dev, uint32_t *pmac_id)
+oce_del_mac(struct oce_dev *dev, uint32_t if_id, uint32_t *pmac_id)
{
struct oce_mbx mbx;
struct mbx_del_common_iface_mac *fwcmd;
@@ -1155,7 +1035,7 @@
bzero(&mbx, sizeof (struct oce_mbx));
fwcmd = (struct mbx_del_common_iface_mac *)&mbx.payload;
- fwcmd->params.req.if_id = dev->if_id;
+ fwcmd->params.req.if_id = if_id;
fwcmd->params.req.pmac_id = *pmac_id;
/* initialize the ioctl header */
@@ -1173,20 +1053,7 @@
/* post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- /* Check the command completion and mbx response status */
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to delete MAC:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
- return (ret);
- }
-
- return (0);
+ return (ret);
}
@@ -1202,7 +1069,7 @@
* return 0 on success, EIO on failure
*/
int
-oce_config_vlan(struct oce_dev *dev, uint8_t if_id,
+oce_config_vlan(struct oce_dev *dev, uint32_t if_id,
struct normal_vlan *vtag_arr, uint8_t vtag_cnt,
boolean_t untagged, boolean_t enable_promisc)
{
@@ -1220,7 +1087,7 @@
MBX_TIMEOUT_SEC,
sizeof (struct mbx_common_config_vlan));
- fwcmd->params.req.if_id = if_id;
+ fwcmd->params.req.if_id = (uint8_t)if_id;
fwcmd->params.req.promisc = (uint8_t)enable_promisc;
fwcmd->params.req.untagged = (uint8_t)untagged;
fwcmd->params.req.num_vlans = vtag_cnt;
@@ -1239,19 +1106,7 @@
/* post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- /* Check command req and mbx status */
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to configure VLAN:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
- return (ret);
- }
- return (0);
+ return (ret);
} /* oce_config_vlan */
@@ -1292,19 +1147,7 @@
/* post the command */
ret = oce_mbox_post(dev, &mbx, NULL);
- /* Check command req and mbx status */
- if ((ret != 0) || OCE_MBX_STATUS(&fwcmd->hdr) != 0 ||
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr) != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to configure the link:"
- "CMD COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION STATUS(%d)"
- "MBX COMMAND COMPLETION ADDL STATUS(%d)",
- ret, OCE_MBX_STATUS(&fwcmd->hdr),
- OCE_MBX_ADDL_STATUS(&fwcmd->hdr));
- return (ret);
- }
- return (0);
+ return (ret);
} /* oce_config_link */
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_mq.c Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_mq.c Fri Feb 19 18:04:10 2010 -0800
@@ -54,7 +54,7 @@
mq = (struct oce_mq *)arg;
cq = mq->cq;
dev = mq->parent;
- mutex_enter(&cq->lock);
+ mutex_enter(&mq->lock);
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
while (cqe->u0.dw[3]) {
DW_SWAP(u32ptr(cqe), sizeof (struct oce_mq_cqe));
@@ -72,17 +72,39 @@
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
num_cqe++;
} /* for all valid CQE */
- mutex_exit(&cq->lock);
+ mutex_exit(&mq->lock);
oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
return (num_cqe);
} /* oce_drain_mq_cq */
+int
+oce_start_mq(struct oce_mq *mq)
+{
+ oce_arm_cq(mq->parent, mq->cq->cq_id, 0, B_TRUE);
+ return (0);
+}
+
+
void
-oce_stop_mq(struct oce_mq *mq)
+oce_clean_mq(struct oce_mq *mq)
{
- while (oce_drain_mq_cq(mq) != 0) {
- }
+ struct oce_cq *cq;
+ struct oce_dev *dev;
+ uint16_t num_cqe = 0;
+ struct oce_mq_cqe *cqe = NULL;
+ cq = mq->cq;
+ dev = mq->parent;
+ cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
+ while (cqe->u0.dw[3]) {
+ DW_SWAP(u32ptr(cqe), sizeof (struct oce_mq_cqe));
+ cqe->u0.dw[3] = 0;
+ RING_GET(cq->ring, 1);
+ cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
+ num_cqe++;
+ } /* for all valid CQE */
+ if (num_cqe)
+ oce_arm_cq(dev, cq->cq_id, num_cqe, B_FALSE);
/* Drain the Event queue now */
oce_drain_eq(mq->cq->eq);
}
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_queue.c Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_queue.c Fri Feb 19 18:04:10 2010 -0800
@@ -33,11 +33,35 @@
int oce_destroy_q(struct oce_dev *oce, struct oce_mbx *mbx, size_t req_size,
enum qtype qtype);
-struct oce_cq *oce_cq_create(struct oce_dev *dev, struct oce_eq *eq,
- uint32_t q_len, uint32_t entry_size, boolean_t sol_event,
- boolean_t is_eventable, boolean_t nodelay, uint32_t ncoalesce);
+/* Mailbox Queue functions */
+struct oce_mq *
+oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);
+
+/* event queue handling */
+struct oce_eq *
+oce_eq_create(struct oce_dev *dev, uint32_t q_len, uint32_t item_size,
+ uint32_t eq_delay);
+
+/* completion queue handling */
+struct oce_cq *
+oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
+ uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
+ boolean_t nodelay, uint32_t ncoalesce);
-int oce_cq_del(struct oce_dev *dev, struct oce_cq *cq);
+
+/* Tx WQ functions */
+static struct oce_wq *oce_wq_init(struct oce_dev *dev, uint32_t q_len,
+ int wq_type);
+static void oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq);
+static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
+static void oce_wq_del(struct oce_dev *dev, struct oce_wq *wq);
+/* Rx Queue functions */
+static struct oce_rq *oce_rq_init(struct oce_dev *dev, uint32_t q_len,
+ uint32_t frag_size, uint32_t mtu,
+ boolean_t rss);
+static void oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq);
+static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
+static void oce_rq_del(struct oce_dev *dev, struct oce_rq *rq);
/*
* function to create an event queue
@@ -56,31 +80,28 @@
struct mbx_create_common_eq *fwcmd;
int ret = 0;
+ /* allocate an eq */
+ eq = kmem_zalloc(sizeof (struct oce_eq), KM_NOSLEEP);
+
+ if (eq == NULL) {
+ return (NULL);
+ }
+
bzero(&mbx, sizeof (struct oce_mbx));
-
/* allocate mbx */
fwcmd = (struct mbx_create_common_eq *)&mbx.payload;
- /* allocate an eq */
- eq = kmem_zalloc(sizeof (struct oce_eq), KM_NOSLEEP);
- if (eq == NULL) {
- oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
- "EQ allocation failed");
- return (NULL);
- }
-
eq->ring = create_ring_buffer(dev, q_len,
item_size, DDI_DMA_CONSISTENT);
+
if (eq->ring == NULL) {
oce_log(dev, CE_WARN, MOD_CONFIG,
- "EQ ring alloc failed:0x%p",
+ "EQ ring alloc failed:0x%p",
(void *)eq->ring);
kmem_free(eq, sizeof (struct oce_eq));
return (NULL);
}
- /* now send the mbx using the MQ mailbox */
- bzero(fwcmd, sizeof (struct mbx_create_common_eq));
mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
MBX_SUBSYSTEM_COMMON,
OPCODE_CREATE_COMMON_EQ, MBX_TIMEOUT_SEC,
@@ -128,10 +149,9 @@
eq->eq_cfg.cur_eqd = (uint8_t)eq_delay;
eq->parent = (void *)dev;
atomic_inc_32(&dev->neqs);
- mutex_init(&eq->lock, NULL, MUTEX_DRIVER,
- DDI_INTR_PRI(dev->intr_pri));
oce_log(dev, CE_NOTE, MOD_CONFIG,
"EQ created, eq=0x%p eq_id=0x%x", (void *)eq, eq->eq_id);
+ /* hand the created eq back to the caller */
return (eq);
} /* oce_eq_create */
@@ -143,20 +163,28 @@
*
* return 0=>success, failure otherwise
*/
-int
+void
oce_eq_del(struct oce_dev *dev, struct oce_eq *eq)
{
+ struct oce_mbx mbx;
+ struct mbx_destroy_common_eq *fwcmd;
+
+ /* drain the residual events */
+ oce_drain_eq(eq);
+
/* destroy the ring */
destroy_ring_buffer(dev, eq->ring);
eq->ring = NULL;
- mutex_destroy(&eq->lock);
- /* release the eq */
+ /* send a command to delete the EQ */
+ fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
+ fwcmd->params.req.id = eq->eq_id;
+ (void) oce_destroy_q(dev, &mbx,
+ sizeof (struct mbx_destroy_common_eq),
+ QTYPE_EQ);
kmem_free(eq, sizeof (struct oce_eq));
atomic_dec_32(&dev->neqs);
-
- return (DDI_SUCCESS);
-} /* oce_eq_del */
+} /* oce_eq_del */
/*
* function to create a completion queue
@@ -177,7 +205,6 @@
struct mbx_create_common_cq *fwcmd;
int ret = 0;
- bzero(&mbx, sizeof (struct oce_mbx));
/* create cq */
cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);
if (cq == NULL) {
@@ -196,8 +223,8 @@
kmem_free(cq, sizeof (struct oce_cq));
return (NULL);
}
-
- /* allocate mbx */
+ /* initialize mailbox */
+ bzero(&mbx, sizeof (struct oce_mbx));
fwcmd = (struct mbx_create_common_cq *)&mbx.payload;
/* fill the command header */
@@ -216,10 +243,9 @@
fwcmd->params.req.cq_ctx.coalesce_wm = ncoalesce;
/* dw1 */
- fwcmd->params.req.cq_ctx.armed = 1;
+ fwcmd->params.req.cq_ctx.armed = B_FALSE;
fwcmd->params.req.cq_ctx.eq_id = eq->eq_id;
fwcmd->params.req.cq_ctx.pd = 0;
-
/* dw2 */
fwcmd->params.req.cq_ctx.function = dev->fn;
@@ -254,9 +280,6 @@
cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
dev->cq[cq->cq_id] = cq;
atomic_inc_32(&eq->ref_count);
- mutex_init(&cq->lock, NULL, MUTEX_DRIVER,
- DDI_INTR_PRI(dev->intr_pri));
-
return (cq);
} /* oce_cq_create */
@@ -266,25 +289,34 @@
* dev - software handle to the device
* cq - handle to the CQ to delete
*
- * return 0 => success, failure otherwise
+ * return none
*/
-int
+static void
oce_cq_del(struct oce_dev *dev, struct oce_cq *cq)
{
- /* Reset the handler */
- cq->cq_handler = NULL;
+ struct oce_mbx mbx;
+ struct mbx_destroy_common_cq *fwcmd;
+
/* destroy the ring */
destroy_ring_buffer(dev, cq->ring);
cq->ring = NULL;
- /* decrement eq ref count */
+ bzero(&mbx, sizeof (struct oce_mbx));
+ /* send a command to delete the CQ */
+ fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
+ fwcmd->params.req.id = cq->cq_id;
+ (void) oce_destroy_q(dev, &mbx,
+ sizeof (struct mbx_destroy_common_cq),
+ QTYPE_CQ);
+
+ /* Reset the handler */
+ cq->cq_handler = NULL;
+ dev->cq[cq->cq_id] = NULL;
atomic_dec_32(&cq->eq->ref_count);
mutex_destroy(&cq->lock);
/* release the eq */
kmem_free(cq, sizeof (struct oce_cq));
-
- return (0);
} /* oce_cq_del */
/*
@@ -305,26 +337,26 @@
int ret = 0;
struct oce_cq *cq;
- bzero(&mbx, sizeof (struct oce_mbx));
-
- /* allocate mbx */
- fwcmd = (struct mbx_create_common_mq *)&mbx.payload;
-
/* Create the Completion Q */
-
cq = oce_cq_create(dev, eq, CQ_LEN_256,
sizeof (struct oce_mq_cqe),
B_FALSE, B_TRUE, B_TRUE, 0);
if (cq == NULL) {
return (NULL);
}
+
+
/* allocate the mq */
mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);
+
if (mq == NULL) {
- oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
- "MQ allocation failed");
+ goto mq_alloc_fail;
}
+ bzero(&mbx, sizeof (struct oce_mbx));
+ /* allocate mbx */
+ fwcmd = (struct mbx_create_common_mq *)&mbx.payload;
+
/* create the ring buffer for this queue */
mq->ring = create_ring_buffer(dev, q_len,
sizeof (struct oce_mbx), DDI_DMA_CONSISTENT);
@@ -374,13 +406,16 @@
/* set the MQCQ handlers */
cq->cq_handler = oce_drain_mq_cq;
cq->cb_arg = (void *)mq;
+ mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(dev->intr_pri));
return (mq);
mq_fail:
destroy_ring_buffer(dev, mq->ring);
mq_ring_alloc:
kmem_free(mq, sizeof (struct oce_mq));
- (void) oce_cq_del(dev, cq);
+mq_alloc_fail:
+ oce_cq_del(dev, cq);
return (NULL);
} /* oce_mq_create */
@@ -390,27 +425,27 @@
* dev - software handle to the device
* mq - pointer to the MQ to delete
*
- * return 0 => success, failure otherwise
+ * return none
*/
-int
+static void
oce_mq_del(struct oce_dev *dev, struct oce_mq *mq)
{
- int ret = 0;
+ struct oce_mbx mbx;
+ struct mbx_destroy_common_mq *fwcmd;
/* destroy the ring */
destroy_ring_buffer(dev, mq->ring);
mq->ring = NULL;
-
- /* destroy the CQ */
- ret = oce_cq_del(dev, mq->cq);
- if (ret != DDI_SUCCESS) {
- oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
- "MQCQ destroy Failed ");
- }
-
- /* release the eq */
+ bzero(&mbx, sizeof (struct oce_mbx));
+ fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
+ fwcmd->params.req.id = mq->mq_id;
+ (void) oce_destroy_q(dev, &mbx,
+ sizeof (struct mbx_destroy_common_mq),
+ QTYPE_MQ);
+ oce_cq_del(dev, mq->cq);
+ mq->cq = NULL;
+ mutex_destroy(&mq->lock);
kmem_free(mq, sizeof (struct oce_mq));
- return (DDI_SUCCESS);
} /* oce_mq_del */
/*
@@ -421,21 +456,14 @@
*
* return pointer to the WQ created. NULL on failure
*/
-struct oce_wq *
-oce_wq_create(struct oce_dev *dev, struct oce_eq *eq,
- uint32_t q_len, int wq_type)
+static struct oce_wq *
+oce_wq_init(struct oce_dev *dev, uint32_t q_len, int wq_type)
{
- struct oce_mbx mbx;
- struct mbx_create_nic_wq *fwcmd;
struct oce_wq *wq;
- struct oce_cq *cq;
char str[MAX_POOL_NAME];
int ret;
ASSERT(dev != NULL);
-
- bzero(&mbx, sizeof (struct oce_mbx));
-
/* q_len must be min 256 and max 2k */
if (q_len < 256 || q_len > 2048) {
oce_log(dev, CE_WARN, MOD_CONFIG,
@@ -462,7 +490,7 @@
wq->parent = (void *)dev;
/* Create the WQ Buffer pool */
- ret = oce_wqb_cache_create(wq, dev->bcopy_limit);
+ ret = oce_wqb_cache_create(wq, dev->tx_bcopy_limit);
if (ret != DDI_SUCCESS) {
oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
"WQ Buffer Pool create failed ");
@@ -487,16 +515,6 @@
goto wqed_fail;
}
- /* create the CQ */
- cq = oce_cq_create(dev, eq, CQ_LEN_1024,
- sizeof (struct oce_nic_tx_cqe),
- B_FALSE, B_TRUE, B_FALSE, 3);
- if (cq == NULL) {
- oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
- "WCCQ create failed ");
- goto wccq_fail;
- }
-
/* create the ring buffer */
wq->ring = create_ring_buffer(dev, q_len,
NIC_WQE_SIZE, DDI_DMA_CONSISTENT);
@@ -506,73 +524,20 @@
goto wq_ringfail;
}
- /* now fill the command */
- fwcmd = (struct mbx_create_nic_wq *)&mbx.payload;
- bzero(fwcmd, sizeof (struct mbx_create_nic_wq));
- mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
- MBX_SUBSYSTEM_NIC,
- OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
- sizeof (struct mbx_create_nic_wq));
-
- fwcmd->params.req.nic_wq_type = (uint8_t)wq_type;
- fwcmd->params.req.num_pages = wq->ring->dbuf->num_pages;
- oce_log(dev, CE_NOTE, MOD_CONFIG, "NUM_PAGES = 0x%d size = %lu",
- (uint32_t)wq->ring->dbuf->num_pages,
- wq->ring->dbuf->size);
-
- /* workaround: fill 0x01 for ulp_mask in rsvd0 */
- fwcmd->params.req.rsvd0 = 0x01;
- fwcmd->params.req.wq_size = OCE_LOG2(q_len) + 1;
- fwcmd->params.req.valid = 1;
- fwcmd->params.req.pd_id = 0;
- fwcmd->params.req.pci_function_id = dev->fn;
- fwcmd->params.req.cq_id = cq->cq_id;
-
- oce_page_list(wq->ring->dbuf, fwcmd->params.req.pages,
- wq->ring->dbuf->num_pages);
-
- /* fill rest of mbx */
- mbx.u0.s.embedded = 1;
- mbx.payload_length = sizeof (struct mbx_create_nic_wq);
- DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
-
- /* now post the command */
- ret = oce_mbox_post(dev, &mbx, NULL);
- if (ret != DDI_SUCCESS) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "WQ create failed: %d", ret);
- goto wq_fail;
-
- }
-
- /* interpret the response */
- wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);
-
- /* All are free to start with */
- wq->wq_free = q_len;
- wq->cq = cq;
-
- /* set the WQCQ handlers */
- cq->cq_handler = oce_drain_wq_cq;
- cq->cb_arg = (void *)wq;
-
- /* set the default eq delay for the eq associated with this wq */
- (void) oce_set_eq_delay(dev, &eq->eq_id, 1, wq->cfg.eqd);
-
/* Initialize WQ lock */
- mutex_init(&wq->lock, NULL, MUTEX_DRIVER,
+ mutex_init(&wq->tx_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(dev->intr_pri));
+ /* Initialize WQ lock */
+ mutex_init(&wq->txc_lock, NULL, MUTEX_DRIVER,
DDI_INTR_PRI(dev->intr_pri));
atomic_inc_32(&dev->nwqs);
OCE_LIST_CREATE(&wq->wqe_desc_list, DDI_INTR_PRI(dev->intr_pri));
-
return (wq);
-wq_fail:
+wqcq_fail:
destroy_ring_buffer(dev, wq->ring);
wq_ringfail:
- (void) oce_cq_del(dev, cq);
-wccq_fail:
kmem_cache_destroy(wq->wqed_cache);
wqed_fail:
oce_wqm_cache_destroy(wq);
@@ -591,54 +556,148 @@
*
* return 0 => success, failure otherwise
*/
-int
-oce_wq_del(struct oce_dev *dev, struct oce_wq *wq)
+static void
+oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq)
{
- ASSERT(dev != NULL);
- ASSERT(wq != NULL);
-
- /* destroy the ring buffer */
- destroy_ring_buffer(dev, wq->ring);
- wq->ring = NULL;
-
/* destroy cq */
- (void) oce_cq_del(dev, wq->cq);
- wq->cq = NULL;
-
+ oce_wqb_cache_destroy(wq);
+ oce_wqm_cache_destroy(wq);
kmem_cache_destroy(wq->wqed_cache);
- oce_wqm_cache_destroy(wq);
- oce_wqb_cache_destroy(wq);
/* Free the packet descriptor list */
OCE_LIST_DESTROY(&wq->wqe_desc_list);
-
+ destroy_ring_buffer(dev, wq->ring);
+ wq->ring = NULL;
/* Destroy the Mutex */
- mutex_destroy(&wq->lock);
+ mutex_destroy(&wq->tx_lock);
+ mutex_destroy(&wq->txc_lock);
kmem_free(wq, sizeof (struct oce_wq));
atomic_dec_32(&dev->nwqs);
- return (DDI_SUCCESS);
+} /* oce_wq_fini */
+
+
+static int
+oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
+{
+
+ struct oce_mbx mbx;
+ struct mbx_create_nic_wq *fwcmd;
+ struct oce_dev *dev = wq->parent;
+ struct oce_cq *cq;
+ int ret;
+
+ /* create the CQ */
+ cq = oce_cq_create(dev, eq, CQ_LEN_1024,
+ sizeof (struct oce_nic_tx_cqe),
+ B_FALSE, B_TRUE, B_FALSE, 3);
+ if (cq == NULL) {
+ oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
+ "WCCQ create failed ");
+ return (DDI_FAILURE);
+ }
+ /* now fill the command */
+ bzero(&mbx, sizeof (struct oce_mbx));
+ fwcmd = (struct mbx_create_nic_wq *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_NIC,
+ OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
+ sizeof (struct mbx_create_nic_wq));
+
+ fwcmd->params.req.nic_wq_type = (uint8_t)wq->cfg.wq_type;
+ fwcmd->params.req.num_pages = wq->ring->dbuf->num_pages;
+ oce_log(dev, CE_NOTE, MOD_CONFIG, "NUM_PAGES = 0x%d size = %lu",
+ (uint32_t)wq->ring->dbuf->num_pages,
+ wq->ring->dbuf->size);
+
+ /* workaround: fill 0x01 for ulp_mask in rsvd0 */
+ fwcmd->params.req.rsvd0 = 0x01;
+ fwcmd->params.req.wq_size = OCE_LOG2(wq->cfg.q_len) + 1;
+ fwcmd->params.req.valid = 1;
+ fwcmd->params.req.pd_id = 0;
+ fwcmd->params.req.pci_function_id = dev->fn;
+ fwcmd->params.req.cq_id = cq->cq_id;
+
+ oce_page_list(wq->ring->dbuf, fwcmd->params.req.pages,
+ wq->ring->dbuf->num_pages);
+
+ /* fill rest of mbx */
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof (struct mbx_create_nic_wq);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ /* now post the command */
+ ret = oce_mbox_post(dev, &mbx, NULL);
+ if (ret != DDI_SUCCESS) {
+ oce_log(dev, CE_WARN, MOD_CONFIG,
+ "WQ create failed: %d", ret);
+ oce_cq_del(dev, cq);
+ return (ret);
+
+ }
+
+ /* interpret the response */
+ wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);
+ wq->qstate = QCREATED;
+ wq->cq = cq;
+ /* set the WQCQ handlers */
+ wq->cq->cq_handler = oce_drain_wq_cq;
+ wq->cq->cb_arg = (void *)wq;
+ /* All are free to start with */
+ wq->wq_free = wq->cfg.q_len;
+ /* reset indicies */
+ wq->ring->cidx = 0;
+ wq->ring->pidx = 0;
+ return (0);
+}
+
+/*
+ * function to delete a WQ
+ *
+ * dev - software handle to the device
+ * wq - WQ to delete
+ *
+ * return none
+ */
+static void
+oce_wq_del(struct oce_dev *dev, struct oce_wq *wq)
+{
+ struct oce_mbx mbx;
+ struct mbx_delete_nic_wq *fwcmd;
+
+
+ ASSERT(dev != NULL);
+ ASSERT(wq != NULL);
+ if (wq->qstate == QCREATED) {
+ bzero(&mbx, sizeof (struct oce_mbx));
+ /* now fill the command */
+ fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
+ fwcmd->params.req.wq_id = wq->wq_id;
+ (void) oce_destroy_q(dev, &mbx,
+ sizeof (struct mbx_delete_nic_wq),
+ QTYPE_WQ);
+ wq->qstate = QDELETED;
+ oce_cq_del(dev, wq->cq);
+ wq->cq = NULL;
+ }
} /* oce_wq_del */
/*
- * function to create a RQ
+ * function to allocate RQ resources
*
* dev - software handle to the device
* rqcfg - configuration structure providing RQ config parameters
*
* return pointer to the RQ created. NULL on failure
*/
-struct oce_rq *
-oce_rq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
- uint32_t frag_size, uint32_t mtu, int16_t if_id,
+/* ARGSUSED */
+static struct oce_rq *
+oce_rq_init(struct oce_dev *dev, uint32_t q_len,
+ uint32_t frag_size, uint32_t mtu,
boolean_t rss)
{
- struct oce_mbx mbx;
- struct mbx_create_nic_rq *fwcmd;
+
struct oce_rq *rq;
int ret;
- struct oce_cq *cq;
-
- bzero(&mbx, sizeof (struct oce_mbx));
/* validate q creation parameters */
if (!OCE_LOG2(frag_size))
@@ -656,7 +715,6 @@
rq->cfg.q_len = q_len;
rq->cfg.frag_size = frag_size;
- rq->cfg.if_id = if_id;
rq->cfg.mtu = mtu;
rq->cfg.eqd = 0;
rq->cfg.nbufs = 8 * 1024;
@@ -670,11 +728,6 @@
if (ret != DDI_SUCCESS) {
goto rqb_fail;
}
- cq = oce_cq_create(dev, eq, CQ_LEN_1024, sizeof (struct oce_nic_rx_cqe),
- B_FALSE, B_TRUE, B_FALSE, 3);
- if (cq == NULL) {
- goto rccq_fail;
- }
/* create the ring buffer */
rq->ring = create_ring_buffer(dev, q_len,
@@ -689,62 +742,20 @@
rq->shadow_ring = kmem_zalloc(sizeof (struct rq_shadow_entry) *
q_len, KM_SLEEP);
- /* now fill the command */
- fwcmd = (struct mbx_create_nic_rq *)&mbx.payload;
-
- mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
- MBX_SUBSYSTEM_NIC,
- OPCODE_CREATE_NIC_RQ, MBX_TIMEOUT_SEC,
- sizeof (struct mbx_create_nic_rq));
-
- fwcmd->params.req.num_pages = rq->ring->dbuf->num_pages;
-
- fwcmd->params.req.frag_size = OCE_LOG2(frag_size);
- fwcmd->params.req.cq_id = cq->cq_id;
-
- oce_page_list(rq->ring->dbuf, fwcmd->params.req.pages,
- rq->ring->dbuf->num_pages);
-
- fwcmd->params.req.if_id = if_id;
-
- fwcmd->params.req.max_frame_size = (uint16_t)mtu;
- fwcmd->params.req.is_rss_queue = rss;
-
- /* fill rest of mbx */
- mbx.u0.s.embedded = 1;
- mbx.payload_length = sizeof (struct mbx_create_nic_rq);
- DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
-
- /* now post the command */
- ret = oce_mbox_post(dev, &mbx, NULL);
- if (ret != 0) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "RQ create failed: %d", ret);
- goto rq_fail;
- }
-
- /* interpret the response */
- rq->rq_id = LE_16(fwcmd->params.rsp.u0.s.rq_id);
- rq->rss_cpuid = fwcmd->params.rsp.u0.s.rss_cpuid;
- rq->cq = cq;
-
/* Initialize the RQ lock */
- mutex_init(&rq->lock, NULL, MUTEX_DRIVER,
+ mutex_init(&rq->rx_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(dev->intr_pri));
+ /* Initialize the recharge lock */
+ mutex_init(&rq->rc_lock, NULL, MUTEX_DRIVER,
DDI_INTR_PRI(dev->intr_pri));
atomic_inc_32(&dev->nrqs);
-
- /* set the Completion Handler */
- cq->cq_handler = oce_drain_rq_cq;
- cq->cb_arg = (void *)rq;
return (rq);
-rq_fail:
+rqcq_fail:
kmem_free(rq->shadow_ring,
sizeof (struct rq_shadow_entry) * q_len);
destroy_ring_buffer(dev, rq->ring);
rq_ringfail:
- (void) oce_cq_del(dev, cq);
-rccq_fail:
oce_rqb_cache_destroy(rq);
rqb_fail:
kmem_free(rq, sizeof (struct oce_rq));
@@ -757,32 +768,124 @@
* dev - software handle to the device
* rq - RQ to delete
*
- * return 0 => success, failure otherwise
+ * return none
*/
-int
+static void
+oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq)
+{
+ /* Destroy buffer cache */
+ oce_rqb_cache_destroy(rq);
+ destroy_ring_buffer(dev, rq->ring);
+ rq->ring = NULL;
+ kmem_free(rq->shadow_ring,
+ sizeof (struct rq_shadow_entry) * rq->cfg.q_len);
+ rq->shadow_ring = NULL;
+ mutex_destroy(&rq->rx_lock);
+ mutex_destroy(&rq->rc_lock);
+ kmem_free(rq, sizeof (struct oce_rq));
+ atomic_dec_32(&dev->nrqs);
+} /* oce_rq_fini */
+
+
+static int
+oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
+{
+ struct oce_mbx mbx;
+ struct mbx_create_nic_rq *fwcmd;
+ struct oce_dev *dev = rq->parent;
+ struct oce_cq *cq;
+ int ret;
+
+ cq = oce_cq_create(dev, eq, CQ_LEN_1024, sizeof (struct oce_nic_rx_cqe),
+ B_FALSE, B_TRUE, B_FALSE, 3);
+
+ if (cq == NULL) {
+ return (DDI_FAILURE);
+ }
+
+ /* now fill the command */
+ bzero(&mbx, sizeof (struct oce_mbx));
+ fwcmd = (struct mbx_create_nic_rq *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_NIC,
+ OPCODE_CREATE_NIC_RQ, MBX_TIMEOUT_SEC,
+ sizeof (struct mbx_create_nic_rq));
+
+ fwcmd->params.req.num_pages = rq->ring->dbuf->num_pages;
+ fwcmd->params.req.frag_size = OCE_LOG2(rq->cfg.frag_size);
+ fwcmd->params.req.cq_id = cq->cq_id;
+ oce_page_list(rq->ring->dbuf, fwcmd->params.req.pages,
+ rq->ring->dbuf->num_pages);
+
+ fwcmd->params.req.if_id = if_id;
+ fwcmd->params.req.max_frame_size = (uint16_t)rq->cfg.mtu;
+ fwcmd->params.req.is_rss_queue = rq->cfg.is_rss_queue;
+
+ /* fill rest of mbx */
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof (struct mbx_create_nic_rq);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ /* now post the command */
+ ret = oce_mbox_post(dev, &mbx, NULL);
+ if (ret != 0) {
+ oce_log(dev, CE_WARN, MOD_CONFIG,
+ "RQ create failed: %d", ret);
+ oce_cq_del(dev, cq);
+ return (ret);
+ }
+
+ /* interpret the response */
+ rq->rq_id = LE_16(fwcmd->params.rsp.u0.s.rq_id);
+ /* rq->rss_cpuid = fwcmd->params.rsp.u0.bits.rss_cpuid; */
+ rq->cfg.if_id = if_id;
+ rq->qstate = QCREATED;
+ rq->cq = cq;
+
+ /* set the Completion Handler */
+ rq->cq->cq_handler = oce_drain_rq_cq;
+ rq->cq->cb_arg = (void *)rq;
+ /* reset the indicies */
+ rq->ring->cidx = 0;
+ rq->ring->pidx = 0;
+ rq->buf_avail = 0;
+ return (0);
+
+}
+
+/*
+ * function to delete an RQ
+ *
+ * dev - software handle to the device
+ * rq - RQ to delete
+ *
+ * return none
+ */
+static void
 oce_rq_del(struct oce_dev *dev, struct oce_rq *rq)
 {
+	struct oce_mbx mbx;
+	struct mbx_delete_nic_rq *fwcmd;
 	ASSERT(dev != NULL);
 	ASSERT(rq != NULL);
-	(void) oce_cq_del(dev, rq->cq);
-	rq->cq = NULL;
-
-	/* Free any outstanding buffers with hardware */
-	oce_rq_discharge(rq);
+	bzero(&mbx, sizeof (struct oce_mbx));
-	/* Destroy buffer cache */
-	oce_rqb_cache_destroy(rq);
-	destroy_ring_buffer(dev, rq->ring);
-	rq->ring = NULL;
-
-	kmem_free(rq->shadow_ring,
-	    sizeof (struct rq_shadow_entry) * rq->cfg.q_len);
-	mutex_destroy(&rq->lock);
-	kmem_free(rq, sizeof (struct oce_rq));
-	atomic_dec_32(&dev->nrqs);
-	return (DDI_SUCCESS);
+	/* delete the Queue */
+	if (rq->qstate == QCREATED) {
+		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
+		fwcmd->params.req.rq_id = rq->rq_id;
+		(void) oce_destroy_q(dev, &mbx,
+		    sizeof (struct mbx_delete_nic_rq), QTYPE_RQ);
+		rq->qstate = QDELETED;
+		oce_clean_rq(rq);
+		/* Delete the associated CQ */
+		oce_cq_del(dev, rq->cq);
+		rq->cq = NULL;
+		/* free up the posted buffers on the RQ being deleted */
+		oce_rq_discharge(rq);
+	}
 } /* oce_rq_del */
/*
@@ -969,7 +1072,9 @@
{
struct oce_eqe *eqe;
uint16_t num_eqe = 0;
+ struct oce_dev *dev;
+ dev = eq->parent;
/* get the first item in eq to process */
eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
@@ -985,4 +1090,95 @@
eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
num_eqe++;
} /* for all EQEs */
+ if (num_eqe) {
+ oce_arm_eq(dev, eq->eq_id, num_eqe, B_FALSE, B_TRUE);
+ }
} /* oce_drain_eq */
+
+
+int
+oce_init_txrx(struct oce_dev *dev)
+{
+ dev->wq[0] = oce_wq_init(dev, dev->tx_ring_size, NIC_WQ_TYPE_STANDARD);
+
+ if (dev->wq[0] == NULL) {
+ goto queue_fail;
+ }
+
+ dev->rq[0] = oce_rq_init(dev, dev->rx_ring_size, OCE_RQ_BUF_SIZE,
+ OCE_RQ_MAX_FRAME_SZ, B_FALSE);
+ if (dev->rq[0] == NULL) {
+ goto queue_fail;
+ }
+ return (DDI_SUCCESS);
+queue_fail:
+ oce_fini_txrx(dev);
+ return (DDI_FAILURE);
+}
+void
+oce_fini_txrx(struct oce_dev *dev)
+{
+ if (dev->wq[0] != NULL) {
+ oce_wq_fini(dev, dev->wq[0]);
+ dev->wq[0] = NULL;
+ }
+ if (dev->rq[0] != NULL) {
+ oce_rq_fini(dev, dev->rq[0]);
+ dev->rq[0] = NULL;
+ }
+ return;
+
+}
+
+int
+oce_create_queues(struct oce_dev *dev)
+{
+
+ int i;
+ struct oce_eq *eq;
+ struct oce_mq *mq;
+
+ for (i = 0; i < dev->num_vectors; i++) {
+ eq = oce_eq_create(dev, EQ_LEN_1024, EQE_SIZE_4, 0);
+ if (eq == NULL) {
+ goto rings_fail;
+ }
+ dev->eq[i] = eq;
+ }
+ if (oce_wq_create(dev->wq[0], dev->eq[0]) != 0)
+ goto rings_fail;
+ if (oce_rq_create(dev->rq[0], dev->if_id,
+ dev->neqs > 1 ? dev->eq[1] : dev->eq[0]) != 0)
+ goto rings_fail;
+ mq = oce_mq_create(dev, dev->eq[0], 64);
+ if (mq == NULL)
+ goto rings_fail;
+ dev->mq = mq;
+ return (DDI_SUCCESS);
+rings_fail:
+ oce_delete_queues(dev);
+ return (DDI_FAILURE);
+
+}
+
+void
+oce_delete_queues(struct oce_dev *dev)
+{
+ int i;
+ if (dev->mq != NULL) {
+ oce_mq_del(dev, dev->mq);
+ dev->mq = NULL;
+ }
+
+ for (i = 0; i < dev->nrqs; i++) {
+ oce_rq_del(dev, dev->rq[i]);
+ }
+ for (i = 0; i < dev->nwqs; i++) {
+ oce_wq_del(dev, dev->wq[i]);
+ }
+ /* create as many eqs as the number of vectors */
+ for (i = 0; i < dev->num_vectors; i++) {
+ oce_eq_del(dev, dev->eq[i]);
+ dev->eq[i] = NULL;
+ }
+}
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c Fri Feb 19 18:04:10 2010 -0800
@@ -34,6 +34,8 @@
static void rx_pool_free(char *arg);
static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
struct oce_nic_rx_cqe *cqe);
+static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
+ struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int oce_rq_charge(struct oce_dev *dev, struct oce_rq *rq,
uint32_t nbufs);
static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
@@ -41,6 +43,11 @@
static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
static int oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq,
size_t size, int flags);
+static void oce_rx_insert_tag(mblk_t *mp, uint16_t vtag);
+static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
+static inline void oce_rx_drop_pkt(struct oce_rq *rq,
+ struct oce_nic_rx_cqe *cqe);
+
/*
* function to create a DMA buffer pool for RQ
@@ -214,57 +221,35 @@
uint32_t cnt;
shadow_rq = rq->shadow_ring;
- mutex_enter(&rq->lock);
-
/* check number of slots free and recharge */
nbufs = ((rq->buf_avail + nbufs) > rq->cfg.q_len) ?
(rq->cfg.q_len - rq->buf_avail) : nbufs;
-
for (cnt = 0; cnt < nbufs; cnt++) {
-
- int i = 0;
- const int retries = 1000;
-
- do {
- rqbd = oce_rqb_alloc(rq);
- if (rqbd != NULL) {
- break;
- }
- } while ((++i) < retries);
-
+ rqbd = oce_rqb_alloc(rq);
if (rqbd == NULL) {
oce_log(dev, CE_NOTE, MOD_RX, "%s %x",
"rqb pool empty @ ticks",
(uint32_t)ddi_get_lbolt());
-
break;
}
-
- i = 0;
-
if (rqbd->mp == NULL) {
+ rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
+ rqbd->rqb->size, 0, &rqbd->fr_rtn);
+ if (rqbd->mp != NULL) {
+ rqbd->mp->b_rptr =
+ (uchar_t *)rqbd->rqb->base +
+ OCE_RQE_BUF_HEADROOM;
+ }
- do {
- rqbd->mp =
- desballoc((uchar_t *)(rqbd->rqb->base),
- rqbd->rqb->size, 0, &rqbd->fr_rtn);
- if (rqbd->mp != NULL) {
- rqbd->mp->b_rptr =
- (uchar_t *)rqbd->rqb->base +
- OCE_RQE_BUF_HEADROOM;
- break;
- }
- } while ((++i) < retries);
- }
+ /*
+ * Failed again put back the buffer and continue
+ * loops for nbufs so its a finite loop
+ */
- /*
- * Failed again put back the buffer and continue
- * loops for nbufs so its a finite loop
- */
-
- if (rqbd->mp == NULL) {
- oce_rqb_free(rq, rqbd);
- continue;
+ if (rqbd->mp == NULL) {
+ oce_rqb_free(rq, rqbd);
+ continue;
+ }
}
/* fill the rqes */
@@ -295,7 +280,6 @@
rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
}
- mutex_exit(&rq->lock);
atomic_add_32(&rq->buf_avail, total_bufs);
return (total_bufs);
} /* oce_rq_charge */
@@ -314,8 +298,6 @@
struct rq_shadow_entry *shadow_rq;
shadow_rq = rq->shadow_ring;
- mutex_enter(&rq->lock);
-
/* Free the posted buffer since RQ is destroyed already */
while ((int32_t)rq->buf_avail > 0) {
rqbd = shadow_rq[rq->ring->cidx].rqbd;
@@ -323,7 +305,6 @@
RING_GET(rq->ring, 1);
rq->buf_avail--;
}
- mutex_exit(&rq->lock);
}
/*
* function to process a single packet
@@ -338,9 +319,7 @@
oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
mblk_t *mp;
- uint32_t csum_flags = 0;
int pkt_len;
- uint16_t vtag;
int32_t frag_cnt = 0;
mblk_t *mblk_prev = NULL;
mblk_t *mblk_head = NULL;
@@ -348,40 +327,10 @@
struct rq_shadow_entry *shadow_rq;
struct rq_shadow_entry *shadow_rqe;
oce_rq_bdesc_t *rqbd;
- struct ether_vlan_header *ehp;
/* Get the relevant Queue pointers */
shadow_rq = rq->shadow_ring;
pkt_len = cqe->u0.s.pkt_size;
-
- /* Hardware always Strips Vlan tag so insert it back */
- if (cqe->u0.s.vlan_tag_present) {
- shadow_rqe = &shadow_rq[rq->ring->cidx];
- /* retrive the Rx buffer from the shadow ring */
- rqbd = shadow_rqe->rqbd;
- mp = rqbd->mp;
- if (mp == NULL)
- return (NULL);
- vtag = cqe->u0.s.vlan_tag;
- (void) memmove(mp->b_rptr - VLAN_TAGSZ,
- mp->b_rptr, 2 * ETHERADDRL);
- mp->b_rptr -= VLAN_TAGSZ;
- ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
- ehp->ether_tpid = htons(ETHERTYPE_VLAN);
- ehp->ether_tci = LE_16(vtag);
-
- frag_size = (pkt_len > rq->cfg.frag_size) ?
- rq->cfg.frag_size : pkt_len;
- mp->b_wptr = mp->b_rptr + frag_size + VLAN_TAGSZ;
- mblk_head = mblk_prev = mp;
- /* Move the pointers */
- RING_GET(rq->ring, 1);
- frag_cnt++;
- pkt_len -= frag_size;
- (void) ddi_dma_sync(rqbd->rqb->dma_handle, 0, frag_size,
- DDI_DMA_SYNC_FORKERNEL);
- }
-
for (; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
shadow_rqe = &shadow_rq[rq->ring->cidx];
rqbd = shadow_rqe->rqbd;
@@ -408,16 +357,66 @@
oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
return (NULL);
}
+ atomic_add_32(&rq->pending, (cqe->u0.s.num_fragments & 0x7));
+ mblk_head->b_next = NULL;
+ return (mblk_head);
+} /* oce_rx */
- atomic_add_32(&rq->buf_avail, -frag_cnt);
- (void) oce_rq_charge(dev, rq, frag_cnt);
+/* ARGSUSED */
+static inline mblk_t *
+oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
+{
+ mblk_t *mp;
+ int pkt_len;
+ int alloc_len;
+ int32_t frag_cnt = 0;
+ int frag_size;
+ struct rq_shadow_entry *shadow_rq;
+ struct rq_shadow_entry *shadow_rqe;
+ oce_rq_bdesc_t *rqbd;
+ boolean_t tag_present = B_FALSE;
+ unsigned char *rptr;
+
+ shadow_rq = rq->shadow_ring;
+ pkt_len = cqe->u0.s.pkt_size;
+ alloc_len = pkt_len;
- /* check dma handle */
- if (oce_fm_check_dma_handle(dev, rqbd->rqb->dma_handle) !=
- DDI_FM_OK) {
- ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
+ /* Hardware always Strips Vlan tag so insert it back */
+ if (cqe->u0.s.vlan_tag_present) {
+ alloc_len += VLAN_TAGSZ;
+ tag_present = B_TRUE;
+ }
+ mp = allocb(alloc_len, BPRI_HI);
+ if (mp == NULL)
return (NULL);
+ if (tag_present) {
+ /* offset the read pointer by 4 bytes to insert tag */
+ mp->b_rptr += VLAN_TAGSZ;
}
+ rptr = mp->b_rptr;
+ mp->b_wptr = mp->b_wptr + alloc_len;
+
+ for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
+ shadow_rqe = &shadow_rq[rq->ring->cidx];
+ rqbd = shadow_rqe->rqbd;
+ frag_size = (pkt_len > rq->cfg.frag_size) ?
+ rq->cfg.frag_size : pkt_len;
+ (void) ddi_dma_sync(rqbd->rqb->dma_handle, 0, frag_size,
+ DDI_DMA_SYNC_FORKERNEL);
+ bcopy(rqbd->rqb->base + OCE_RQE_BUF_HEADROOM,
+ rptr, frag_size);
+ rptr += frag_size;
+ pkt_len -= frag_size;
+ oce_rqb_free(rq, rqbd);
+ RING_GET(rq->ring, 1);
+ }
+ return (mp);
+}
+
+static inline void
+oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
+{
+ int csum_flags = 0;
/* set flags */
if (cqe->u0.s.ip_cksum_pass) {
@@ -429,12 +428,24 @@
}
if (csum_flags) {
- (void) hcksum_assoc(mblk_head, NULL, NULL, 0, 0, 0, 0,
+ (void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
csum_flags, 0);
}
- mblk_head->b_next = NULL;
- return (mblk_head);
-} /* oce_rx */
+}
+
+static inline void
+oce_rx_insert_tag(mblk_t *mp, uint16_t vtag)
+{
+ struct ether_vlan_header *ehp;
+
+ (void) memmove(mp->b_rptr - VLAN_TAGSZ,
+ mp->b_rptr, 2 * ETHERADDRL);
+ mp->b_rptr -= VLAN_TAGSZ;
+ ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
+ ehp->ether_tpid = htons(ETHERTYPE_VLAN);
+ ehp->ether_tci = LE_16(vtag);
+}
+
/*
@@ -455,7 +466,6 @@
uint16_t num_cqe = 0;
struct oce_cq *cq;
struct oce_dev *dev;
- int32_t buf_used = 0;
if (arg == NULL)
return (0);
@@ -463,36 +473,44 @@
rq = (struct oce_rq *)arg;
dev = rq->parent;
cq = rq->cq;
-
- if (dev == NULL || cq == NULL)
- return (0);
-
- mutex_enter(&cq->lock);
+ mutex_enter(&rq->rx_lock);
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
/* dequeue till you reach an invalid cqe */
while (RQ_CQE_VALID(cqe) && (num_cqe < rq->cfg.q_len)) {
DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
- ASSERT(rq->ring->cidx != cqe->u0.s.frag_index);
- mp = oce_rx(dev, rq, cqe);
+ /* if insufficient buffers to charge then do copy */
+ if (cqe->u0.s.pkt_size < dev->rx_bcopy_limit ||
+ OCE_LIST_SIZE(&rq->rq_buf_list) < cqe->u0.s.num_fragments) {
+ mp = oce_rx_bcopy(dev, rq, cqe);
+ } else {
+ mp = oce_rx(dev, rq, cqe);
+ }
if (mp != NULL) {
+ if (cqe->u0.s.vlan_tag_present) {
+ oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
+ }
+ oce_set_rx_oflags(mp, cqe);
if (mblk_head == NULL) {
mblk_head = mblk_prev = mp;
} else {
mblk_prev->b_next = mp;
mblk_prev = mp;
}
+
+ } else {
+ oce_rx_drop_pkt(rq, cqe);
}
- buf_used += (cqe->u0.s.num_fragments & 0x7);
+ atomic_add_32(&rq->buf_avail, -(cqe->u0.s.num_fragments & 0x7));
+ (void) oce_rq_charge(dev, rq,
+ (cqe->u0.s.num_fragments & 0x7));
RQ_CQE_INVALIDATE(cqe);
RING_GET(cq->ring, 1);
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
struct oce_nic_rx_cqe);
num_cqe++;
} /* for all valid CQEs */
-
- atomic_add_32(&rq->pending, buf_used);
- mutex_exit(&cq->lock);
+ mutex_exit(&rq->rx_lock);
if (mblk_head) {
mac_rx(dev->mac_handle, NULL, mblk_head);
}
@@ -512,9 +530,6 @@
{
oce_rq_bdesc_t *rqbd;
struct oce_rq *rq;
- struct oce_dev *dev;
- int i = 0;
- const int retries = 1000;
/* During destroy, arg will be NULL */
if (arg == NULL) {
@@ -524,32 +539,15 @@
/* retrieve the pointers from arg */
rqbd = (oce_rq_bdesc_t *)(void *)arg;
rq = rqbd->rq;
- dev = rq->parent;
- if ((dev->state & STATE_MAC_STARTED) == 0) {
- return;
+ rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
+ rqbd->rqb->size, 0, &rqbd->fr_rtn);
+ if (rqbd->mp != NULL) {
+ rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base +
+ OCE_RQE_BUF_HEADROOM;
}
-
- do {
- rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
- rqbd->rqb->size, 0, &rqbd->fr_rtn);
- if (rqbd->mp != NULL) {
- rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base +
- OCE_RQE_BUF_HEADROOM;
- break;
- }
- } while ((++i) < retries);
-
oce_rqb_free(rq, rqbd);
(void) atomic_add_32(&rq->pending, -1);
- if (atomic_add_32_nv(&rq->buf_avail, 0) == 0 &&
- OCE_LIST_SIZE(&rq->rq_buf_list) > 16) {
- /*
- * Rx has stalled because of lack of buffers
- * So try to charge fully
- */
- (void) oce_rq_charge(dev, rq, rq->cfg.q_len);
- }
} /* rx_pool_free */
/*
@@ -560,19 +558,43 @@
* return none
*/
void
-oce_stop_rq(struct oce_rq *rq)
+oce_clean_rq(struct oce_rq *rq)
{
- /*
- * Wait for Packets sent up to be freed
- */
- while (rq->pending > 0) {
- drv_usecwait(10 * 1000);
+ uint16_t num_cqe = 0;
+ struct oce_cq *cq;
+ struct oce_dev *dev;
+ struct oce_nic_rx_cqe *cqe;
+ int32_t ti = 0;
+
+ dev = rq->parent;
+ cq = rq->cq;
+ cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
+ /* dequeue till you reach an invalid cqe */
+ for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
+
+ while (RQ_CQE_VALID(cqe)) {
+ DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
+ oce_rx_drop_pkt(rq, cqe);
+ atomic_add_32(&rq->buf_avail,
+ -(cqe->u0.s.num_fragments & 0x7));
+ oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
+ RQ_CQE_INVALIDATE(cqe);
+ RING_GET(cq->ring, 1);
+ cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
+ struct oce_nic_rx_cqe);
+ num_cqe++;
+ }
+ OCE_MSDELAY(1);
}
-
- rq->pending = 0;
+#if 0
+ if (num_cqe) {
+ oce_arm_cq(dev, cq->cq_id, num_cqe, B_FALSE);
+ }
/* Drain the Event queue now */
oce_drain_eq(rq->cq->eq);
-} /* oce_stop_rq */
+ return (num_cqe);
+#endif
+} /* oce_clean_rq */
/*
* function to start the RX
@@ -587,7 +609,39 @@
int ret = 0;
struct oce_dev *dev = rq->parent;
+ (void) oce_rq_charge(dev, rq, rq->cfg.q_len);
oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
- ret = oce_rq_charge(dev, rq, rq->cfg.q_len);
return (ret);
} /* oce_start_rq */
+
+/* Checks for pending rx buffers with Stack */
+int
+oce_rx_pending(struct oce_dev *dev)
+{
+ int ti;
+
+ for (ti = 0; ti < 200; ti++) {
+ if (dev->rq[0]->pending > 0) {
+ OCE_MSDELAY(1);
+ continue;
+ } else {
+ dev->rq[0]->pending = 0;
+ break;
+ }
+ }
+ return (dev->rq[0]->pending);
+}
+
+static inline void
+oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
+{
+ int frag_cnt;
+ oce_rq_bdesc_t *rqbd;
+ struct rq_shadow_entry *shadow_rq;
+ shadow_rq = rq->shadow_ring;
+ for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
+ rqbd = shadow_rq[rq->ring->cidx].rqbd;
+ oce_rqb_free(rq, rqbd);
+ RING_GET(rq->ring, 1);
+ }
+}
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_stat.c Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_stat.c Fri Feb 19 18:04:10 2010 -0800
@@ -47,8 +47,6 @@
struct oce_dev *dev;
struct oce_stat *stats;
struct rx_port_stats *port_stats;
- clock_t new;
- boolean_t is_update_stats = B_FALSE;
int ret;
if (rw == KSTAT_WRITE) {
@@ -64,33 +62,12 @@
mutex_exit(&dev->dev_lock);
return (EIO);
}
-
- /*
- * allow stats update only if enough
- * time has elapsed since last update
- */
- new = ddi_get_lbolt();
- if ((new - dev->stat_ticks) >= drv_usectohz(STAT_TIMEOUT)) {
- dev->stat_ticks = new;
- is_update_stats = B_TRUE;
- }
-
- mutex_exit(&dev->dev_lock);
-
- /* fetch the latest stats from the adapter */
- if (is_update_stats) {
- if (dev->in_stats) {
- return (EIO);
- } else {
- atomic_add_32(&dev->in_stats, 1);
- ret = oce_get_hw_stats(dev);
- atomic_add_32(&dev->in_stats, -1);
- if (ret != DDI_SUCCESS) {
- oce_log(dev, CE_WARN, MOD_CONFIG,
- "Failed to get stats:%d", ret);
- return (EIO);
- }
- }
+ ret = oce_get_hw_stats(dev);
+ if (ret != DDI_SUCCESS) {
+ oce_log(dev, CE_WARN, MOD_CONFIG,
+ "Failed to get stats:%d", ret);
+ mutex_exit(&dev->dev_lock);
+ return (EIO);
}
/* update the stats */
@@ -167,6 +144,7 @@
port_stats->tx_pause_frames;
stats->tx_control_frames.value.ul =
port_stats->tx_control_frames;
+ mutex_exit(&dev->dev_lock);
return (DDI_SUCCESS);
} /* oce_update_stats */
@@ -302,8 +280,6 @@
struct oce_dev *dev = arg;
struct oce_stat *stats;
struct rx_port_stats *port_stats;
- boolean_t is_update_stats = B_FALSE;
- clock_t new;
stats = (struct oce_stat *)dev->oce_kstats->ks_data;
port_stats = &dev->hw_stats->params.rsp.rx.port[dev->port_id];
@@ -317,26 +293,6 @@
return (EIO);
}
- /*
- * allow stats update only if enough
- * time has elapsed since last update
- */
- new = ddi_get_lbolt();
- if ((new - dev->stat_ticks) >= drv_usectohz(STAT_TIMEOUT)) {
- dev->stat_ticks = new;
- is_update_stats = B_TRUE;
- }
- mutex_exit(&dev->dev_lock);
-
- /* update hw stats. Required for netstat */
- if (is_update_stats) {
- if (dev->in_stats == 0) {
- atomic_add_32(&dev->in_stats, 1);
- (void) oce_get_hw_stats(dev);
- atomic_add_32(&dev->in_stats, -1);
- }
- }
-
switch (stat) {
case MAC_STAT_IFSPEED:
if (dev->state & STATE_MAC_STARTED)
@@ -479,7 +435,9 @@
break;
default:
+ mutex_exit(&dev->dev_lock);
return (ENOTSUP);
}
+ mutex_exit(&dev->dev_lock);
return (0);
} /* oce_m_stat */
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_tx.c Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_tx.c Fri Feb 19 18:04:10 2010 -0800
@@ -50,6 +50,8 @@
static void oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed);
static void oce_remove_vtag(mblk_t *mp);
static void oce_insert_vtag(mblk_t *mp, uint16_t vlan_tag);
+static inline int oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm);
+
static ddi_dma_attr_t tx_map_dma_attr = {
DMA_ATTR_V0, /* version number */
@@ -57,7 +59,7 @@
0xFFFFFFFFFFFFFFFFull, /* high address */
0x0000000000010000ull, /* dma counter max */
OCE_TXMAP_ALIGN, /* alignment */
- 0x1, /* burst sizes */
+ 0x7FF, /* burst sizes */
0x00000001, /* minimum transfer size */
0x00000000FFFFFFFFull, /* maximum transfer size */
0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
@@ -550,32 +552,19 @@
return (ret);
} /* oce_map_wqe */
-/*
- * function to drain a TxCQ and process its CQEs
- *
- * dev - software handle to the device
- * cq - pointer to the cq to drain
- *
- * return the number of CQEs processed
- */
-uint16_t
-oce_drain_wq_cq(void *arg)
+static inline int
+oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm)
{
struct oce_nic_tx_cqe *cqe;
uint16_t num_cqe = 0;
- struct oce_dev *dev;
- struct oce_wq *wq;
struct oce_cq *cq;
oce_wqe_desc_t *wqed;
int wqe_freed = 0;
- boolean_t is_update = B_FALSE;
+ struct oce_dev *dev;
- wq = (struct oce_wq *)arg;
cq = wq->cq;
dev = wq->parent;
-
- /* do while we do not reach a cqe that is not valid */
- mutex_enter(&cq->lock);
+ mutex_enter(&wq->txc_lock);
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
while (WQ_CQE_VALID(cqe)) {
@@ -600,27 +589,39 @@
struct oce_nic_tx_cqe);
num_cqe++;
} /* for all valid CQE */
+ mutex_exit(&wq->txc_lock);
+ if (num_cqe)
+ oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm);
+ return (num_cqe);
+} /* oce_process_tx_compl */
+/*
+ * function to drain a TxCQ and process its CQEs
+ *
+ * dev - software handle to the device
+ * cq - pointer to the cq to drain
+ *
+ * return the number of CQEs processed
+ */
+uint16_t
+oce_drain_wq_cq(void *arg)
+{
+ uint16_t num_cqe = 0;
+ struct oce_dev *dev;
+ struct oce_wq *wq;
+
+ wq = (struct oce_wq *)arg;
+ dev = wq->parent;
+
+ /* do while we do not reach a cqe that is not valid */
+ num_cqe = oce_process_tx_compl(wq, B_FALSE);
+
+ /* check if we need to restart Tx */
if (wq->resched && num_cqe) {
wq->resched = B_FALSE;
- is_update = B_TRUE;
+ mac_tx_update(dev->mac_handle);
}
- mutex_exit(&cq->lock);
-
- oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
-
- /* check if we need to restart Tx */
- mutex_enter(&wq->lock);
- if (wq->resched && num_cqe) {
- wq->resched = B_FALSE;
- is_update = B_TRUE;
- }
- mutex_exit(&wq->lock);
-
- if (is_update)
- mac_tx_update(dev->mac_handle);
-
return (num_cqe);
} /* oce_process_wq_cqe */
@@ -679,7 +680,7 @@
int32_t num_wqes;
uint16_t etype;
uint32_t ip_offset;
- uint32_t csum_flags;
+ uint32_t csum_flags = 0;
boolean_t use_copy = B_FALSE;
boolean_t tagged = B_FALSE;
uint16_t vlan_tag;
@@ -690,21 +691,35 @@
uint32_t pkt_len = 0;
int num_mblks = 0;
int ret = 0;
+ uint32_t flags = 0;
+ uint32_t mss = 0;
/* retrieve the adap priv struct ptr */
dev = wq->parent;
+ /* check if we have enough free slots */
+ if (wq->wq_free < wq->cfg.q_len/2) {
+ (void) oce_process_tx_compl(wq, B_FALSE);
+ }
+ if (wq->wq_free < OCE_MAX_TX_HDL) {
+ return (mp);
+ }
+
/* check if we should copy */
for (tmp = mp; tmp != NULL; tmp = tmp->b_cont) {
pkt_len += MBLKL(tmp);
num_mblks++;
}
+
+ /* Retrieve LSO info */
+ lso_info_get(mp, &mss, &flags);
+
/* get the offload flags */
hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL,
NULL, &csum_flags);
/* Limit should be always less than Tx Buffer Size */
- if (pkt_len < dev->bcopy_limit) {
+ if (pkt_len < dev->tx_bcopy_limit) {
use_copy = B_TRUE;
} else {
/* restrict the mapped segment to wat we support */
@@ -767,11 +782,10 @@
wqeh = (struct oce_nic_hdr_wqe *)&wqed->frag[0];
/* fill rest of wqe header fields based on packet */
- if (DB_CKSUMFLAGS(mp) & HW_LSO) {
+ if (flags & HW_LSO) {
wqeh->u0.s.lso = B_TRUE;
- wqeh->u0.s.lso_mss = DB_LSOMSS(mp);
+ wqeh->u0.s.lso_mss = mss;
}
-
if (csum_flags & HCK_FULLCKSUM) {
uint8_t *proto;
if (etype == ETHERTYPE_IP) {
@@ -798,6 +812,7 @@
wqeh->u0.s.crc = B_TRUE;
wqeh->u0.s.total_length = pkt_len;
+ /* frag count + header wqe */
num_wqes = wqed->frag_cnt;
/* h/w expects even no. of WQEs */
@@ -807,11 +822,10 @@
wqeh->u0.s.num_wqe = num_wqes;
DW_SWAP(u32ptr(&wqed->frag[0]), (wqed->wqe_cnt * NIC_WQE_SIZE));
- mutex_enter(&wq->lock);
+ mutex_enter(&wq->tx_lock);
if (num_wqes > wq->wq_free) {
atomic_inc_32(&wq->tx_deferd);
- wq->resched = B_TRUE;
- mutex_exit(&wq->lock);
+ mutex_exit(&wq->tx_lock);
goto wqe_fail;
}
atomic_add_32(&wq->wq_free, -num_wqes);
@@ -827,7 +841,7 @@
reg_value = (num_wqes << 16) | wq->wq_id;
/* Ring the door bell */
OCE_DB_WRITE32(dev, PD_TXULP_DB, reg_value);
- mutex_exit(&wq->lock);
+ mutex_exit(&wq->tx_lock);
/* free mp if copied or packet chain collapsed */
if (use_copy == B_TRUE) {
@@ -885,7 +899,7 @@
int
oce_start_wq(struct oce_wq *wq)
{
- oce_arm_cq(wq->parent, wq->cq->cq_id, 0, B_TRUE);
+ _NOTE(ARGUNUSED(wq));
return (DDI_SUCCESS);
} /* oce_start_wq */
@@ -897,18 +911,16 @@
* return none
*/
void
-oce_stop_wq(struct oce_wq *wq)
+oce_clean_wq(struct oce_wq *wq)
{
oce_wqe_desc_t *wqed;
-
- /* Max time for already posted TX to complete */
- drv_usecwait(150 * 1000); /* 150 mSec */
+ int ti;
/* Wait for already posted Tx to complete */
- while ((OCE_LIST_EMPTY(&wq->wqe_desc_list) == B_FALSE) ||
- (OCE_LIST_SIZE(&wq->wq_buf_list) != wq->cfg.nbufs) ||
- (OCE_LIST_SIZE(&wq->wq_mdesc_list) != wq->cfg.nhdl)) {
- (void) oce_drain_wq_cq(wq);
+
+ for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
+ (void) oce_process_tx_compl(wq, B_FALSE);
+ OCE_MSDELAY(1);
}
/* Free the remaining descriptors */
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_utils.c Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_utils.c Fri Feb 19 18:04:10 2010 -0800
@@ -31,16 +31,6 @@
#include <oce_impl.h>
-/*
- * inline function to get a list of pages from a dbuf
- *
- * dbuf - memory map from which to get the pa
- * pa_list - physical address array to fill
- * list_size - size of the array
- *
- * return none
- */
-
static void oce_list_del_node(OCE_LIST_NODE_T *prev_node,
OCE_LIST_NODE_T *next_node);
static void oce_list_remove(OCE_LIST_NODE_T *list_node);
--- a/usr/src/uts/common/sys/fibre-channel/fca/oce/oce_buf.h Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/sys/fibre-channel/fca/oce/oce_buf.h Fri Feb 19 18:04:10 2010 -0800
@@ -44,9 +44,10 @@
: (((_START) + (_STEP)) - (_END)))
#define OCE_MAX_TX_HDL 8
-#define OCE_MAX_TXDMA_COOKIES 5
+#define OCE_MAX_TXDMA_COOKIES 16
#define OCE_TXMAP_ALIGN 1
#define OCE_TX_MAX_FRAGS (OCE_MAX_TX_HDL * OCE_MAX_TXDMA_COOKIES)
+#define OCE_TX_LO_WM OCE_TX_MAX_FRAGS
/* helper structure to access OS addresses */
typedef union oce_addr_s {
--- a/usr/src/uts/common/sys/fibre-channel/fca/oce/oce_hw.h Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/sys/fibre-channel/fca/oce/oce_hw.h Fri Feb 19 18:04:10 2010 -0800
@@ -719,7 +719,8 @@
/* dw 1 */
uint8_t mgmt_mac_duplex;
uint8_t mgmt_mac_speed;
- uint16_t rsvd0;
+ uint16_t qos_link_speed;
+ uint32_t logical_link_status;
}rsp;
}params;
};
@@ -1072,7 +1073,7 @@
uint16_t rsvd0;
#endif
struct oce_cq_ctx cq_ctx;
- struct phys_addr pages[8];
+ struct phys_addr pages[4];
}req;
struct {
--- a/usr/src/uts/common/sys/fibre-channel/fca/oce/oce_impl.h Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/sys/fibre-channel/fca/oce/oce_impl.h Fri Feb 19 18:04:10 2010 -0800
@@ -81,7 +81,8 @@
#define OCE_WQ_NUM_BUFFERS 2048
#define OCE_WQ_BUF_SIZE 2048
#define OCE_LSO_MAX_SIZE (32 * 1024)
-#define OCE_DEFAULT_BCOPY_LIMIT 1024
+#define OCE_DEFAULT_TX_BCOPY_LIMIT 1024
+#define OCE_DEFAULT_RX_BCOPY_LIMIT 128
#define OCE_DEFAULT_WQ_EQD 16
#define OCE_MAX_RQ 8
@@ -92,7 +93,7 @@
#define OCE_NUM_USED_VECTORS 2
#define OCE_DMA_ALIGNMENT 0x1000ull
-#define OCE_DEFAULT_TX_RING_SIZE 256
+#define OCE_DEFAULT_TX_RING_SIZE 2048
#define OCE_DEFAULT_RX_RING_SIZE 1024
#define OCE_INVAL_IF_ID -1
@@ -161,9 +162,9 @@
((OCE_CFG_READ32(dev, PCICFG_INTR_CTRL) \
>> HOSTINTR_PFUNC_SHIFT) & HOSTINTR_PFUNC_MASK)
-#define DEV_LOCK(dev) { oce_chip_di(dev); mutex_enter(&dev->dev_lock); }
+#define DEV_LOCK(dev) mutex_enter(&dev->dev_lock)
-#define DEV_UNLOCK(dev) { mutex_exit(&dev->dev_lock); oce_chip_ei(dev); }
+#define DEV_UNLOCK(dev) mutex_exit(&dev->dev_lock)
enum oce_ring_size {
RING_SIZE_256 = 256,
@@ -194,7 +195,8 @@
struct oce_rq *rq[OCE_MAX_RQ]; /* RXQ Array */
struct oce_cq *cq[OCE_MAX_CQ]; /* Completion Queues */
struct oce_eq *eq[OCE_MAX_EQ]; /* Event Queues */
- uint32_t bcopy_limit; /* BCOPY Limit */
+ uint32_t tx_bcopy_limit; /* TX BCOPY Limit */
+ uint32_t rx_bcopy_limit; /* RX BCOPY Limit */
uint32_t cookie;
@@ -202,8 +204,9 @@
uint32_t in_stats;
/* Add implementation specific stuff here */
+ ddi_acc_handle_t pci_cfg_handle; /* Config space handle */
int num_bars;
- ddi_acc_handle_t cfg_handle; /* PCI Config Space Regs */
+ ddi_acc_handle_t cfg_handle; /* MMIO PCI Config Space Regs */
caddr_t csr_addr;
ddi_acc_handle_t csr_handle; /* MMIO Control Status Regs */
caddr_t db_addr;
@@ -262,6 +265,7 @@
uint32_t neqs; /* No of event queues */
uint32_t nwqs; /* No of Work Queues */
uint32_t nrqs; /* No of Receive Queues */
+ uint32_t nifs; /* No of interfaces created */
/* fw config: only relevant fields */
uint32_t config_number;
@@ -303,6 +307,7 @@
/* Interrupt handling */
int oce_setup_intr(struct oce_dev *dev);
+int oce_alloc_intr(struct oce_dev *dev);
int oce_teardown_intr(struct oce_dev *dev);
int oce_setup_handlers(struct oce_dev *dev);
void oce_remove_handler(struct oce_dev *dev);
@@ -311,6 +316,12 @@
void oce_chip_ei(struct oce_dev *dev);
void oce_chip_di(struct oce_dev *dev);
+/* HW initialisation */
+int oce_hw_init(struct oce_dev *dev);
+void oce_hw_fini(struct oce_dev *dev);
+int oce_setup_adapter(struct oce_dev *dev);
+void oce_unsetup_adapter(struct oce_dev *dev);
+
#ifdef __cplusplus
}
#endif
--- a/usr/src/uts/common/sys/fibre-channel/fca/oce/oce_io.h Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/sys/fibre-channel/fca/oce/oce_io.h Fri Feb 19 18:04:10 2010 -0800
@@ -45,6 +45,7 @@
#include <oce_buf.h>
#define DEFAULT_MQ_MBOX_TIMEOUT (5 * 1000 * 1000) /* 5 sec (in usec) */
+#define DEFAULT_DRAIN_TIME 200 /* Default Drain Time */
#define MBX_TIMEOUT_SEC 5
#define STAT_TIMEOUT 2000000 /* update stats every 2 sec */
@@ -63,6 +64,20 @@
EQE_SIZE_16 = 16
};
+enum qtype {
+ QTYPE_EQ,
+ QTYPE_MQ,
+ QTYPE_WQ,
+ QTYPE_RQ,
+ QTYPE_CQ,
+ QTYPE_RSS
+};
+
+typedef enum qstate_e {
+ QDELETED = 0x0,
+ QCREATED = 0x1
+}qstate_t;
+
struct eq_config {
/* number of entries in the eq */
enum eq_len q_len;
@@ -93,6 +108,8 @@
uint32_t ref_count;
/* ring buffer for this eq */
oce_ring_buffer_t *ring;
+ /* Queue state */
+ qstate_t qstate;
/* Lock for this queue */
kmutex_t lock;
};
@@ -108,10 +125,12 @@
enum cq_len q_len;
/* size of each item */
uint32_t item_size;
+ /* is eventable */
+ boolean_t is_eventable;
/* solicited eventable? */
- uint8_t sol_eventable;
+ boolean_t sol_eventable;
/* no delay? */
- uint8_t nodelay;
+ boolean_t nodelay;
/* dma coalescing */
uint16_t dma_coalescing;
};
@@ -134,6 +153,8 @@
void *cb_arg;
/* ring buffer for this cq */
oce_ring_buffer_t *ring;
+ /* Queue state */
+ qstate_t qstate;
/* lock */
kmutex_t lock;
};
@@ -158,16 +179,12 @@
struct oce_cq *async_cq;
/* free entries in Queue */
uint32_t mq_free;
+ /* Queue state */
+ qstate_t qstate;
+ /* lock for the mq */
+ kmutex_t lock;
};
-enum qtype {
- QTYPE_EQ,
- QTYPE_MQ,
- QTYPE_WQ,
- QTYPE_RQ,
- QTYPE_CQ,
- QTYPE_RSS
-};
/*
* utility structure that handles context of mbx
@@ -210,7 +227,11 @@
uint32_t wq_free; /* Wqe free */
uint32_t tx_deferd; /* Wqe free */
uint32_t pkt_drops; /* drops */
- kmutex_t lock; /* lock for the WQ */
+ /* Queue state */
+ qstate_t qstate;
+ kmutex_t tx_lock; /* lock for the WQ */
+ kmutex_t txc_lock; /* tx compl lock */
+ kmutex_t resched_lock; /* reschedule lock */
};
struct rq_config {
@@ -248,8 +269,11 @@
OCE_LIST_T rq_buf_list; /* Free list */
uint32_t buf_avail; /* buffer avaialable with hw */
uint32_t pending; /* Buffers sent up */
+ /* Queue state */
+ qstate_t qstate;
/* rq lock */
- kmutex_t lock;
+ kmutex_t rx_lock;
+ kmutex_t rc_lock;
};
struct link_status {
@@ -274,10 +298,6 @@
void destroy_ring_buffer(struct oce_dev *dev, oce_ring_buffer_t *ring);
/* Queues */
-struct oce_eq *oce_eq_create(struct oce_dev *dev, uint32_t q_len,
- uint32_t item_size, uint32_t eq_delay);
-
-int oce_eq_del(struct oce_dev *dev, struct oce_eq *eq);
int oce_set_eq_delay(struct oce_dev *dev, uint32_t *eq_arr,
uint32_t eq_cnt, uint32_t eq_delay);
void oce_arm_eq(struct oce_dev *dev, int16_t qid, int npopped,
@@ -301,38 +321,37 @@
int oce_POST(struct oce_dev *dev);
int oce_pci_init(struct oce_dev *dev);
void oce_pci_fini(struct oce_dev *dev);
+int oce_init_txrx(struct oce_dev *dev);
+void oce_fini_txrx(struct oce_dev *dev);
+int oce_create_queues(struct oce_dev *dev);
+void oce_delete_queues(struct oce_dev *dev);
+void oce_delete_nw_interface(struct oce_dev *dev);
+int oce_create_nw_interface(struct oce_dev *dev);
+int oce_reset_fun(struct oce_dev *dev);
/* Transmit */
-struct oce_wq *oce_wq_create(struct oce_dev *dev, struct oce_eq *eq,
- uint32_t q_len, int wq_type);
-
-int oce_wq_del(struct oce_dev *dev, struct oce_wq *wq);
struct oce_wq *oce_get_wq(struct oce_dev *dev, mblk_t *pkt);
uint16_t oce_drain_wq_cq(void *arg);
mblk_t *oce_send_packet(struct oce_wq *wq, mblk_t *mp);
int oce_start_wq(struct oce_wq *wq);
-void oce_stop_wq(struct oce_wq *wq);
+void oce_clean_wq(struct oce_wq *wq);
+
/* Recieve */
uint16_t oce_drain_rq_cq(void *arg);
-struct oce_rq *oce_rq_create(struct oce_dev *dev, struct oce_eq *eq,
- uint32_t q_len, uint32_t frag_size, uint32_t mtu,
- int16_t if_id, boolean_t rss);
-int oce_rq_del(struct oce_dev *dev, struct oce_rq *rq);
int oce_start_rq(struct oce_rq *rq);
-void oce_stop_rq(struct oce_rq *rq);
+void oce_clean_rq(struct oce_rq *rq);
+void oce_rq_discharge(struct oce_rq *rq);
+int oce_rx_pending(struct oce_dev *dev);
/* event handling */
-struct oce_mq *oce_mq_create(struct oce_dev *dev,
- struct oce_eq *eq, uint32_t q_len);
-int oce_mq_del(struct oce_dev *dev, struct oce_mq *mq);
uint16_t oce_drain_mq_cq(void *arg);
-
int oce_mq_mbox_post(struct oce_dev *dev, struct oce_mbx *mbx,
struct oce_mbx_ctx *mbxctx);
struct oce_mbx *oce_mq_get_mbx(struct oce_dev *dev);
-void oce_stop_mq(struct oce_mq *mq);
-void oce_rq_discharge(struct oce_rq *rq);
+void oce_clean_mq(struct oce_mq *mq);
+int oce_start_mq(struct oce_mq *mq);
+
/* mbx functions */
void mbx_common_req_hdr_init(struct mbx_hdr *hdr, uint8_t dom,
@@ -341,7 +360,7 @@
void mbx_nic_req_hdr_init(struct mbx_hdr *hdr, uint8_t dom, uint8_t port,
uint8_t opcode, uint32_t timeout, uint32_t pyld_len);
int oce_get_fw_version(struct oce_dev *dev);
-int oce_read_mac_addr(struct oce_dev *dev, uint16_t if_id, uint8_t perm,
+int oce_read_mac_addr(struct oce_dev *dev, uint32_t if_id, uint8_t perm,
uint8_t type, struct mac_address_format *mac);
int oce_if_create(struct oce_dev *dev, uint32_t cap_flags, uint32_t en_flags,
uint16_t vlan_tag, uint8_t *mac_addr, uint32_t *if_id);
@@ -351,26 +370,22 @@
int oce_get_link_status(struct oce_dev *dev, struct link_status *link);
int oce_set_rx_filter(struct oce_dev *dev,
struct mbx_set_common_ntwk_rx_filter *filter);
-int oce_set_multicast_table(struct oce_dev *dev, struct ether_addr *mca_table,
- uint8_t mca_cnt, boolean_t enable_promisc);
+int oce_set_multicast_table(struct oce_dev *dev, uint32_t if_id,
+ struct ether_addr *mca_table, uint16_t mca_cnt, boolean_t promisc);
int oce_get_fw_config(struct oce_dev *dev);
int oce_get_hw_stats(struct oce_dev *dev);
int oce_set_flow_control(struct oce_dev *dev, uint32_t flow_control);
int oce_get_flow_control(struct oce_dev *dev, uint32_t *flow_control);
int oce_set_promiscuous(struct oce_dev *dev, boolean_t enable);
-int oce_add_mac(struct oce_dev *dev, const uint8_t *mac, uint32_t *pmac_id);
-int oce_del_mac(struct oce_dev *dev, uint32_t *pmac_id);
-int oce_config_vlan(struct oce_dev *dev, uint8_t if_id,
+int oce_add_mac(struct oce_dev *dev, uint32_t if_id,
+ const uint8_t *mac, uint32_t *pmac_id);
+int oce_del_mac(struct oce_dev *dev, uint32_t if_id, uint32_t *pmac_id);
+int oce_config_vlan(struct oce_dev *dev, uint32_t if_id,
struct normal_vlan *vtag_arr,
uint8_t vtag_cnt, boolean_t untagged,
boolean_t enable_promisc);
int oce_config_link(struct oce_dev *dev, boolean_t enable);
-int oce_hw_init(struct oce_dev *dev);
-void oce_hw_fini(struct oce_dev *dev);
-int oce_chip_hw_init(struct oce_dev *dev);
-void oce_chip_hw_fini(struct oce_dev *dev);
-
int oce_issue_mbox(struct oce_dev *dev, queue_t *wq, mblk_t *mp,
uint32_t *payload_length);
--- a/usr/src/uts/common/sys/fibre-channel/fca/oce/oce_version.h Fri Feb 19 16:18:33 2010 -0800
+++ b/usr/src/uts/common/sys/fibre-channel/fca/oce/oce_version.h Fri Feb 19 18:04:10 2010 -0800
@@ -38,7 +38,7 @@
#define OCE_MAJOR_VERSION "1"
#define OCE_MINOR_VERSION "1"
#define OCE_RELEASE_NUM "0"
-#define OCE_PROTO_LEVEL "c"
+#define OCE_PROTO_LEVEL "e"
#define OCE_VERSION OCE_MAJOR_VERSION "." \
OCE_MINOR_VERSION \