--- a/usr/src/uts/common/Makefile.files Sat Dec 01 08:56:06 2007 -0800
+++ b/usr/src/uts/common/Makefile.files Sun Dec 02 07:26:48 2007 -0800
@@ -1471,6 +1471,9 @@
IXGB_OBJS += ixgb.o ixgb_atomic.o ixgb_chip.o ixgb_gld.o ixgb_kstats.o \
ixgb_log.o ixgb_ndd.o ixgb_rx.o ixgb_tx.o ixgb_xmii.o
+NGE_OBJS += nge_main.o nge_atomic.o nge_chip.o nge_ndd.o nge_kstats.o \
+ nge_log.o nge_rx.o nge_tx.o nge_xmii.o
+
RGE_OBJS += rge_main.o rge_chip.o rge_ndd.o rge_kstats.o rge_log.o rge_rxtx.o
ATH_OBJS += ath_aux.o ath_main.o ath_osdep.o ath_rate.o
--- a/usr/src/uts/common/Makefile.rules Sat Dec 01 08:56:06 2007 -0800
+++ b/usr/src/uts/common/Makefile.rules Sun Dec 02 07:26:48 2007 -0800
@@ -635,6 +635,10 @@
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
+$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/nge/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
$(OBJS_DIR)/%.o: $(UTSBASE)/common/io/nxge/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
@@ -1469,6 +1473,9 @@
$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/net80211/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
+$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/nge/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
$(LINTS_DIR)/%.ln: $(UTSBASE)/common/io/nxge/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nge/nge.conf Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,16 @@
+#
+# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# Configuration file for the nge driver.
+#
+# WARNING: This is an UNSTABLE configuration file. Its contents
+# may change at any time.
+#
+# Set the interrupt priority of the nge device to ipl 6 (same as
+# the ipl of other network devices). nge does not support high
+# level interrupts
+#
+interrupt-priorities=6;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nge/nge.h Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,1118 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file may contain confidential information of Nvidia
+ * and should not be distributed in source form without approval
+ * from Sun Legal.
+ */
+
+#ifndef _SYS_NGE_H
+#define _SYS_NGE_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#include <sys/types.h>
+#include <sys/stream.h>
+#include <sys/strsun.h>
+#include <sys/strsubr.h>
+#include <sys/stat.h>
+#include <sys/pci.h>
+#include <sys/note.h>
+#include <sys/modctl.h>
+#include <sys/kstat.h>
+#include <sys/ethernet.h>
+#include <sys/pattr.h>
+#include <sys/errno.h>
+#include <sys/dlpi.h>
+#include <sys/devops.h>
+#include <sys/debug.h>
+#include <sys/conf.h>
+#include <sys/callb.h>
+
+#include <netinet/ip6.h>
+
+#include <inet/common.h>
+#include <inet/ip.h>
+#include <netinet/udp.h>
+#include <inet/mi.h>
+#include <inet/nd.h>
+
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/mac.h>
+#include <sys/mac_ether.h>
+
+/*
+ * Reconfiguring the network devices requires the net_config privilege
+ * in Solaris 10+.
+ */
+extern int secpolicy_net_config(const cred_t *, boolean_t);
+
+#include <sys/netlb.h>
+#include <sys/miiregs.h>
+
+#include "nge_chip.h"
+
+#define PIO_ADDR(ngep, offset) ((void *)((caddr_t)(ngep)->io_regs+(offset)))
+/*
+ * Copy an ethernet address
+ */
+#define ethaddr_copy(src, dst) bcopy((src), (dst), ETHERADDRL)
+#define ether_eq(a, b) (bcmp((caddr_t)(a), (caddr_t)(b), (ETHERADDRL)) == 0)
+
+#define BIS(w, b) (((w) & (b)) ? B_TRUE : B_FALSE)
+#define BIC(w, b) (((w) & (b)) ? B_FALSE : B_TRUE)
+#define UPORDOWN(x) ((x) ? "up" : "down")
+
+#define NGE_DRIVER_NAME "nge"
+
+/*
+ * 'Progress' bit flags ...
+ */
+#define PROGRESS_CFG 0x0001 /* config space mapped */
+#define PROGRESS_REGS 0x0002 /* registers mapped */
+#define PROGRESS_BUFS 0x0004 /* buffers allocated */
+#define PROGRESS_RESCHED 0x0008 /* resched softint registered */
+#define PROGRESS_FACTOTUM 0x0010 /* factotum softint registered */
+#define PROGRESS_SWINT 0x0020 /* s/w interrupt registered */
+#define PROGRESS_INTR 0x0040 /* h/w interrupt registered */
+ /* and mutexen initialised */
+#define PROGRESS_HWINT 0x0080
+#define PROGRESS_PHY 0x0100 /* PHY initialised */
+#define PROGRESS_NDD 0x0200 /* NDD parameters set up */
+#define PROGRESS_KSTATS 0x0400 /* kstats created */
+#define PROGRESS_READY 0x0800 /* ready for work */
+
+#define NGE_HW_ERR 0x00
+#define NGE_HW_LINK 0x01
+#define NGE_HW_BM 0x02
+#define NGE_HW_RCHAN 0x03
+#define NGE_HW_TCHAN 0x04
+#define NGE_HW_ROM 0x05
+#define NGE_SW_PROBLEM_ID 0x06
+
+
+/*
+ * NOTES:
+ *
+ * #defines:
+ *
+ * NGE_PCI_CONFIG_RNUMBER and NGE_PCI_OPREGS_RNUMBER are the
+ * register-set numbers to use for the config space registers
+ * and the operating registers respectively. On an OBP-based
+ * machine, regset 0 refers to CONFIG space, and regset 1 will
+ * be the operating registers in MEMORY space. If an expansion
+ * ROM is fitted, it may appear as a further register set.
+ *
+ * NGE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used
+ * for the data buffers. The descriptors are always set up
+ * in CONSISTENT mode.
+ *
+ * NGE_HEADROOM defines how much space we'll leave in allocated
+ * mblks before the first valid data byte. This should be chosen
+ * to be 2 modulo 4, so that once the ethernet header (14 bytes)
+ * has been stripped off, the packet data will be 4-byte aligned.
+ * The remaining space can be used by upstream modules to prepend
+ * any headers required.
+ */
+
+
+#define NGE_PCI_OPREGS_RNUMBER 1
+#define NGE_DMA_MODE DDI_DMA_STREAMING
+#define NGE_HEADROOM 6
+#define ETHER_HEAD_LEN 14
+#ifndef VTAG_SIZE
+#define VTAG_SIZE 4
+#endif
+
+#define NGE_HALFTICK 268435456LL /* 2**28 ns! */
+#define NGE_CYCLIC_PERIOD (4*NGE_HALFTICK) /* 2**30 ns, ~1.07s */
+
+#define NGE_DEFAULT_MTU 1500
+#define NGE_DEFAULT_SDU 1518
+#define NGE_MTU_2500 2500
+#define NGE_MTU_4500 4500
+#define NGE_MAX_MTU 9000
+#define NGE_MAX_SDU 9018
+
+#define NGE_DESC_MIN 0x100
+
+#define NGE_STD_BUFSZ 1792
+#define NGE_JB2500_BUFSZ (3*1024)
+#define NGE_JB4500_BUFSZ (5*1024)
+#define NGE_JB9000_BUFSZ (9*1024)
+
+#define NGE_SEND_SLOTS_DESC_1024 1024
+#define NGE_SEND_SLOTS_DESC_3072 3072
+#define NGE_SEND_JB2500_SLOTS_DESC 3072
+#define NGE_SEND_JB4500_SLOTS_DESC 2048
+#define NGE_SEND_JB9000_SLOTS_DESC 1024
+#define NGE_SEND_LOWMEM_SLOTS_DESC 1024
+#define NGE_SEND_SLOTS_BUF 3072
+
+#define NGE_RECV_SLOTS_DESC_1024 1024
+#define NGE_RECV_SLOTS_DESC_3072 3072
+#define NGE_RECV_JB2500_SLOTS_DESC 3072
+#define NGE_RECV_JB4500_SLOTS_DESC 2048
+#define NGE_RECV_JB9000_SLOTS_DESC 1024
+#define NGE_RECV_LOWMEM_SLOTS_DESC 1024
+#define NGE_RECV_SLOTS_BUF 6144
+
+#define NGE_SPLIT_32 32
+#define NGE_SPLIT_96 96
+#define NGE_SPLIT_256 256
+
+#define NGE_RX_COPY_SIZE 512
+#define NGE_TX_COPY_SIZE 512
+#define NGE_MAP_FRAGS 3
+#define NGE_MAX_COOKIES 3
+#define NGE_MAX_DMA_HDR (4*1024)
+
+/* Used by interrupt blank */
+#define NGE_TICKS_CNT 128
+#define NGE_RX_PKT_CNT 8
+
+/*
+ * NGE-specific ioctls ...
+ */
+#define NGE_IOC ((((('N' << 8) + 'G') << 8) + 'E') << 8)
+
+/*
+ * PHY register read/write ioctls, used by cable test software
+ */
+#define NGE_MII_READ (NGE_IOC|1)
+#define NGE_MII_WRITE (NGE_IOC|2)
+
+/*
+ * SEEPROM read/write ioctls, for use by SEEPROM upgrade utility
+ *
+ * Note: SEEPROMs can only be accessed as 32-bit words, so <see_addr>
+ * must be a multiple of 4. Not all systems have a SEEPROM fitted!
+ */
+#define NGE_SEE_READ (NGE_IOC|3)
+#define NGE_SEE_WRITE (NGE_IOC|4)
+
+
+/*
+ * These diagnostic IOCTLS are enabled only in DEBUG drivers
+ */
+#define NGE_DIAG (NGE_IOC|5) /* currently a no-op */
+#define NGE_PEEK (NGE_IOC|6)
+#define NGE_POKE (NGE_IOC|7)
+#define NGE_PHY_RESET (NGE_IOC|8)
+#define NGE_SOFT_RESET (NGE_IOC|9)
+#define NGE_HARD_RESET (NGE_IOC|10)
+
+
+enum NGE_HW_OP {
+ NGE_CLEAR = 0,
+ NGE_SET
+};
+
+/*
+ * Required state according to GLD
+ */
+enum nge_mac_state {
+ NGE_MAC_UNKNOWN,
+ NGE_MAC_RESET,
+ NGE_MAC_STOPPED,
+ NGE_MAC_STARTED,
+ NGE_MAC_UNATTACH
+};
+enum loop_type {
+ NGE_LOOP_NONE = 0,
+ NGE_LOOP_EXTERNAL_100,
+ NGE_LOOP_EXTERNAL_10,
+ NGE_LOOP_INTERNAL_PHY,
+};
+
+/*
+ * (Internal) return values from send_msg subroutines
+ */
+enum send_status {
+ SEND_COPY_FAIL = -1, /* => GLD_NORESOURCES */
+ SEND_MAP_FAIL, /* => GLD_NORESOURCES */
+ SEND_COPY_SUCESS, /* OK, msg queued */
+ SEND_MAP_SUCCESS /* OK, free msg */
+};
+
+
+/*
+ * NDD parameter indexes, divided into:
+ *
+ * read-only parameters describing the hardware's capabilities
+ * read-write parameters controlling the advertised capabilities
+ * read-only parameters describing the partner's capabilities
+ * read-only parameters describing the link state
+ */
+enum {
+ PARAM_AUTONEG_CAP,
+ PARAM_PAUSE_CAP,
+ PARAM_ASYM_PAUSE_CAP,
+ PARAM_1000FDX_CAP,
+ PARAM_1000HDX_CAP,
+ PARAM_100T4_CAP,
+ PARAM_100FDX_CAP,
+ PARAM_100HDX_CAP,
+ PARAM_10FDX_CAP,
+ PARAM_10HDX_CAP,
+
+ PARAM_ADV_AUTONEG_CAP,
+ PARAM_ADV_PAUSE_CAP,
+ PARAM_ADV_ASYM_PAUSE_CAP,
+ PARAM_ADV_1000FDX_CAP,
+ PARAM_ADV_1000HDX_CAP,
+ PARAM_ADV_100T4_CAP,
+ PARAM_ADV_100FDX_CAP,
+ PARAM_ADV_100HDX_CAP,
+ PARAM_ADV_10FDX_CAP,
+ PARAM_ADV_10HDX_CAP,
+
+ PARAM_LP_AUTONEG_CAP,
+ PARAM_LP_PAUSE_CAP,
+ PARAM_LP_ASYM_PAUSE_CAP,
+ PARAM_LP_1000FDX_CAP,
+ PARAM_LP_1000HDX_CAP,
+ PARAM_LP_100T4_CAP,
+ PARAM_LP_100FDX_CAP,
+ PARAM_LP_100HDX_CAP,
+ PARAM_LP_10FDX_CAP,
+ PARAM_LP_10HDX_CAP,
+
+ PARAM_LINK_STATUS,
+ PARAM_LINK_SPEED,
+ PARAM_LINK_DUPLEX,
+
+ PARAM_LINK_AUTONEG,
+ PARAM_LINK_RX_PAUSE,
+ PARAM_LINK_TX_PAUSE,
+
+ PARAM_LOOP_MODE,
+ PARAM_TXBCOPY_THRESHOLD,
+ PARAM_RXBCOPY_THRESHOLD,
+ PARAM_RECV_MAX_PACKET,
+
+ PARAM_COUNT
+};
+
+
+/*
+ * (Internal) return values from ioctl subroutines
+ */
+enum ioc_reply {
+ IOC_INVAL = -1, /* bad, NAK with EINVAL */
+ IOC_DONE, /* OK, reply sent */
+ IOC_ACK, /* OK, just send ACK */
+ IOC_REPLY, /* OK, just send reply */
+ IOC_RESTART_ACK, /* OK, restart & ACK */
+ IOC_RESTART_REPLY /* OK, restart & reply */
+};
+
+enum nge_pp_type {
+ NGE_PP_SPACE_CFG = 0,
+ NGE_PP_SPACE_REG,
+ NGE_PP_SPACE_NIC,
+ NGE_PP_SPACE_MII,
+ NGE_PP_SPACE_NGE,
+ NGE_PP_SPACE_TXDESC,
+ NGE_PP_SPACE_TXBUFF,
+ NGE_PP_SPACE_RXDESC,
+ NGE_PP_SPACE_RXBUFF,
+ NGE_PP_SPACE_STATISTICS,
+ NGE_PP_SPACE_SEEPROM,
+ NGE_PP_SPACE_FLASH
+};
+
+/*
+ * Flag to kstat type
+ */
+enum nge_kstat_type {
+ NGE_KSTAT_RAW = 0,
+ NGE_KSTAT_STATS,
+ NGE_KSTAT_CHIPID,
+ NGE_KSTAT_DEBUG,
+ NGE_KSTAT_COUNT
+};
+
+
+/*
+ * Actual state of the nvidia's chip
+ */
+enum nge_chip_state {
+ NGE_CHIP_FAULT = -2, /* fault, need reset */
+ NGE_CHIP_ERROR, /* error, want reset */
+ NGE_CHIP_INITIAL, /* Initial state only */
+ NGE_CHIP_RESET, /* reset, need init */
+ NGE_CHIP_STOPPED, /* Tx/Rx stopped */
+ NGE_CHIP_RUNNING /* with interrupts */
+};
+
+enum nge_eeprom_size {
+ EEPROM_1K = 0,
+ EEPROM_2K,
+ EEPROM_4K,
+ EEPROM_8K,
+ EEPROM_16K,
+ EEPROM_32K,
+ EEPROM_64K
+};
+
+enum nge_eeprom_access_wid {
+ ACCESS_8BIT = 0,
+ ACCESS_16BIT
+};
+
+/*
+ * MDIO operation
+ */
+enum nge_mdio_operation {
+ NGE_MDIO_READ = 0,
+ NGE_MDIO_WRITE
+};
+
+/*
+ * Speed selection
+ */
+enum nge_speed {
+ UNKOWN_SPEED = 0,
+ NGE_10M,
+ NGE_100M,
+ NGE_1000M
+};
+
+/*
+ * Duplex selection
+ */
+enum nge_duplex {
+ UNKOWN_DUPLEX = 0,
+ NGE_HD,
+ NGE_FD
+};
+
+typedef struct {
+ ether_addr_t addr; /* in canonical form */
+ uint8_t spare;
+ uint8_t set; /* nonzero => valid */
+} nge_mac_addr_t;
+
+struct nge;
+
+/*
+ * Named Data (ND) Parameter Management Structure
+ */
+typedef struct {
+ int ndp_info;
+ int ndp_min;
+ int ndp_max;
+ int ndp_val;
+ char *ndp_name;
+} nd_param_t;
+
+
+#define CHIP_FLAG_COPPER 0x40
+
+/*
+ * Collection of physical-layer functions to:
+ * (re)initialise the physical layer
+ * update it to match software settings
+ * check for link status change
+ */
+typedef struct {
+ void (*phys_restart)(struct nge *);
+ void (*phys_update)(struct nge *);
+ boolean_t (*phys_check)(struct nge *);
+} phys_ops_t;
+
+struct nge_see_rw {
+ uint32_t see_addr; /* Byte offset within SEEPROM */
+ uint32_t see_data; /* Data read/data to write */
+};
+
+typedef struct {
+ uint64_t pp_acc_size; /* in bytes: 1,2,4,8 */
+ uint64_t pp_acc_space; /* See #defines below */
+ uint64_t pp_acc_offset;
+ uint64_t pp_acc_data; /* output for peek */
+ /* input for poke */
+} nge_peekpoke_t;
+
+typedef uintptr_t nge_regno_t; /* register # (offset) */
+
+typedef struct _mul_list {
+ struct _mul_list *next;
+ uint32_t ref_cnt;
+ ether_addr_t mul_addr;
+}mul_item, *pmul_item;
+
+/*
+ * Describes one chunk of allocated DMA-able memory
+ *
+ * In some cases, this is a single chunk as allocated from the system;
+ * but we also use this structure to represent slices carved off such
+ * a chunk. Even when we don't really need all the information, we
+ * use this structure as a convenient way of correlating the various
+ * ways of looking at a piece of memory (kernel VA, IO space DVMA,
+ * handle+offset, etc).
+ */
+typedef struct dma_area
+{
+
+ caddr_t private; /* pointer to nge */
+ frtn_t rx_recycle; /* recycle function */
+ mblk_t *mp;
+ ddi_acc_handle_t acc_hdl; /* handle for memory */
+ void *mem_va; /* CPU VA of memory */
+ uint32_t nslots; /* number of slots */
+ uint32_t size; /* size per slot */
+ size_t alength; /* allocated size */
+ /* >= product of above */
+ ddi_dma_handle_t dma_hdl; /* DMA handle */
+ offset_t offset; /* relative to handle */
+ ddi_dma_cookie_t cookie; /* associated cookie */
+ uint32_t ncookies;
+ uint32_t signature; /* buffer signature */
+ /* for deciding to free */
+ /* or to reuse buffers */
+ boolean_t rx_delivered; /* hold by upper layer */
+ struct dma_area *next;
+} dma_area_t;
+
+#define HOST_OWN 0x00000000
+#define CONTROLER_OWN 0x00000001
+#define NGE_END_PACKET 0x00000002
+
+
+typedef struct nge_dmah_node
+{
+ struct nge_dmah_node *next;
+ ddi_dma_handle_t hndl;
+} nge_dmah_node_t;
+
+typedef struct nge_dmah_list
+{
+ nge_dmah_node_t *head;
+ nge_dmah_node_t *tail;
+} nge_dmah_list_t;
+
+/*
+ * Software version of the Recv Descriptor
+ * There's one of these for each recv buffer (up to 512 per ring)
+ */
+typedef struct sw_rx_sbd {
+
+ dma_area_t desc; /* (const) related h/w */
+ /* descriptor area */
+ dma_area_t *bufp; /* (const) related */
+ /* buffer area */
+ uint8_t flags;
+} sw_rx_sbd_t;
+
+/*
+ * Software version of the send Buffer Descriptor
+ * There's one of these for each send buffer (up to 512 per ring)
+ */
+typedef struct sw_tx_sbd {
+
+ dma_area_t desc; /* (const) related h/w */
+ /* descriptor area */
+ dma_area_t pbuf; /* (const) related */
+ /* buffer area */
+ void (*tx_recycle)(struct sw_tx_sbd *);
+ uint32_t flags;
+ mblk_t *mp; /* related mblk, if any */
+ nge_dmah_list_t mp_hndl;
+ uint32_t frags;
+ uint32_t ncookies; /* dma cookie number */
+
+} sw_tx_sbd_t;
+
+/*
+ * Software Receive Buffer (Producer) Ring Control Block
+ * There's one of these for each receiver producer ring (up to 3),
+ * but each holds buffers of a different size.
+ */
+typedef struct buff_ring {
+
+ uint64_t nslots; /* descriptor area */
+ struct nge *ngep; /* (const) containing */
+ /* driver soft state */
+ /* initialise same */
+ uint64_t rx_hold;
+ sw_rx_sbd_t *sw_rbds; /* software descriptors */
+ sw_rx_sbd_t *free_rbds; /* free ring */
+ dma_area_t *free_list; /* available buffer queue */
+ dma_area_t *recycle_list; /* recycling buffer queue */
+ kmutex_t recycle_lock[1];
+ uint32_t buf_sign;
+ boolean_t rx_bcopy;
+} buff_ring_t;
+
+/*
+ * Software Receive (Return) Ring Control Block
+ * There's one of these for each receiver return ring (up to 16).
+ */
+typedef struct recv_ring {
+ /*
+ * The elements flagged (const) in the comments below are
+ * set up once during initialisation and thereafter unchanged.
+ */
+ dma_area_t desc; /* (const) related h/w */
+ /* descriptor area */
+ struct nge *ngep; /* (const) containing */
+ /* driver soft state */
+ uint16_t prod_index; /* (const) ptr to h/w */
+ /* "producer index" */
+ mac_resource_handle_t handle;
+} recv_ring_t;
+
+
+
+/*
+ * Software Send Ring Control Block
+ * There's one of these for each of (up to) 1 send rings
+ */
+typedef struct send_ring {
+ /*
+ * The elements flagged (const) in the comments below are
+ * set up once during initialisation and thereafter unchanged.
+ */
+ dma_area_t desc; /* (const) related h/w */
+ /* descriptor area */
+ dma_area_t buf[NGE_SEND_SLOTS_BUF];
+ /* buffer area(s) */
+ struct nge *ngep; /* (const) containing */
+ /* driver soft state */
+
+ uint64_t tx_hwmark;
+ uint64_t tx_lwmark;
+
+ /*
+ * The tx_lock must be held when updating
+ * the s/w producer index
+ * (tx_next)
+ */
+ kmutex_t tx_lock[1]; /* serialize h/w update */
+ uint64_t tx_next; /* next slot to use */
+ uint64_t tx_flow;
+
+ /*
+ * These counters/indexes are manipulated in the transmit
+ * path using atomics rather than mutexes for speed
+ */
+ uint64_t tx_free; /* # of slots available */
+
+ /*
+ * index (tc_next).
+ */
+ kmutex_t tc_lock[1];
+ uint64_t tc_next; /* next slot to recycle */
+ /* ("consumer index") */
+
+ sw_tx_sbd_t *sw_sbds; /* software descriptors */
+
+ kmutex_t dmah_lock;
+ nge_dmah_list_t dmah_free;
+ nge_dmah_node_t dmahndl[NGE_MAX_DMA_HDR];
+
+} send_ring_t;
+
+
+typedef struct {
+ uint32_t businfo; /* from private reg */
+ uint16_t command; /* saved during attach */
+
+ uint16_t vendor; /* vendor-id */
+ uint16_t device; /* device-id */
+ uint16_t subven; /* subsystem-vendor-id */
+ uint16_t subdev; /* subsystem-id */
+ uint8_t class_code;
+ uint8_t revision; /* revision-id */
+ uint8_t clsize; /* cache-line-size */
+ uint8_t latency; /* latency-timer */
+ uint8_t flags;
+
+ uint16_t phy_type; /* Fiber module type */
+ uint64_t hw_mac_addr; /* from chip register */
+ nge_mac_addr_t vendor_addr; /* transform of same */
+} chip_info_t;
+
+
+typedef struct {
+ offset_t index;
+ char *name;
+} nge_ksindex_t;
+
+typedef struct {
+ uint64_t tso_err_mss;
+ uint64_t tso_dis;
+ uint64_t tso_err_nosum;
+ uint64_t tso_err_hov;
+ uint64_t tso_err_huf;
+ uint64_t tso_err_l2;
+ uint64_t tso_err_ip;
+ uint64_t tso_err_l4;
+ uint64_t tso_err_tcp;
+ uint64_t hsum_err_ip;
+ uint64_t hsum_err_l4;
+}fe_statistics_t;
+
+/*
+ * statistics parameters to tune the driver
+ */
+typedef struct {
+ uint64_t intr_count;
+ uint64_t intr_lval;
+ uint64_t recv_realloc;
+ uint64_t poll_time;
+ uint64_t recy_free;
+ uint64_t recv_count;
+ uint64_t xmit_count;
+ uint64_t obytes;
+ uint64_t rbytes;
+ uint64_t mp_alloc_err;
+ uint64_t dma_alloc_err;
+ uint64_t kmem_alloc_err;
+ uint64_t load_context;
+ uint64_t ip_hwsum_err;
+ uint64_t tcp_hwsum_err;
+ uint64_t rx_nobuffer;
+ uint64_t rx_err;
+ uint64_t tx_stop_err;
+ uint64_t tx_stall;
+ uint64_t tx_rsrv_fail;
+ uint64_t tx_resched;
+ fe_statistics_t fe_err;
+}nge_sw_statistics_t;
+
+typedef struct {
+ nge_hw_statistics_t hw_statistics;
+ nge_sw_statistics_t sw_statistics;
+}nge_statistics_t;
+
+struct nge_desc_attr {
+
+ size_t rxd_size;
+ size_t txd_size;
+
+ ddi_dma_attr_t *dma_attr;
+ ddi_dma_attr_t *tx_dma_attr;
+
+ void (*rxd_fill)(void *, const ddi_dma_cookie_t *, size_t);
+ uint32_t (*rxd_check)(const void *, size_t *);
+
+ void (*txd_fill)(void *, const ddi_dma_cookie_t *, size_t,
+ uint32_t, boolean_t);
+
+ uint32_t (*txd_check)(const void *, size_t *);
+};
+
+typedef struct nge_desc_attr nge_desc_attr_t;
+
+/*
+ * Structure used to hold the device-specific config parameters.
+ * The setting of such parameters may not be consistent with the
+ * hardware feature of the device. It's used for software purpose.
+ */
+typedef struct nge_dev_spec_param {
+ boolean_t msi; /* specifies msi support */
+ boolean_t msi_x; /* specifies msi_x support */
+ boolean_t vlan; /* specifies vlan support */
+ boolean_t tx_pause_frame; /* specifies tx pause frame support */
+ boolean_t rx_pause_frame; /* specifies rx pause frame support */
+ boolean_t jumbo; /* jumbo frame support */
+ boolean_t tx_rx_64byte; /* set the max tx/rx prd fetch size */
+ boolean_t rx_hw_checksum; /* specifies rx hw checksum feature */
+ uint32_t tx_hw_checksum; /* specifies tx hw checksum feature */
+ uint32_t desc_type; /* specifies descriptor type */
+ uint32_t rx_desc_num; /* specifies rx descriptor number */
+ uint32_t tx_desc_num; /* specifies tx descriptor number */
+ uint32_t nge_split; /* specifies the split number */
+} nge_dev_spec_param_t;
+
+typedef struct nge {
+ /*
+ * These fields are set by attach() and unchanged thereafter ...
+ */
+ dev_info_t *devinfo; /* device instance */
+ mac_handle_t mh; /* mac module handle */
+ chip_info_t chipinfo;
+ ddi_acc_handle_t cfg_handle; /* DDI I/O handle */
+ ddi_acc_handle_t io_handle; /* DDI I/O handle */
+ void *io_regs; /* mapped registers */
+
+ ddi_periodic_t periodic_id; /* periodical callback */
+ uint32_t factotum_flag;
+ ddi_softint_handle_t factotum_hdl; /* factotum callback */
+ ddi_softint_handle_t resched_hdl; /* reschedule callback */
+ uint_t soft_pri;
+
+ ddi_intr_handle_t *htable; /* for array of interrupts */
+ int intr_type; /* type of interrupt */
+ int intr_actual_cnt; /* alloc intrs count */
+ int intr_req_cnt; /* request intrs count */
+ uint_t intr_pri; /* interrupt priority */
+ int intr_cap; /* interrupt capabilities */
+
+ uint32_t progress; /* attach tracking */
+ uint32_t debug; /* flag to debug function */
+
+ char ifname[8]; /* "nge0" ... "nge999" */
+
+
+ enum nge_mac_state nge_mac_state; /* definitions above */
+ enum nge_chip_state nge_chip_state; /* definitions above */
+ boolean_t promisc;
+ boolean_t suspended;
+
+ int resched_needed;
+ uint32_t default_mtu;
+ uint32_t max_sdu;
+ uint32_t buf_size;
+ uint32_t rx_desc;
+ uint32_t tx_desc;
+ uint32_t rx_buf;
+ uint32_t nge_split;
+ uint32_t watchdog;
+ uint32_t lowmem_mode;
+
+
+ /*
+ * Runtime read-write data starts here ...
+ * 1 Receive Rings
+ * 1 Send Rings
+ *
+ * Note: they're not necessarily all used.
+ */
+ struct buff_ring buff[1];
+ struct recv_ring recv[1];
+ struct send_ring send[1];
+
+
+ kmutex_t genlock[1];
+ krwlock_t rwlock[1];
+ kmutex_t softlock[1];
+ uint32_t intr_masks;
+ boolean_t poll;
+ boolean_t ch_intr_mode;
+ uint32_t recv_count;
+ uint32_t poll_time;
+ uint32_t sw_intr_intv;
+ nge_mac_addr_t cur_uni_addr;
+ uint32_t rx_datahwm;
+ uint32_t rx_prdlwm;
+ uint32_t rx_prdhwm;
+ uint32_t rx_def;
+ uint32_t desc_mode;
+
+ mul_item *pcur_mulist;
+ nge_mac_addr_t cur_mul_addr;
+ nge_mac_addr_t cur_mul_mask;
+
+ nge_desc_attr_t desc_attr;
+
+ /*
+ * Link state data (protected by genlock)
+ */
+ int32_t link_state; /* See GLD #defines */
+ uint32_t stall_cknum; /* Stall check number */
+
+ uint32_t phy_xmii_addr;
+ uint32_t phy_id;
+ uint32_t phy_mode;
+ const phys_ops_t *physops;
+ uint16_t phy_gen_status;
+
+ uint32_t param_loop_mode;
+
+ /*
+ * NDD parameters (protected by genlock)
+ */
+ caddr_t nd_data_p;
+ nd_param_t nd_params[PARAM_COUNT];
+
+ kstat_t *nge_kstats[NGE_KSTAT_COUNT];
+ nge_statistics_t statistics;
+
+ nge_dev_spec_param_t dev_spec_param;
+
+} nge_t;
+
+extern const nge_ksindex_t nge_statistics[];
+
+/*
+ * Shorthand for the NDD parameters
+ */
+#define param_adv_autoneg nd_params[PARAM_ADV_AUTONEG_CAP].ndp_val
+#define param_adv_pause nd_params[PARAM_ADV_PAUSE_CAP].ndp_val
+#define param_adv_asym_pause nd_params[PARAM_ADV_ASYM_PAUSE_CAP].ndp_val
+#define param_adv_1000fdx nd_params[PARAM_ADV_1000FDX_CAP].ndp_val
+#define param_adv_1000hdx nd_params[PARAM_ADV_1000HDX_CAP].ndp_val
+#define param_adv_100fdx nd_params[PARAM_ADV_100FDX_CAP].ndp_val
+#define param_adv_100hdx nd_params[PARAM_ADV_100HDX_CAP].ndp_val
+#define param_adv_10fdx nd_params[PARAM_ADV_10FDX_CAP].ndp_val
+#define param_adv_10hdx nd_params[PARAM_ADV_10HDX_CAP].ndp_val
+
+#define param_lp_autoneg nd_params[PARAM_LP_AUTONEG_CAP].ndp_val
+#define param_lp_pause nd_params[PARAM_LP_PAUSE_CAP].ndp_val
+#define param_lp_asym_pause nd_params[PARAM_LP_ASYM_PAUSE_CAP].ndp_val
+#define param_lp_1000fdx nd_params[PARAM_LP_1000FDX_CAP].ndp_val
+#define param_lp_1000hdx nd_params[PARAM_LP_1000HDX_CAP].ndp_val
+#define param_lp_100fdx nd_params[PARAM_LP_100FDX_CAP].ndp_val
+#define param_lp_100hdx nd_params[PARAM_LP_100HDX_CAP].ndp_val
+#define param_lp_10fdx nd_params[PARAM_LP_10FDX_CAP].ndp_val
+#define param_lp_10hdx nd_params[PARAM_LP_10HDX_CAP].ndp_val
+
+#define param_link_up nd_params[PARAM_LINK_STATUS].ndp_val
+#define param_link_speed nd_params[PARAM_LINK_SPEED].ndp_val
+#define param_link_duplex nd_params[PARAM_LINK_DUPLEX].ndp_val
+
+#define param_link_autoneg nd_params[PARAM_LINK_AUTONEG].ndp_val
+#define param_link_rx_pause nd_params[PARAM_LINK_RX_PAUSE].ndp_val
+#define param_link_tx_pause nd_params[PARAM_LINK_TX_PAUSE].ndp_val
+
+#define param_loop_mode nd_params[PARAM_LOOP_MODE].ndp_val
+
+#define param_txbcopy_threshold nd_params[PARAM_TXBCOPY_THRESHOLD].ndp_val
+#define param_rxbcopy_threshold nd_params[PARAM_RXBCOPY_THRESHOLD].ndp_val
+#define param_recv_max_packet nd_params[PARAM_RECV_MAX_PACKET].ndp_val
+
+/*
+ * Sync a DMA area described by a dma_area_t
+ */
+#define DMA_SYNC(area, flag) ((void) ddi_dma_sync((area).dma_hdl, \
+ (area).offset, (area).alength, (flag)))
+
+/*
+ * Find the (kernel virtual) address of block of memory
+ * described by a dma_area_t
+ */
+#define DMA_VPTR(area) ((area).mem_va)
+
+/*
+ * Zero a block of memory described by a dma_area_t
+ */
+#define DMA_ZERO(area) bzero(DMA_VPTR(area), (area).alength)
+
+/*
+ * Next/Prev value of a cyclic index
+ */
+#define NEXT(index, limit) ((index) + 1 < (limit) ? (index) + 1 : 0)
+#define PREV(index, limit) (0 == (index) ? (limit - 1) : (index) - 1)
+
+#define NEXT_INDEX(ndx, num, lim)\
+ (((ndx) + (num) < (lim)) ? ((ndx) + (num)) : ((ndx) + (num) - (lim)))
+
+
+/*
+ * Property lookups
+ */
+#define NGE_PROP_EXISTS(d, n) ddi_prop_exists(DDI_DEV_T_ANY, (d), \
+ DDI_PROP_DONTPASS, (n))
+#define NGE_PROP_GET_INT(d, n) ddi_prop_get_int(DDI_DEV_T_ANY, (d), \
+ DDI_PROP_DONTPASS, (n), -1)
+
+
+/*
+ * Debugging ...
+ */
+#ifdef DEBUG
+#define NGE_DEBUGGING 1
+#else
+#define NGE_DEBUGGING 0
+#endif /* DEBUG */
+
+/*
+ * Bit flags in the 'debug' word ...
+ */
+#define NGE_DBG_STOP 0x00000001 /* early debug_enter() */
+#define NGE_DBG_TRACE 0x00000002 /* general flow tracing */
+
+#define NGE_DBG_MII 0x00000010 /* low-level MII access */
+#define NGE_DBG_CHIP 0x00000020 /* low(ish)-level code */
+
+#define NGE_DBG_RECV 0x00000100 /* receive-side code */
+#define NGE_DBG_SEND 0x00000200 /* packet-send code */
+
+#define NGE_DBG_INIT 0x00100000 /* initialisation */
+#define NGE_DBG_NEMO 0x00200000 /* MAC layer entry points */
+#define NGE_DBG_STATS 0x00400000 /* statistics */
+
+#define NGE_DBG_BADIOC 0x01000000 /* unknown ioctls */
+
+#define NGE_DBG_NDD 0x10000000 /* NDD operations */
+
+
+
+/*
+ * 'Do-if-debugging' macro. The parameter <command> should be one or more
+ * C statements (but without the *final* semicolon), which will either be
+ * compiled inline or completely ignored, depending on the NGE_DEBUGGING
+ * compile-time flag.
+ *
+ * You should get a compile-time error (at least on a DEBUG build) if
+ * your statement isn't actually a statement, rather than unexpected
+ * run-time behaviour caused by unintended matching of if-then-elses etc.
+ *
+ * Note that the NGE_DDB() macro itself can only be used as a statement,
+ * not an expression, and should always be followed by a semicolon.
+ */
+#if NGE_DEBUGGING
+#define NGE_DDB(command) do { \
+ { command; } \
+ _NOTE(CONSTANTCONDITION) \
+ } while (0)
+#else /* NGE_DEBUGGING */
+#define NGE_DDB(command)
+/*
+ * Old way of debugging. This is a poor way, as it leaves empty
+ * statements that cause lint to croak.
+ * #define NGE_DDB(command) do { \
+ * { _NOTE(EMPTY); } \
+ * _NOTE(CONSTANTCONDITION) \
+ * } while (0)
+ */
+#endif /* NGE_DEBUGGING */
+
+/*
+ * 'Internal' macros used to construct the TRACE/DEBUG macros below.
+ * These provide the primitive conditional-call capability required.
+ * Note: the parameter <args> is a parenthesised list of the actual
+ * printf-style arguments to be passed to the debug function ...
+ */
+#define NGE_XDB(b, w, f, args) NGE_DDB(if ((b) & (w)) f args)
+#define NGE_GDB(b, args) NGE_XDB(b, nge_debug, (*nge_gdb()), args)
+#define NGE_LDB(b, args) NGE_XDB(b, ngep->debug, \
+ (*nge_db(ngep)), args)
+#define NGE_CDB(f, args) NGE_XDB(NGE_DBG, ngep->debug, f, args)
+
+/*
+ * Conditional-print macros.
+ *
+ * Define NGE_DBG to be the relevant member of the set of NGE_DBG_* values
+ * above before using the NGE_GDEBUG() or NGE_DEBUG() macros. The 'G'
+ * versions look at the Global debug flag word (nge_debug); the non-G
+ * versions look in the per-instance data (ngep->debug) and so require a
+ * variable called 'ngep' to be in scope (and initialised!) before use.
+ *
+ * You could redefine NGE_TRC too if you really need two different
+ * flavours of debugging output in the same area of code, but I don't
+ * really recommend it.
+ *
+ * Note: the parameter <args> is a parenthesised list of the actual
+ * arguments to be passed to the debug function, usually a printf-style
+ * format string and corresponding values to be formatted.
+ */
+
+#define NGE_TRC NGE_DBG_TRACE
+
+#define NGE_GTRACE(args) NGE_GDB(NGE_TRC, args)
+#define NGE_GDEBUG(args) NGE_GDB(NGE_DBG, args)
+#define NGE_TRACE(args) NGE_LDB(NGE_TRC, args)
+#define NGE_DEBUG(args) NGE_LDB(NGE_DBG, args)
+
+/*
+ * Debug-only action macros
+ */
+
+
+#define NGE_REPORT(args) NGE_DDB(nge_log args)
+
+boolean_t nge_atomic_decrease(uint64_t *count_p, uint64_t n);
+void nge_atomic_increase(uint64_t *count_p, uint64_t n);
+
+int nge_alloc_dma_mem(nge_t *ngep, size_t memsize,
+ ddi_device_acc_attr_t *attr_p, uint_t dma_flags, dma_area_t *dma_p);
+void nge_free_dma_mem(dma_area_t *dma_p);
+int nge_restart(nge_t *ngep);
+void nge_wake_factotum(nge_t *ngep);
+
+uint8_t nge_reg_get8(nge_t *ngep, nge_regno_t regno);
+void nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data);
+uint16_t nge_reg_get16(nge_t *ngep, nge_regno_t regno);
+void nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data);
+uint32_t nge_reg_get32(nge_t *ngep, nge_regno_t regno);
+void nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data);
+uint_t nge_chip_factotum(caddr_t args1, caddr_t args2);
+void nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset);
+void nge_init_dev_spec_param(nge_t *ngep);
+int nge_chip_stop(nge_t *ngep, boolean_t fault);
+void nge_restore_mac_addr(nge_t *ngep);
+int nge_chip_reset(nge_t *ngep);
+int nge_chip_start(nge_t *ngep);
+void nge_chip_sync(nge_t *ngep);
+
+uint_t nge_chip_intr(caddr_t arg1, caddr_t arg2);
+enum ioc_reply nge_chip_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp);
+
+void nge_phys_init(nge_t *ngep);
+boolean_t nge_phy_reset(nge_t *ngep);
+uint16_t nge_mii_get16(nge_t *ngep, nge_regno_t regno);
+void nge_mii_put16(nge_t *ngep, nge_regno_t regno, uint16_t data);
+
+void nge_recv_recycle(caddr_t arg);
+void nge_receive(nge_t *ngep);
+
+uint_t nge_reschedule(caddr_t args1, caddr_t args2);
+mblk_t *nge_m_tx(void *arg, mblk_t *mp);
+
+void nge_tx_recycle(nge_t *ngep, boolean_t is_intr);
+void nge_tx_recycle_all(nge_t *ngep);
+
+enum ioc_reply nge_nd_ioctl(nge_t *ngep, queue_t *wq,
+ mblk_t *mp, struct iocblk *iocp);
+int nge_nd_init(nge_t *ngep);
+void nge_nd_cleanup(nge_t *ngep);
+
+
+void nge_init_kstats(nge_t *ngep, int instance);
+void nge_fini_kstats(nge_t *ngep);
+int nge_m_stat(void *arg, uint_t stat, uint64_t *val);
+
+uint32_t nge_atomic_shl32(uint32_t *sp, uint_t count);
+
+void nge_log(nge_t *ngep, const char *fmt, ...);
+void nge_problem(nge_t *ngep, const char *fmt, ...);
+void nge_error(nge_t *ngep, const char *fmt, ...);
+void
+nge_report(nge_t *ngep, uint8_t error_id);
+
+void (*nge_db(nge_t *ngep))(const char *fmt, ...);
+void (*nge_gdb(void))(const char *fmt, ...);
+extern uint32_t nge_debug;
+
+/*
+ * DESC MODE 2
+ */
+
+extern void nge_sum_rxd_fill(void *, const ddi_dma_cookie_t *, size_t);
+extern uint32_t nge_sum_rxd_check(const void *, size_t *);
+
+extern void nge_sum_txd_fill(void *, const ddi_dma_cookie_t *,
+ size_t, uint32_t, boolean_t);
+extern uint32_t nge_sum_txd_check(const void *, size_t *);
+
+/*
+ * DESC MODE 3
+ */
+
+extern void nge_hot_rxd_fill(void *, const ddi_dma_cookie_t *, size_t);
+extern uint32_t nge_hot_rxd_check(const void *, size_t *);
+
+extern void nge_hot_txd_fill(void *, const ddi_dma_cookie_t *,
+ size_t, uint32_t, boolean_t);
+extern uint32_t nge_hot_txd_check(const void *, size_t *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_NGE_H */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nge/nge_atomic.c Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file may contain confidential information of Nvidia
+ * and should not be distributed in source form without approval
+ * from Sun Legal.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "nge.h"
+
+/*
+ * Atomically subtract <n> from <*count_p>, but only if the result
+ * would remain non-negative.  Returns B_TRUE when the decrement was
+ * applied, B_FALSE when the counter was too small.
+ */
+boolean_t
+nge_atomic_decrease(uint64_t *count_p, uint64_t n)
+{
+	uint64_t old;
+
+	/* ATOMICALLY */
+	for (;;) {
+		old = *count_p;
+		if (old < n)
+			return (B_FALSE);
+		if (cas64(count_p, old, old - n) == old)
+			return (B_TRUE);
+	}
+}
+
+/*
+ * Atomically add <n> to the 64-bit counter at <*count_p>.
+ */
+void
+nge_atomic_increase(uint64_t *count_p, uint64_t n)
+{
+	uint64_t old;
+
+	/* ATOMICALLY */
+	do {
+		old = *count_p;
+	} while (cas64(count_p, old, old + n) != old);
+}
+
+
+/*
+ * Atomically shift the 32-bit word at <*sp> left by <count> bits,
+ * returning the value the word held *before* the shift was applied.
+ */
+uint32_t
+nge_atomic_shl32(uint32_t *sp, uint_t count)
+{
+	uint32_t old;
+
+	/* ATOMICALLY */
+	do {
+		old = *sp;
+	} while (cas32(sp, old, old << count) != old);
+
+	return (old);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nge/nge_chip.c Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,1967 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file may contain confidential information of Nvidia
+ * and should not be distributed in source form without approval
+ * from Sun Legal.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "nge.h"
+static uint32_t nge_watchdog_count = 1 << 29;
+extern boolean_t nge_enable_msi;
+static void nge_sync_mac_modes(nge_t *);
+
+#undef NGE_DBG
+#define NGE_DBG NGE_DBG_CHIP
+
+/*
+ * Operating register get/set access routines
+ *
+ * All device register access goes through these wrappers: the
+ * register offset is converted to a PIO address with PIO_ADDR()
+ * and the access is made through the DDI access handle so the
+ * framework can apply any required byte-swapping.
+ */
+uint8_t nge_reg_get8(nge_t *ngep, nge_regno_t regno);
+#pragma	inline(nge_reg_get8)
+
+/* Read the 8-bit device register at offset <regno> */
+uint8_t
+nge_reg_get8(nge_t *ngep, nge_regno_t regno)
+{
+	NGE_TRACE(("nge_reg_get8($%p, 0x%lx)", (void *)ngep, regno));
+
+	return (ddi_get8(ngep->io_handle, PIO_ADDR(ngep, regno)));
+}
+
+void nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data);
+#pragma	inline(nge_reg_put8)
+
+/* Write <data> to the 8-bit device register at offset <regno> */
+void
+nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data)
+{
+	NGE_TRACE(("nge_reg_put8($%p, 0x%lx, 0x%x)",
+	    (void *)ngep, regno, data));
+	ddi_put8(ngep->io_handle, PIO_ADDR(ngep, regno), data);
+
+}
+
+uint16_t nge_reg_get16(nge_t *ngep, nge_regno_t regno);
+#pragma	inline(nge_reg_get16)
+
+/* Read the 16-bit device register at offset <regno> */
+uint16_t
+nge_reg_get16(nge_t *ngep, nge_regno_t regno)
+{
+	NGE_TRACE(("nge_reg_get16($%p, 0x%lx)", (void *)ngep, regno));
+	return (ddi_get16(ngep->io_handle, PIO_ADDR(ngep, regno)));
+}
+
+void nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data);
+#pragma	inline(nge_reg_put16)
+
+/* Write <data> to the 16-bit device register at offset <regno> */
+void
+nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data)
+{
+	NGE_TRACE(("nge_reg_put16($%p, 0x%lx, 0x%x)",
+	    (void *)ngep, regno, data));
+	ddi_put16(ngep->io_handle, PIO_ADDR(ngep, regno), data);
+
+}
+
+uint32_t nge_reg_get32(nge_t *ngep, nge_regno_t regno);
+#pragma	inline(nge_reg_get32)
+
+/* Read the 32-bit device register at offset <regno> */
+uint32_t
+nge_reg_get32(nge_t *ngep, nge_regno_t regno)
+{
+	NGE_TRACE(("nge_reg_get32($%p, 0x%lx)", (void *)ngep, regno));
+	return (ddi_get32(ngep->io_handle, PIO_ADDR(ngep, regno)));
+}
+
+void nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data);
+#pragma	inline(nge_reg_put32)
+
+/* Write <data> to the 32-bit device register at offset <regno> */
+void
+nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data)
+{
+	NGE_TRACE(("nge_reg_put32($%p, 0x%lx, 0x%x)",
+	    (void *)ngep, regno, data));
+	ddi_put32(ngep->io_handle, PIO_ADDR(ngep, regno), data);
+
+}
+
+
+static int nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd);
+#pragma	no_inline(nge_chip_peek_cfg)
+
+/*
+ * Read the PCI config-space register of size <pp_acc_size>
+ * (1/2/4/8 bytes) at offset <pp_acc_offset>, returning the value
+ * read in <pp_acc_data>.  Always returns DDI_SUCCESS.
+ */
+static int
+nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
+{
+	int err;
+	uint64_t regval;
+	uint64_t regno;
+
+	NGE_TRACE(("nge_chip_peek_cfg($%p, $%p)",
+	    (void *)ngep, (void *)ppd));
+
+	err = DDI_SUCCESS;
+	regno = ppd->pp_acc_offset;
+
+	switch (ppd->pp_acc_size) {
+	case 1:
+		regval = pci_config_get8(ngep->cfg_handle, regno);
+		break;
+
+	case 2:
+		regval = pci_config_get16(ngep->cfg_handle, regno);
+		break;
+
+	case 4:
+		regval = pci_config_get32(ngep->cfg_handle, regno);
+		break;
+
+	case 8:
+		regval = pci_config_get64(ngep->cfg_handle, regno);
+		break;
+
+	default:
+		/*
+		 * Unsupported access size: return zero rather than
+		 * copying an uninitialised stack value into
+		 * <pp_acc_data> (matches nge_chip_peek_reg()).
+		 */
+		regval = 0x0ull;
+		break;
+	}
+	ppd->pp_acc_data = regval;
+	return (err);
+}
+
+static int nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd);
+
+/*
+ * Write <pp_acc_data> to the PCI config-space register of size
+ * <pp_acc_size> (1/2/4/8 bytes) at offset <pp_acc_offset>.
+ * Always returns DDI_SUCCESS.
+ */
+static int
+nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
+{
+	uint64_t off;
+	uint64_t val;
+
+	NGE_TRACE(("nge_chip_poke_cfg($%p, $%p)",
+	    (void *)ngep, (void *)ppd));
+
+	off = ppd->pp_acc_offset;
+	val = ppd->pp_acc_data;
+
+	switch (ppd->pp_acc_size) {
+	case 1:
+		pci_config_put8(ngep->cfg_handle, off, val);
+		break;
+	case 2:
+		pci_config_put16(ngep->cfg_handle, off, val);
+		break;
+	case 4:
+		pci_config_put32(ngep->cfg_handle, off, val);
+		break;
+	case 8:
+		pci_config_put64(ngep->cfg_handle, off, val);
+		break;
+	}
+
+	return (DDI_SUCCESS);
+}
+
+static int nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd);
+
+/*
+ * Read the operating register of size <pp_acc_size> (1/2/4/8 bytes)
+ * at offset <pp_acc_offset> into <pp_acc_data>; an unsupported size
+ * reads back as zero.  Always returns DDI_SUCCESS.
+ */
+static int
+nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd)
+{
+	uint64_t val;
+	void *addr;
+
+	NGE_TRACE(("nge_chip_peek_reg($%p, $%p)",
+	    (void *)ngep, (void *)ppd));
+
+	addr = PIO_ADDR(ngep, ppd->pp_acc_offset);
+
+	switch (ppd->pp_acc_size) {
+	case 1:
+		val = ddi_get8(ngep->io_handle, addr);
+		break;
+	case 2:
+		val = ddi_get16(ngep->io_handle, addr);
+		break;
+	case 4:
+		val = ddi_get32(ngep->io_handle, addr);
+		break;
+	case 8:
+		val = ddi_get64(ngep->io_handle, addr);
+		break;
+	default:
+		val = 0x0ull;
+		break;
+	}
+
+	ppd->pp_acc_data = val;
+	return (DDI_SUCCESS);
+}
+
+static int nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd);
+
+/*
+ * Write <pp_acc_data> to the operating register of size
+ * <pp_acc_size> (1/2/4/8 bytes) at offset <pp_acc_offset>.
+ * Always returns DDI_SUCCESS.
+ */
+static int
+nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd)
+{
+	uint64_t val;
+	void *addr;
+
+	NGE_TRACE(("nge_chip_poke_reg($%p, $%p)",
+	    (void *)ngep, (void *)ppd));
+
+	addr = PIO_ADDR(ngep, ppd->pp_acc_offset);
+	val = ppd->pp_acc_data;
+
+	switch (ppd->pp_acc_size) {
+	case 1:
+		ddi_put8(ngep->io_handle, addr, val);
+		break;
+	case 2:
+		ddi_put16(ngep->io_handle, addr, val);
+		break;
+	case 4:
+		ddi_put32(ngep->io_handle, addr, val);
+		break;
+	case 8:
+		ddi_put64(ngep->io_handle, addr, val);
+		break;
+	}
+
+	return (DDI_SUCCESS);
+}
+
+static int nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd);
+#pragma	no_inline(nge_chip_peek_mii)
+
+/*
+ * Read the MII register addressed by byte offset <pp_acc_offset>
+ * (register number = offset / 2) into <pp_acc_data>.
+ * Always returns DDI_SUCCESS.
+ */
+static int
+nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd)
+{
+	ppd->pp_acc_data = nge_mii_get16(ngep, ppd->pp_acc_offset/2);
+	return (DDI_SUCCESS);
+}
+
+static int nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd);
+#pragma	no_inline(nge_chip_poke_mii)
+
+/*
+ * Write <pp_acc_data> to the MII register addressed by byte offset
+ * <pp_acc_offset> (register number = offset / 2).
+ * Always returns DDI_SUCCESS.
+ */
+static int
+nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd)
+{
+	nge_mii_put16(ngep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
+	return (DDI_SUCCESS);
+}
+
+/*
+ * Basic SEEPROM get/set access routine
+ *
+ * This uses the chip's SEEPROM auto-access method, controlled by the
+ * Serial EEPROM Command/Data registers (NGE_EP_CMD/NGE_EP_DATA), so
+ * the CPU doesn't have to fiddle with the individual bits.
+ *
+ * The caller should hold <genlock> and *also* have already acquired
+ * the right to access the SEEPROM.
+ *
+ * Return value:
+ *	DDI_SUCCESS on success,
+ *	DDI_FAILURE on access timeout (maybe retryable: the device
+ *	may just be busy).
+ *
+ * <*dp> is an input to a SEEPROM_CMD_WRITE operation, or an output
+ * from a (successful) SEEPROM_CMD_READ.
+ */
+
+static int
+nge_seeprom_access(nge_t *ngep, uint32_t cmd, nge_regno_t addr, uint16_t *dp)
+{
+	uint32_t tries;
+	nge_ep_cmd cmd_reg;
+	nge_ep_data data_reg;
+
+	NGE_TRACE(("nge_seeprom_access($%p, %d, %x, $%p)",
+	    (void *)ngep, cmd, addr, (void *)dp));
+
+	ASSERT(mutex_owned(ngep->genlock));
+
+	/*
+	 * Check there's no command in progress.
+	 *
+	 * Note: this *shouldn't* ever find a command in progress,
+	 * because we already hold the <genlock> mutex; it also guards
+	 * against a conflict with the chip's internal firmware or
+	 * another agent using the same (shared) SEEPROM.  So this is
+	 * just a final consistency check: the status field should
+	 * already show the SEEPROM as ready.
+	 */
+	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
+	for (tries = 0; tries < 30; tries++) {
+		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
+			break;
+		drv_usecwait(10);
+		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
+	}
+
+	/*
+	 * This should not happen.  If it does, restart the eeprom
+	 * state machine by writing the READY status back.
+	 */
+	if (tries == 30) {
+		cmd_reg.cmd_bits.sts = SEEPROM_READY;
+		nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);
+		drv_usecwait(10);
+		/*
+		 * Poll the status bit again to make sure the eeprom
+		 * is ready.  NOTE(review): if it is *still* busy after
+		 * this loop we fall through and issue the command
+		 * anyway -- confirm whether that is intended.
+		 */
+		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
+		for (tries = 0; tries < 30; tries++) {
+			if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
+				break;
+			drv_usecwait(10);
+			cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
+		}
+	}
+
+	/*
+	 * Assemble the command ...
+	 */
+	cmd_reg.cmd_bits.addr = addr;
+	cmd_reg.cmd_bits.cmd = cmd;
+	cmd_reg.cmd_bits.sts = 0;
+
+	nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);
+
+	/*
+	 * Poll until the access completes (status returns to READY).
+	 */
+	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
+	for (tries = 0; tries < 30; tries++) {
+		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
+			break;
+		drv_usecwait(10);
+		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
+	}
+
+	/* Timed out: report the ROM fault and fail the access */
+	if (tries == 30) {
+		nge_report(ngep, NGE_HW_ROM);
+		return (DDI_FAILURE);
+	}
+	switch (cmd) {
+	default:
+	case SEEPROM_CMD_WRITE_ENABLE:
+	case SEEPROM_CMD_ERASE:
+	case SEEPROM_CMD_ERALSE_ALL:
+	case SEEPROM_CMD_WRITE_DIS:
+		/* control commands: no data transfer */
+		break;
+
+	case SEEPROM_CMD_READ:
+		/* fetch the word just read into <*dp> */
+		data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
+		*dp = data_reg.data_bits.data;
+		break;
+
+	case SEEPROM_CMD_WRITE:
+		/* merge <*dp> into the data register and write it */
+		data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
+		data_reg.data_bits.data = *dp;
+		nge_reg_put32(ngep, NGE_EP_DATA, data_reg.data_val);
+		break;
+	}
+
+	return (DDI_SUCCESS);
+}
+
+
+/*
+ * Read one 16-bit SEEPROM word at <pp_acc_offset> into <pp_acc_data>.
+ * Returns the result of nge_seeprom_access() (DDI_SUCCESS/DDI_FAILURE).
+ */
+static int
+nge_chip_peek_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
+{
+	uint16_t data = 0;	/* avoid copying stack garbage on timeout */
+	int err;
+
+	err = nge_seeprom_access(ngep, SEEPROM_CMD_READ,
+	    ppd->pp_acc_offset, &data);
+	ppd->pp_acc_data = data;
+	return (err);
+}
+
+/*
+ * Write the low 16 bits of <pp_acc_data> to the SEEPROM word at
+ * <pp_acc_offset>; returns DDI_SUCCESS or DDI_FAILURE.
+ */
+static int
+nge_chip_poke_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
+{
+	uint16_t word;
+
+	word = ppd->pp_acc_data;
+	return (nge_seeprom_access(ngep, SEEPROM_CMD_WRITE,
+	    ppd->pp_acc_offset, &word));
+}
+
+/*
+ * Initialise the device-specific parameter block from the chip's
+ * PCI device-id: MSI/MSI-X capability, vlan/jumbo/pause/checksum
+ * support, descriptor type, ring sizes, and buffer-split factor.
+ */
+void
+nge_init_dev_spec_param(nge_t *ngep)
+{
+	nge_dev_spec_param_t *dev_param_p;
+	chip_info_t *infop;
+
+	dev_param_p = &ngep->dev_spec_param;
+	infop = (chip_info_t *)&ngep->chipinfo;
+
+	/*
+	 * Start from the most conservative feature set; this is also
+	 * what an unrecognised device-id ends up with.
+	 */
+	dev_param_p->msi = B_FALSE;
+	dev_param_p->msi_x = B_FALSE;
+	dev_param_p->vlan = B_FALSE;
+	dev_param_p->tx_pause_frame = B_FALSE;
+	dev_param_p->rx_pause_frame = B_FALSE;
+	dev_param_p->jumbo = B_FALSE;
+	dev_param_p->tx_rx_64byte = B_FALSE;
+	dev_param_p->rx_hw_checksum = B_FALSE;
+	dev_param_p->tx_hw_checksum = 0;
+	dev_param_p->desc_type = DESC_OFFLOAD;
+	dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
+	dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
+	dev_param_p->nge_split = NGE_SPLIT_32;
+
+	/*
+	 * ... then override for the more capable chips.
+	 */
+	switch (infop->device) {
+	case DEVICE_ID_NF3_E6:
+	case DEVICE_ID_NF3_DF:
+	case DEVICE_ID_MCP61_3EE:
+	case DEVICE_ID_MCP61_3EF:
+	case DEVICE_ID_MCP04_37:
+	case DEVICE_ID_MCP04_38:
+		/* baseline feature set: defaults above are correct */
+		break;
+
+	case DEVICE_ID_CK804_56:
+	case DEVICE_ID_CK804_57:
+		dev_param_p->msi = B_TRUE;
+		dev_param_p->msi_x = B_TRUE;
+		dev_param_p->rx_pause_frame = B_TRUE;
+		dev_param_p->jumbo = B_TRUE;
+		dev_param_p->rx_hw_checksum = B_TRUE;
+		dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
+		dev_param_p->desc_type = DESC_HOT;
+		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
+		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
+		dev_param_p->nge_split = NGE_SPLIT_96;
+		break;
+
+	case DEVICE_ID_MCP51_268:
+	case DEVICE_ID_MCP51_269:
+		dev_param_p->tx_rx_64byte = B_TRUE;
+		break;
+
+	case DEVICE_ID_MCP55_372:
+	case DEVICE_ID_MCP55_373:
+		dev_param_p->msi = B_TRUE;
+		dev_param_p->msi_x = B_TRUE;
+		dev_param_p->vlan = B_TRUE;
+		dev_param_p->tx_pause_frame = B_TRUE;
+		dev_param_p->rx_pause_frame = B_TRUE;
+		dev_param_p->jumbo = B_TRUE;
+		dev_param_p->tx_rx_64byte = B_TRUE;
+		dev_param_p->rx_hw_checksum = B_TRUE;
+		dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
+		dev_param_p->desc_type = DESC_HOT;
+		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
+		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
+		dev_param_p->nge_split = NGE_SPLIT_96;
+		break;
+
+	default:
+		/* unknown device: the conservative defaults stand */
+		break;
+	}
+}
+/*
+ * Perform first-stage chip (re-)initialisation, using only config-space
+ * accesses:
+ *
+ * + Read the vendor/device/revision/subsystem/cache-line-size registers,
+ *   returning the data in the structure pointed to by <infop> (first
+ *   call only, i.e. when <reset> is B_FALSE).
+ * + Enable or disable MSI support via the chip's HyperTransport
+ *   configuration registers, according to <nge_enable_msi>.
+ * + Enable memory-space access and bus-mastering, and clear any
+ *   stale status bits.
+ */
+void nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset);
+#pragma	no_inline(nge_chip_cfg_init)
+
+void
+nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset)
+{
+	uint16_t command;
+	ddi_acc_handle_t handle;
+	nge_interbus_conf interbus_conf;
+	nge_msi_mask_conf msi_mask_conf;
+	nge_msi_map_cap_conf cap_conf;
+
+	NGE_TRACE(("nge_chip_cfg_init($%p, $%p, %d)",
+	    (void *)ngep, (void *)infop, reset));
+
+	/*
+	 * Read all the config-space registers that characterise the
+	 * chip, specifically vendor/device/revision/subsystem vendor
+	 * and subsystem device id, plus the cache-line-size and
+	 * latency timer (saved so they can be restored after reset).
+	 */
+	handle = ngep->cfg_handle;
+	/* reading the vendor information once */
+	if (reset == B_FALSE) {
+		infop->command = pci_config_get16(handle,
+		    PCI_CONF_COMM);
+		infop->vendor = pci_config_get16(handle,
+		    PCI_CONF_VENID);
+		infop->device = pci_config_get16(handle,
+		    PCI_CONF_DEVID);
+		infop->subven = pci_config_get16(handle,
+		    PCI_CONF_SUBVENID);
+		infop->subdev = pci_config_get16(handle,
+		    PCI_CONF_SUBSYSID);
+		infop->class_code = pci_config_get8(handle,
+		    PCI_CONF_BASCLASS);
+		infop->revision = pci_config_get8(handle,
+		    PCI_CONF_REVID);
+		infop->clsize = pci_config_get8(handle,
+		    PCI_CONF_CACHE_LINESZ);
+		infop->latency = pci_config_get8(handle,
+		    PCI_CONF_LATENCY_TIMER);
+	}
+	if (nge_enable_msi) {
+		/*
+		 * Enable MSI: clear the hidden msi-off bit in the HT
+		 * internal config register (for MCP55, turn MSI-X off
+		 * at the same time).
+		 */
+		interbus_conf.conf_val = pci_config_get32(handle,
+		    PCI_CONF_HT_INTERNAL);
+		if ((infop->device == DEVICE_ID_MCP55_373) ||
+		    (infop->device == DEVICE_ID_MCP55_372))
+			interbus_conf.conf_bits.msix_off = NGE_SET;
+		interbus_conf.conf_bits.msi_off = NGE_CLEAR;
+		pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
+		    interbus_conf.conf_val);
+
+		if ((infop->device == DEVICE_ID_MCP55_373) ||
+		    (infop->device == DEVICE_ID_MCP55_372)) {
+
+			/* Disable the vector off for mcp55 */
+			msi_mask_conf.msi_mask_conf_val =
+			    pci_config_get32(handle, PCI_CONF_HT_MSI_MASK);
+			msi_mask_conf.msi_mask_bits.vec0_off = NGE_CLEAR;
+			msi_mask_conf.msi_mask_bits.vec1_off = NGE_CLEAR;
+			msi_mask_conf.msi_mask_bits.vec2_off = NGE_CLEAR;
+			msi_mask_conf.msi_mask_bits.vec3_off = NGE_CLEAR;
+			msi_mask_conf.msi_mask_bits.vec4_off = NGE_CLEAR;
+			msi_mask_conf.msi_mask_bits.vec5_off = NGE_CLEAR;
+			msi_mask_conf.msi_mask_bits.vec6_off = NGE_CLEAR;
+			msi_mask_conf.msi_mask_bits.vec7_off = NGE_CLEAR;
+			pci_config_put32(handle, PCI_CONF_HT_MSI_MASK,
+			    msi_mask_conf.msi_mask_conf_val);
+
+			/* Enable the MSI mapping */
+			cap_conf.msi_map_cap_conf_val =
+			    pci_config_get32(handle, PCI_CONF_HT_MSI_MAP_CAP);
+			cap_conf.map_cap_conf_bits.map_en = NGE_SET;
+			pci_config_put32(handle, PCI_CONF_HT_MSI_MAP_CAP,
+			    cap_conf.msi_map_cap_conf_val);
+		}
+	} else {
+		/* MSI not wanted: set the msi-off bit instead */
+		interbus_conf.conf_val = pci_config_get32(handle,
+		    PCI_CONF_HT_INTERNAL);
+		interbus_conf.conf_bits.msi_off = NGE_SET;
+		pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
+		    interbus_conf.conf_val);
+	}
+	/*
+	 * Enable memory access and bus mastering, keep memory-write-
+	 * invalidate off, and clear all (write-one-to-clear) status
+	 * bits.  NOTE(review): on the <reset> path this relies on
+	 * <infop->command> having been captured by the first call --
+	 * confirm callers always pass the same <infop>.
+	 */
+	command = infop->command | PCI_COMM_MAE;
+	command &= ~PCI_COMM_MEMWR_INVAL;
+	command |= PCI_COMM_ME;
+	pci_config_put16(handle, PCI_CONF_COMM, command);
+	pci_config_put16(handle, PCI_CONF_STAT, ~0);
+
+}
+
+/*
+ * nge_chip_stop() -- stop all chip activity.
+ *
+ * Clears and masks interrupts, stops PHY auto-polling, resets the
+ * buffer manager / DMA engine, disables the rx/tx state machines and
+ * their descriptor polling, and waits for both channels to go idle.
+ * If <fault> the chip state is recorded as NGE_CHIP_FAULT, otherwise
+ * NGE_CHIP_STOPPED.  Returns DDI_FAILURE if the hardware fails to
+ * reach the expected state within the poll limits.
+ */
+int
+nge_chip_stop(nge_t *ngep, boolean_t fault)
+{
+	int err;
+	uint32_t reg_val;
+	uint32_t tries;
+	nge_intr_src intr_src;
+	nge_mintr_src mintr_src;
+	nge_mii_cs mii_cs;
+	nge_rx_poll rx_poll;
+	nge_tx_poll tx_poll;
+	nge_rx_en rx_en;
+	nge_tx_en tx_en;
+	nge_tx_sta tx_sta;
+	nge_rx_sta rx_sta;
+	nge_mode_cntl mode;
+	nge_pmu_cntl2 pmu_cntl2;
+
+	NGE_TRACE(("nge_chip_stop($%p, %d)", (void *)ngep, fault));
+
+	err = DDI_SUCCESS;
+	/* Clear all pending interrupts (write-back to acknowledge) */
+	intr_src.intr_val = nge_reg_get32(ngep, NGE_INTR_SRC);
+	nge_reg_put32(ngep, NGE_INTR_SRC, intr_src.intr_val);
+	mintr_src.src_val = nge_reg_get8(ngep, NGE_MINTR_SRC);
+	nge_reg_put8(ngep, NGE_MINTR_SRC, mintr_src.src_val);
+
+	/* Mask all interrupts */
+	reg_val = nge_reg_get32(ngep, NGE_INTR_MASK);
+	reg_val &= ~NGE_INTR_ALL_EN;
+	nge_reg_put32(ngep, NGE_INTR_MASK, reg_val);
+
+	/* Disable auto-polling of phy */
+	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
+	mii_cs.cs_bits.ap_en = NGE_CLEAR;
+	nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);
+
+	/* Reset buffer management & DMA */
+	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
+	mode.mode_bits.bm_reset = NGE_SET;
+	mode.mode_bits.dma_dis = NGE_SET;
+	mode.mode_bits.desc_type = ngep->desc_mode;
+	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);
+
+	drv_usecwait(50000);
+
+	/* Restore buffer management */
+	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
+	mode.mode_bits.bm_reset = NGE_CLEAR;
+	mode.mode_bits.tx_rcom_en = NGE_SET;
+	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);
+
+	/*
+	 * NOTE(review): NGE_MODE_CNTL is written a second time with
+	 * the same value here -- looks redundant; confirm whether
+	 * the double write is intentional.
+	 */
+	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);
+	/* Wait for the DMA engine to report it has gone quiescent */
+	for (tries = 0; tries < 5000; tries++) {
+		drv_usecwait(10);
+		mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
+		if (mode.mode_bits.dma_status == NGE_SET)
+			break;
+	}
+	if (tries == 5000) {
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * For mcp55, the bits 1:31 of NGE_RX_EN and NGE_TX_EN are
+	 * defined to be used by SMU. The newer PXE than 527 began to
+	 * support SMU and bit 24 of NGE_RX_EN/NGE_TX_EN are set
+	 * when leaving PXE to prevent the MAC from winning
+	 * arbitration to the main transmit/receive channels.
+	 */
+	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
+	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
+
+		/* Disable rx's machine */
+		nge_reg_put32(ngep, NGE_RX_EN, 0x0);
+
+		/* Disable tx's machine */
+		nge_reg_put32(ngep, NGE_TX_EN, 0x0);
+	} else {
+
+		/* Disable rx's machine */
+		rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
+		rx_en.bits.rx_en = NGE_CLEAR;
+		nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);
+
+
+		/* Disable tx's machine */
+		tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
+		tx_en.bits.tx_en = NGE_CLEAR;
+		nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);
+	}
+
+	/* Disable auto-poll of rx's state machine */
+	rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
+	rx_poll.poll_bits.rpen = NGE_CLEAR;
+	rx_poll.poll_bits.rpi = NGE_CLEAR;
+	nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);
+
+	/* Disable auto-polling of tx's state machine */
+	tx_poll.poll_val = nge_reg_get32(ngep, NGE_TX_POLL);
+	tx_poll.poll_bits.tpen = NGE_CLEAR;
+	tx_poll.poll_bits.tpi = NGE_CLEAR;
+	nge_reg_put32(ngep, NGE_TX_POLL, tx_poll.poll_val);
+
+
+	/*
+	 * Clean the status of tx's state machine
+	 * and make sure the tx channel is idle
+	 */
+	tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
+	for (tries = 0; tries < 1000; tries++) {
+		if (tx_sta.sta_bits.tx_chan_sta == NGE_CLEAR)
+			break;
+		drv_usecwait(10);
+		tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
+	}
+	if (tries == 1000) {
+		return (DDI_FAILURE);
+	}
+	nge_reg_put32(ngep, NGE_TX_STA, tx_sta.sta_val);
+
+	/*
+	 * Clean the status of rx's state machine
+	 * and make sure the rx channel is idle
+	 */
+	rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
+	for (tries = 0; tries < 1000; tries++) {
+		if (rx_sta.sta_bits.rx_chan_sta == NGE_CLEAR)
+			break;
+		drv_usecwait(10);
+		rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
+	}
+	if (tries == 1000) {
+		return (DDI_FAILURE);
+	}
+	nge_reg_put32(ngep, NGE_RX_STA, rx_sta.sta_val);
+
+	/* MCP51: clear PMU idle limits and disable the idle timers */
+	if (ngep->chipinfo.device == DEVICE_ID_MCP51_269 ||
+	    ngep->chipinfo.device == DEVICE_ID_MCP51_268) {
+
+		nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT, 0);
+		nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT, 0);
+
+		pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
+		pmu_cntl2.cntl2_bits.cidle_timer = NGE_CLEAR;
+		pmu_cntl2.cntl2_bits.didle_timer = NGE_CLEAR;
+		nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
+	}
+	if (fault)
+		ngep->nge_chip_state = NGE_CHIP_FAULT;
+	else
+		ngep->nge_chip_state = NGE_CHIP_STOPPED;
+
+	return (err);
+}
+
+/*
+ * Program the rx descriptor ring registers: base address (low and
+ * high 32 bits), ring length, and enable hardware rx descriptor
+ * polling at the 1G interval.
+ */
+static void
+nge_rx_setup(nge_t *ngep)
+{
+	uint64_t ring_addr;
+	nge_rxtx_dlen dlen;
+	nge_rx_poll rx_poll;
+
+	/* descriptor ring base address and length */
+	ring_addr = ngep->recv->desc.cookie.dmac_laddress;
+	nge_reg_put32(ngep, NGE_RX_DADR, ring_addr);
+	nge_reg_put32(ngep, NGE_RX_DADR_HI, ring_addr >> 32);
+	dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
+	dlen.dlen_bits.rdlen = ngep->recv->desc.nslots - 1;
+	nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);
+
+	/* turn on the chip's rx descriptor polling */
+	rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
+	rx_poll.poll_bits.rpi = RX_POLL_INTV_1G;
+	rx_poll.poll_bits.rpen = NGE_SET;
+	nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);
+}
+
+/*
+ * Program the tx descriptor ring registers: base address (low and
+ * high 32 bits) and ring length.
+ */
+static void
+nge_tx_setup(nge_t *ngep)
+{
+	uint64_t ring_addr;
+	nge_rxtx_dlen dlen;
+
+	/* descriptor ring base address and length */
+	ring_addr = ngep->send->desc.cookie.dmac_laddress;
+	nge_reg_put32(ngep, NGE_TX_DADR, ring_addr);
+	nge_reg_put32(ngep, NGE_TX_DADR_HI, ring_addr >> 32);
+	dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
+	dlen.dlen_bits.tdlen = ngep->send->desc.nslots - 1;
+	nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);
+}
+
+/*
+ * nge_buff_setup() -- program the rx/tx descriptor rings and the
+ * buffer-management mode bits in NGE_MODE_CNTL, then kick the chip
+ * to look for new rx descriptors.  Always returns DDI_SUCCESS.
+ */
+static int
+nge_buff_setup(nge_t *ngep)
+{
+	nge_mode_cntl mode_cntl;
+	nge_dev_spec_param_t *dev_param_p;
+
+	dev_param_p = &ngep->dev_spec_param;
+
+	/*
+	 * Configure Rx & Tx descriptor rings
+	 */
+	nge_rx_setup(ngep);
+	nge_tx_setup(ngep);
+
+	/*
+	 * Configure buffer attributes
+	 */
+	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
+
+	/*
+	 * Enable DMA access requests
+	 */
+	mode_cntl.mode_bits.dma_dis = NGE_CLEAR;
+
+	/*
+	 * Enable buffer management (take it out of reset)
+	 */
+	mode_cntl.mode_bits.bm_reset = NGE_CLEAR;
+
+	/*
+	 * Select the descriptor type (standard-offload or hot)
+	 * chosen for this chip
+	 */
+	mode_cntl.mode_bits.desc_type = ngep->desc_mode;
+
+	/*
+	 * Enable receive hardware checksum only on chips that
+	 * support it
+	 */
+	if (dev_param_p->rx_hw_checksum) {
+		mode_cntl.mode_bits.rx_sum_en = NGE_SET;
+	} else
+		mode_cntl.mode_bits.rx_sum_en = NGE_CLEAR;
+
+	/*
+	 * Disable Tx PRD coarse update
+	 */
+	mode_cntl.mode_bits.tx_prd_cu_en = NGE_CLEAR;
+
+	/*
+	 * Disable 64-byte access
+	 */
+	mode_cntl.mode_bits.w64_dis = NGE_SET;
+
+	/*
+	 * Skip Rx Error Frame is not supported and if
+	 * enabled, jumbo frames do not work any more.
+	 */
+	mode_cntl.mode_bits.rx_filter_en = NGE_CLEAR;
+
+	/*
+	 * Can not support hot mode now
+	 */
+	mode_cntl.mode_bits.resv15 = NGE_CLEAR;
+
+	if (dev_param_p->vlan) {
+		/* Disable the vlan strip for devices which support vlan */
+		mode_cntl.mode_bits.vlan_strip = NGE_CLEAR;
+
+		/* Disable the vlan insert for devices which support vlan */
+		mode_cntl.mode_bits.vlan_ins = NGE_CLEAR;
+	}
+
+	if (dev_param_p->tx_rx_64byte) {
+
+		/* Set the maximum TX PRD fetch size to 64 bytes */
+		mode_cntl.mode_bits.tx_fetch_prd = NGE_SET;
+
+		/* Set the maximum RX PRD fetch size to 64 bytes */
+		mode_cntl.mode_bits.rx_fetch_prd = NGE_SET;
+	}
+	/*
+	 * Upload Rx data as it arrives, rather than waiting for full frame
+	 */
+	mode_cntl.mode_bits.resv16 = NGE_CLEAR;
+
+	/*
+	 * Normal HOT table accesses
+	 */
+	mode_cntl.mode_bits.resv17 = NGE_CLEAR;
+
+	/*
+	 * Normal HOT buffer requesting
+	 */
+	mode_cntl.mode_bits.resv18 = NGE_CLEAR;
+	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
+
+	/*
+	 * Signal controller to check for new Rx descriptors
+	 */
+	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
+	mode_cntl.mode_bits.rxdm = NGE_SET;
+	mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
+	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
+
+
+	return (DDI_SUCCESS);
+}
+
+/*
+ * Write the factory (hardware) mac address back into the unicast
+ * address registers.
+ *
+ * A chipset reset does not restore the original mac address to the
+ * mac address registers, so this is called when the driver is
+ * detached to leave the hardware with its original address.
+ */
+void
+nge_restore_mac_addr(nge_t *ngep)
+{
+	uint64_t hw_mac;
+
+	hw_mac = ngep->chipinfo.hw_mac_addr;
+	nge_reg_put32(ngep, NGE_UNI_ADDR0, (uint32_t)hw_mac);
+	nge_reg_put32(ngep, NGE_UNI_ADDR1, (uint32_t)(hw_mac >> 32));
+}
+
+/*
+ * nge_chip_reset() -- reset the chip to a known state.
+ *
+ * Clears the hardware statistics (reading each register clears it)
+ * and the software counters, clears the multicast filter, programs
+ * the SEEPROM access timing, captures the factory mac address on the
+ * first reset, restores cache-line-size/latency, stops the chip,
+ * applies MCP51-specific clock/PMU programming, clears the PHY
+ * power/coma bits on chips that need it, and finally resets the
+ * external PHY.  Returns DDI_SUCCESS or DDI_FAILURE.
+ */
+int
+nge_chip_reset(nge_t *ngep)
+{
+	int err;
+	uint8_t i;
+	uint32_t regno;
+	uint64_t mac;
+	nge_uni_addr1 uaddr1;
+	nge_mul_addr1 maddr1;
+	nge_cp_cntl ee_cntl;
+	nge_soft_misc soft_misc;
+	nge_pmu_cntl0 pmu_cntl0;
+	nge_pmu_cntl2 pmu_cntl2;
+	nge_pm_cntl2 pm_cntl2;
+	const nge_ksindex_t *ksip;
+	nge_sw_statistics_t *sw_stp;
+	sw_stp = &ngep->statistics.sw_statistics;
+
+	NGE_TRACE(("nge_chip_reset($%p)", (void *)ngep));
+
+	/*
+	 * Clear the statistics by reading the statistics registers
+	 * (they are clear-on-read)
+	 */
+	for (ksip = nge_statistics; ksip->name != NULL; ++ksip) {
+		regno = KS_BASE + ksip->index * sizeof (uint32_t);
+		(void) nge_reg_get32(ngep, regno);
+	}
+	/* Clear the software statistics */
+	sw_stp->recv_count = 0;
+	sw_stp->xmit_count = 0;
+	sw_stp->rbytes = 0;
+	sw_stp->obytes = 0;
+
+	/*
+	 * Clear the Multicast mac address table
+	 */
+	nge_reg_put32(ngep, NGE_MUL_ADDR0, 0);
+	maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1);
+	maddr1.addr_bits.addr = 0;
+	nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val);
+
+	/*
+	 * Setup seeprom control (clock divider, ROM size, access
+	 * width and wait states)
+	 */
+	ee_cntl.cntl_val = nge_reg_get32(ngep, NGE_EP_CNTL);
+	ee_cntl.cntl_bits.clkdiv = EEPROM_CLKDIV;
+	ee_cntl.cntl_bits.rom_size = EEPROM_32K;
+	ee_cntl.cntl_bits.word_wid = ACCESS_16BIT;
+	ee_cntl.cntl_bits.wait_slots = EEPROM_WAITCLK;
+	nge_reg_put32(ngep, NGE_EP_CNTL, ee_cntl.cntl_val);
+
+	/*
+	 * Read the factory mac address from the unicast address
+	 * registers -- only on the very first reset, and only if the
+	 * value looks valid (not all-zeros / all-ones)
+	 */
+	if (ngep->nge_chip_state == NGE_CHIP_INITIAL) {
+		uaddr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
+		mac = uaddr1.addr_bits.addr;
+		mac <<= 32;
+		mac |= nge_reg_get32(ngep, NGE_UNI_ADDR0);
+		if (mac != 0ULL && mac != ~0ULL) {
+			ngep->chipinfo.hw_mac_addr = mac;
+			/* unpack into byte arrays, last octet first */
+			for (i = ETHERADDRL; i-- != 0; ) {
+				ngep->chipinfo.vendor_addr.addr[i] =
+				    (uchar_t)mac;
+				ngep->cur_uni_addr.addr[i] = (uchar_t)mac;
+				mac >>= 8;
+			}
+			ngep->chipinfo.vendor_addr.set = 1;
+		}
+	}
+	/* Restore the saved cache-line-size and latency timer */
+	pci_config_put8(ngep->cfg_handle, PCI_CONF_CACHE_LINESZ,
+	    ngep->chipinfo.clsize);
+	pci_config_put8(ngep->cfg_handle, PCI_CONF_LATENCY_TIMER,
+	    ngep->chipinfo.latency);
+
+	/*
+	 * Stop the chipset and clear buffer management
+	 */
+	err = nge_chip_stop(ngep, B_FALSE);
+	if (err == DDI_FAILURE)
+		return (err);
+	if (ngep->chipinfo.device == DEVICE_ID_MCP51_269 ||
+	    ngep->chipinfo.device == DEVICE_ID_MCP51_268) {
+
+		/* Program software misc register: pulse clock resets */
+		soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
+		soft_misc.misc_bits.rx_clk_vx_rst = NGE_SET;
+		soft_misc.misc_bits.tx_clk_vx_rst = NGE_SET;
+		soft_misc.misc_bits.clk12m_vx_rst = NGE_SET;
+		soft_misc.misc_bits.fpci_clk_vx_rst = NGE_SET;
+		soft_misc.misc_bits.rx_clk_vc_rst = NGE_SET;
+		soft_misc.misc_bits.tx_clk_vc_rst = NGE_SET;
+		soft_misc.misc_bits.fs_clk_vc_rst = NGE_SET;
+		soft_misc.misc_bits.rst_ex_m2pintf = NGE_SET;
+		nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);
+
+		/* wait for 4 us */
+		drv_usecwait(4);
+
+		/* ... then release all the clock resets again */
+		soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
+		soft_misc.misc_bits.rx_clk_vx_rst = NGE_CLEAR;
+		soft_misc.misc_bits.tx_clk_vx_rst = NGE_CLEAR;
+		soft_misc.misc_bits.clk12m_vx_rst = NGE_CLEAR;
+		soft_misc.misc_bits.fpci_clk_vx_rst = NGE_CLEAR;
+		soft_misc.misc_bits.rx_clk_vc_rst = NGE_CLEAR;
+		soft_misc.misc_bits.tx_clk_vc_rst = NGE_CLEAR;
+		soft_misc.misc_bits.fs_clk_vc_rst = NGE_CLEAR;
+		soft_misc.misc_bits.rst_ex_m2pintf = NGE_CLEAR;
+		nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);
+
+		/*
+		 * Program PMU registers.
+		 * NOTE(review): core_spd1000_idle is assigned the
+		 * SPD100 idle constant, and core_spd10_idle is
+		 * assigned twice -- confirm against the h/w spec
+		 * whether both are intentional.
+		 */
+		pmu_cntl0.cntl0_val = nge_reg_get32(ngep, NGE_PMU_CNTL0);
+		pmu_cntl0.cntl0_bits.core_spd10_fp =
+		    NGE_PMU_CORE_SPD10_BUSY;
+		pmu_cntl0.cntl0_bits.core_spd10_idle =
+		    NGE_PMU_CORE_SPD10_IDLE;
+		pmu_cntl0.cntl0_bits.core_spd100_fp =
+		    NGE_PMU_CORE_SPD100_BUSY;
+		pmu_cntl0.cntl0_bits.core_spd100_idle =
+		    NGE_PMU_CORE_SPD100_IDLE;
+		pmu_cntl0.cntl0_bits.core_spd1000_fp =
+		    NGE_PMU_CORE_SPD1000_BUSY;
+		pmu_cntl0.cntl0_bits.core_spd1000_idle =
+		    NGE_PMU_CORE_SPD100_IDLE;
+		pmu_cntl0.cntl0_bits.core_spd10_idle =
+		    NGE_PMU_CORE_SPD10_IDLE;
+		nge_reg_put32(ngep, NGE_PMU_CNTL0, pmu_cntl0.cntl0_val);
+
+		/* Set the core idle limit value */
+		nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT,
+		    NGE_PMU_CIDLE_LIMIT_DEF);
+
+		/* Set the device idle limit value */
+		nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT,
+		    NGE_PMU_DIDLE_LIMIT_DEF);
+
+		/* Enable the core/device idle timer in PMU control 2 */
+		pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
+		pmu_cntl2.cntl2_bits.cidle_timer = NGE_SET;
+		pmu_cntl2.cntl2_bits.didle_timer = NGE_SET;
+		pmu_cntl2.cntl2_bits.core_enable = NGE_SET;
+		pmu_cntl2.cntl2_bits.dev_enable = NGE_SET;
+		nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
+	}
+
+	/*
+	 * Clear the power state bits for phy since interface no longer
+	 * works after rebooting from Windows on a multi-boot machine
+	 */
+	if (ngep->chipinfo.device == DEVICE_ID_MCP51_268 ||
+	    ngep->chipinfo.device == DEVICE_ID_MCP51_269 ||
+	    ngep->chipinfo.device == DEVICE_ID_MCP55_372 ||
+	    ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
+	    ngep->chipinfo.device == DEVICE_ID_MCP61_3EE ||
+	    ngep->chipinfo.device == DEVICE_ID_MCP61_3EF) {
+
+		pm_cntl2.cntl_val = nge_reg_get32(ngep, NGE_PM_CNTL2);
+		/* bring phy out of coma mode */
+		pm_cntl2.cntl_bits.phy_coma_set = NGE_CLEAR;
+		/* disable auto reset coma bits */
+		pm_cntl2.cntl_bits.resv4 = NGE_CLEAR;
+		/* restore power to gated clocks */
+		pm_cntl2.cntl_bits.resv8_11 = NGE_CLEAR;
+		nge_reg_put32(ngep, NGE_PM_CNTL2, pm_cntl2.cntl_val);
+	}
+
+	/*
+	 * Reset the external phy
+	 */
+	(void) nge_phy_reset(ngep);
+	ngep->nge_chip_state = NGE_CHIP_RESET;
+	return (DDI_SUCCESS);
+}
+
+/*
+ * nge_chip_start() -- set up buffer management, program the MII
+ * polling, tx/rx engine parameters, FIFO watermarks, soft timer and
+ * interrupt masks, then move the chip into the RUNNING state.
+ *
+ * Returns DDI_SUCCESS, or DDI_FAILURE if buffer setup fails.
+ */
+int
+nge_chip_start(nge_t *ngep)
+{
+	int err;
+	nge_itc itc;
+	nge_tx_cntl tx_cntl;
+	nge_rx_cntrl0 rx_cntl0;
+	nge_rx_cntl1 rx_cntl1;
+	nge_tx_en tx_en;
+	nge_rx_en rx_en;
+	nge_mii_cs mii_cs;
+	nge_swtr_cntl swtr_cntl;
+	nge_rx_fifo_wm rx_fifo;
+	nge_intr_mask intr_mask;
+	nge_mintr_mask mintr_mask;
+	nge_dev_spec_param_t *dev_param_p;
+
+	NGE_TRACE(("nge_chip_start($%p)", (void *)ngep));
+
+	/*
+	 * Setup buffer management
+	 */
+	err = nge_buff_setup(ngep);
+	if (err == DDI_FAILURE)
+		return (err);
+
+	dev_param_p = &ngep->dev_spec_param;
+
+	/*
+	 * Enable the automatic MII polling attribute for our PHY address
+	 */
+	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
+	mii_cs.cs_bits.ap_paddr = ngep->phy_xmii_addr;
+	mii_cs.cs_bits.ap_en = NGE_SET;
+	mii_cs.cs_bits.ap_intv = MII_POLL_INTV;
+	nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);
+
+	/*
+	 * Setup link
+	 */
+	(*ngep->physops->phys_update)(ngep);
+
+	/*
+	 * Configure the tx's parameters.  The lcar/tlcol/uflo error
+	 * masks were previously assigned NGE_CLEAR and then immediately
+	 * overwritten with NGE_SET; the dead stores have been removed
+	 * and only the effective (NGE_SET) assignments remain.
+	 */
+	tx_cntl.cntl_val = nge_reg_get32(ngep, NGE_TX_CNTL);
+	if (dev_param_p->tx_pause_frame)
+		tx_cntl.cntl_bits.paen = NGE_SET;
+	else
+		tx_cntl.cntl_bits.paen = NGE_CLEAR;
+	tx_cntl.cntl_bits.retry_en = NGE_SET;
+	tx_cntl.cntl_bits.pad_en = NGE_SET;
+	tx_cntl.cntl_bits.fappend_en = NGE_SET;
+	tx_cntl.cntl_bits.two_def_en = NGE_SET;
+	tx_cntl.cntl_bits.max_retry = 15;
+	tx_cntl.cntl_bits.burst_en = NGE_CLEAR;
+	tx_cntl.cntl_bits.def_mask = NGE_CLEAR;
+	tx_cntl.cntl_bits.exdef_mask = NGE_SET;
+	tx_cntl.cntl_bits.lcar_mask = NGE_SET;
+	tx_cntl.cntl_bits.tlcol_mask = NGE_SET;
+	tx_cntl.cntl_bits.uflo_err_mask = NGE_SET;
+	tx_cntl.cntl_bits.jam_seq_en = NGE_CLEAR;
+	nge_reg_put32(ngep, NGE_TX_CNTL, tx_cntl.cntl_val);
+
+
+	/*
+	 * Configure the parameters of Rx's state machine
+	 * Enable the parameters:
+	 *	1). Pad Strip
+	 *	2). FCS Relay
+	 *	3). Pause
+	 *	4). Address filter
+	 *	5). Runt Packet receive
+	 *	6). Broadcast
+	 *	7). Receive Deferral
+	 *
+	 * Disable the following parameters for decreasing
+	 * the number of interrupts:
+	 *	1). Runt Interrupt.
+	 *	2). Rx's Late Collision interrupt.
+	 *	3). Rx's Max length Error Interrupt.
+	 *	4). Rx's Length Field error Interrupt.
+	 *	5). Rx's FCS error interrupt.
+	 *	6). Rx's overflow error interrupt.
+	 *	7). Rx's Frame alignment error interrupt.
+	 */
+	rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
+	rx_cntl0.cntl_bits.padsen = NGE_CLEAR;
+	rx_cntl0.cntl_bits.fcsren = NGE_CLEAR;
+	if (dev_param_p->rx_pause_frame)
+		rx_cntl0.cntl_bits.paen = NGE_SET;
+	else
+		rx_cntl0.cntl_bits.paen = NGE_CLEAR;
+	rx_cntl0.cntl_bits.lben = NGE_CLEAR;
+	rx_cntl0.cntl_bits.afen = NGE_SET;
+	rx_cntl0.cntl_bits.runten = NGE_CLEAR;
+	rx_cntl0.cntl_bits.brdis = NGE_CLEAR;
+	rx_cntl0.cntl_bits.rdfen = NGE_CLEAR;
+	rx_cntl0.cntl_bits.runtm = NGE_CLEAR;
+	rx_cntl0.cntl_bits.slfb = NGE_CLEAR;
+	rx_cntl0.cntl_bits.rlcolm = NGE_CLEAR;
+	rx_cntl0.cntl_bits.maxerm = NGE_CLEAR;
+	rx_cntl0.cntl_bits.lferm = NGE_CLEAR;
+	rx_cntl0.cntl_bits.crcm = NGE_CLEAR;
+	rx_cntl0.cntl_bits.ofolm = NGE_CLEAR;
+	rx_cntl0.cntl_bits.framerm = NGE_CLEAR;
+	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);
+
+	/*
+	 * Configure the watermark for the rx's statemachine
+	 */
+	rx_fifo.wm_val = nge_reg_get32(ngep, NGE_RX_FIFO_WM);
+	rx_fifo.wm_bits.data_hwm = ngep->rx_datahwm;
+	rx_fifo.wm_bits.prd_lwm = ngep->rx_prdlwm;
+	rx_fifo.wm_bits.prd_hwm = ngep->rx_prdhwm;
+	nge_reg_put32(ngep, NGE_RX_FIFO_WM, rx_fifo.wm_val);
+
+	/*
+	 * Configure the deferral time slot for rx's state machine
+	 */
+	nge_reg_put8(ngep, NGE_RX_DEf, ngep->rx_def);
+
+	/*
+	 * Configure the maximum length of rx's packet
+	 */
+	rx_cntl1.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL1);
+	rx_cntl1.cntl_bits.length = ngep->max_sdu;
+	nge_reg_put32(ngep, NGE_RX_CNTL1, rx_cntl1.cntl_val);
+	/*
+	 * Enable Tx's state machine
+	 */
+	tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
+	tx_en.bits.tx_en = NGE_SET;
+	nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);
+
+	/*
+	 * Enable Rx's state machine
+	 */
+	rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
+	rx_en.bits.rx_en = NGE_SET;
+	nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);
+
+	/*
+	 * Program the software timer interval and enable the timer
+	 */
+	itc.itc_val = nge_reg_get32(ngep, NGE_SWTR_ITC);
+	itc.itc_bits.sw_intv = ngep->sw_intr_intv;
+	nge_reg_put32(ngep, NGE_SWTR_ITC, itc.itc_val);
+
+	swtr_cntl.ctrl_val = nge_reg_get8(ngep, NGE_SWTR_CNTL);
+	swtr_cntl.cntl_bits.sten = NGE_SET;
+	swtr_cntl.cntl_bits.stren = NGE_SET;
+	/*
+	 * NOTE(review): NGE_SWTR_CNTL is read with nge_reg_get8() but
+	 * written back with nge_reg_put32(); confirm the intended
+	 * access width for this register.
+	 */
+	nge_reg_put32(ngep, NGE_SWTR_CNTL, swtr_cntl.ctrl_val);
+
+	/*
+	 * Disable all mii read/write operation Interrupt
+	 */
+	mintr_mask.mask_val = nge_reg_get8(ngep, NGE_MINTR_MASK);
+	mintr_mask.mask_bits.mrei = NGE_CLEAR;
+	mintr_mask.mask_bits.mcc2 = NGE_CLEAR;
+	mintr_mask.mask_bits.mcc1 = NGE_CLEAR;
+	mintr_mask.mask_bits.mapi = NGE_SET;
+	mintr_mask.mask_bits.mpdi = NGE_SET;
+	nge_reg_put8(ngep, NGE_MINTR_MASK, mintr_mask.mask_val);
+
+	/*
+	 * Enable all interrupt event
+	 */
+	intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
+	intr_mask.mask_bits.reint = NGE_SET;
+	intr_mask.mask_bits.rcint = NGE_SET;
+	intr_mask.mask_bits.miss = NGE_SET;
+	intr_mask.mask_bits.teint = NGE_CLEAR;
+	intr_mask.mask_bits.tcint = NGE_SET;
+	intr_mask.mask_bits.stint = NGE_CLEAR;
+	intr_mask.mask_bits.mint = NGE_CLEAR;
+	intr_mask.mask_bits.rfint = NGE_CLEAR;
+	intr_mask.mask_bits.tfint = NGE_CLEAR;
+	intr_mask.mask_bits.feint = NGE_SET;
+	intr_mask.mask_bits.resv10 = NGE_CLEAR;
+	intr_mask.mask_bits.resv11 = NGE_CLEAR;
+	intr_mask.mask_bits.resv12 = NGE_CLEAR;
+	intr_mask.mask_bits.resv13 = NGE_CLEAR;
+	intr_mask.mask_bits.phyint = NGE_CLEAR;
+	ngep->intr_masks = intr_mask.mask_val;
+	nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
+	ngep->nge_chip_state = NGE_CHIP_RUNNING;
+	return (DDI_SUCCESS);
+}
+
+/*
+ * nge_chip_sync() -- program the chip with the unicast MAC address,
+ * the multicast hash table, the required level of promiscuity.
+ */
+void
+nge_chip_sync(nge_t *ngep)
+{
+	uint8_t i;
+	uint64_t macaddr;
+	uint64_t mul_addr;
+	uint64_t mul_mask;
+	nge_rx_cntrl0 rx_cntl;
+	nge_uni_addr1 uni_adr1;
+
+	NGE_TRACE(("nge_chip_sync($%p)", (void *)ngep));
+
+	macaddr = 0x0ull;
+	mul_addr = 0x0ull;
+	mul_mask = 0x0ull;
+	rx_cntl.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
+
+	/*
+	 * Promiscuous mode turns the address filter (afen) off and sets
+	 * brdis; normal mode enables the filter and clears brdis.
+	 */
+	if (ngep->promisc) {
+		rx_cntl.cntl_bits.afen = NGE_CLEAR;
+		rx_cntl.cntl_bits.brdis = NGE_SET;
+	} else {
+		rx_cntl.cntl_bits.afen = NGE_SET;
+		rx_cntl.cntl_bits.brdis = NGE_CLEAR;
+	}
+
+	/*
+	 * Transform the MAC address from host to chip format, the unicast
+	 * MAC address(es) ... addr[0] ends up in the least-significant
+	 * byte of the 64-bit value (i.e. of the ADDR0/ADDR1 pair).
+	 */
+	for (i = ETHERADDRL, macaddr = 0ull; i != 0; --i) {
+		macaddr |= ngep->cur_uni_addr.addr[i-1];
+		macaddr <<= (i > 1) ? 8 : 0;
+	}
+
+	nge_reg_put32(ngep, NGE_UNI_ADDR0, (uint32_t)macaddr);
+	macaddr = macaddr >>32;
+	uni_adr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
+	uni_adr1.addr_bits.addr = (uint16_t)macaddr;
+	uni_adr1.addr_bits.resv16_31 = (uint16_t)0;
+	nge_reg_put32(ngep, NGE_UNI_ADDR1, uni_adr1.addr_val);
+
+	/*
+	 * Reprogram the multicast address table (address + mask pair,
+	 * assembled in the same byte order as the unicast address) ...
+	 */
+	for (i = ETHERADDRL, mul_addr = 0ull; i != 0; --i) {
+		mul_addr |= ngep->cur_mul_addr.addr[i-1];
+		mul_addr <<= (i > 1) ? 8 : 0;
+		mul_mask |= ngep->cur_mul_mask.addr[i-1];
+		mul_mask <<= (i > 1) ? 8 : 0;
+	}
+	nge_reg_put32(ngep, NGE_MUL_ADDR0, (uint32_t)mul_addr);
+	mul_addr >>= 32;
+	/*
+	 * NOTE(review): the high words below are passed without an
+	 * explicit (uint32_t) cast, unlike the low-word writes above;
+	 * they are implicitly narrowed by the register-access prototype.
+	 */
+	nge_reg_put32(ngep, NGE_MUL_ADDR1, mul_addr);
+	nge_reg_put32(ngep, NGE_MUL_MASK, (uint32_t)mul_mask);
+	mul_mask >>= 32;
+	nge_reg_put32(ngep, NGE_MUL_MASK1, mul_mask);
+	/*
+	 * Set or clear the PROMISCUOUS mode bit
+	 */
+	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl.cntl_val);
+	/*
+	 * For internal PHY loopback, the link will
+	 * not be up, so it need to sync mac modes directly.
+	 */
+	if (ngep->param_loop_mode == NGE_LOOP_INTERNAL_PHY)
+		nge_sync_mac_modes(ngep);
+}
+
+/*
+ * nge_chip_err() -- decode the fatal-error register (NGE_REG010) into
+ * the software error counters.  The register's bits are named resv*
+ * in the layout but are used here as TSO / hardware-checksum error
+ * causes, as reflected by the counters they feed.  If any bit was
+ * set, the fatal-error interrupt (feint) is masked off so that a
+ * malformed command cannot cause an interrupt storm.
+ */
+static void
+nge_chip_err(nge_t *ngep)
+{
+	nge_reg010 reg010_ins;
+	nge_sw_statistics_t *psw_stat;
+	nge_intr_mask intr_mask;
+
+	NGE_TRACE(("nge_chip_err($%p)", (void *)ngep));
+
+	psw_stat = (nge_sw_statistics_t *)&ngep->statistics.sw_statistics;
+	reg010_ins.reg010_val = nge_reg_get32(ngep, NGE_REG010);
+	if (reg010_ins.reg010_bits.resv0)
+		psw_stat->fe_err.tso_err_mss ++;
+
+	if (reg010_ins.reg010_bits.resv1)
+		psw_stat->fe_err.tso_dis ++;
+
+	if (reg010_ins.reg010_bits.resv2)
+		psw_stat->fe_err.tso_err_nosum ++;
+
+	if (reg010_ins.reg010_bits.resv3)
+		psw_stat->fe_err.tso_err_hov ++;
+
+	if (reg010_ins.reg010_bits.resv4)
+		psw_stat->fe_err.tso_err_huf ++;
+
+	if (reg010_ins.reg010_bits.resv5)
+		psw_stat->fe_err.tso_err_l2 ++;
+
+	if (reg010_ins.reg010_bits.resv6)
+		psw_stat->fe_err.tso_err_ip ++;
+
+	if (reg010_ins.reg010_bits.resv7)
+		psw_stat->fe_err.tso_err_l4 ++;
+
+	if (reg010_ins.reg010_bits.resv8)
+		psw_stat->fe_err.tso_err_tcp ++;
+
+	if (reg010_ins.reg010_bits.resv9)
+		psw_stat->fe_err.hsum_err_ip ++;
+
+	if (reg010_ins.reg010_bits.resv10)
+		psw_stat->fe_err.hsum_err_l4 ++;
+
+	if (reg010_ins.reg010_val != 0) {
+
+		/*
+		 * Fatal error is triggered by malformed driver commands.
+		 * Disable unless debugging.
+		 */
+		intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
+		intr_mask.mask_bits.feint = NGE_CLEAR;
+		nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
+		ngep->intr_masks = intr_mask.mask_val;
+
+	}
+}
+
+/*
+ * nge_sync_mac_modes() -- reprogram the MAC's speed/duplex selection,
+ * inter-frame gaps, tx FIFO watermarks, backoff parameters and rx
+ * pause enable to match the current link parameters
+ * (param_link_speed / param_link_duplex / param_link_rx_pause).
+ */
+static void
+nge_sync_mac_modes(nge_t *ngep)
+{
+	nge_tx_def tx_def;
+	nge_tx_fifo_wm tx_fifo;
+	nge_bkoff_cntl bk_cntl;
+	nge_mac2phy m2p;
+	nge_rx_cntrl0 rx_cntl0;
+	nge_dev_spec_param_t *dev_param_p;
+
+	dev_param_p = &ngep->dev_spec_param;
+
+	tx_def.def_val = nge_reg_get32(ngep, NGE_TX_DEF);
+	m2p.m2p_val = nge_reg_get32(ngep, NGE_MAC2PHY);
+	tx_fifo.wm_val = nge_reg_get32(ngep, NGE_TX_FIFO_WM);
+	bk_cntl.cntl_val = nge_reg_get32(ngep, NGE_BKOFF_CNTL);
+	bk_cntl.bkoff_bits.rseed = BKOFF_RSEED;
+	/*
+	 * Speed-dependent settings.  There is no default arm: an
+	 * unexpected param_link_speed leaves the speed-dependent fields
+	 * at the values just read back from the chip.
+	 */
+	switch (ngep->param_link_speed) {
+	case 10:
+		m2p.m2p_bits.speed = low_speed;
+		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
+		if (ngep->phy_mode == RGMII_IN) {
+			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
+			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
+		} else {
+			tx_def.def_bits.if_def = TX_TIFG_MII;
+			tx_def.def_bits.ifg2_def = TX_IFG2_MII;
+		}
+		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
+		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
+		break;
+
+	case 100:
+		m2p.m2p_bits.speed = fast_speed;
+		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
+		if (ngep->phy_mode == RGMII_IN) {
+			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
+			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
+		} else {
+			tx_def.def_bits.if_def = TX_TIFG_MII;
+			tx_def.def_bits.ifg2_def = TX_IFG2_MII;
+		}
+		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
+		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
+		break;
+
+	case 1000:
+		m2p.m2p_bits.speed = giga_speed;
+		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
+		if (ngep->param_link_duplex == LINK_DUPLEX_FULL) {
+			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
+			tx_def.def_bits.if_def = TX_IFG_RGMII_1000_FD;
+		} else {
+			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
+			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
+		}
+
+		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_GMII;
+		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_GMII;
+		break;
+	}
+
+	/* mcp55 only: clear the PHY interrupt routing bits */
+	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
+	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
+		m2p.m2p_bits.phyintr = NGE_CLEAR;
+		m2p.m2p_bits.phyintrlvl = NGE_CLEAR;
+	}
+	if (ngep->param_link_duplex == LINK_DUPLEX_HALF) {
+		m2p.m2p_bits.hdup_en = NGE_SET;
+	}
+	else
+		m2p.m2p_bits.hdup_en = NGE_CLEAR;
+	nge_reg_put32(ngep, NGE_MAC2PHY, m2p.m2p_val);
+	nge_reg_put32(ngep, NGE_TX_DEF, tx_def.def_val);
+
+	/* tx FIFO watermarks are the same for all speeds */
+	tx_fifo.wm_bits.data_lwm = TX_FIFO_DATA_LWM;
+	tx_fifo.wm_bits.prd_lwm = TX_FIFO_PRD_LWM;
+	tx_fifo.wm_bits.uprd_hwm = TX_FIFO_PRD_HWM;
+	tx_fifo.wm_bits.fb_wm = TX_FIFO_TBFW;
+	nge_reg_put32(ngep, NGE_TX_FIFO_WM, tx_fifo.wm_val);
+
+	nge_reg_put32(ngep, NGE_BKOFF_CNTL, bk_cntl.cntl_val);
+
+	/* rx pause: honour both the link partner and the device capability */
+	rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
+	if (ngep->param_link_rx_pause && dev_param_p->rx_pause_frame)
+		rx_cntl0.cntl_bits.paen = NGE_SET;
+	else
+		rx_cntl0.cntl_bits.paen = NGE_CLEAR;
+	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);
+}
+
+/*
+ * Handler for hardware link state change.
+ *
+ * When this routine is called, the hardware link state has changed
+ * and the new state is reflected in the param_* variables. Here
+ * we must update the softstate, reprogram the MAC to match, and
+ * record the change in the log and/or on the console.
+ *
+ * Called from nge_factotum_link_check() with genlock held.
+ */
+static void
+nge_factotum_link_handler(nge_t *ngep)
+{
+	/*
+	 * Update the s/w link_state
+	 */
+	if (ngep->param_link_up)
+		ngep->link_state = LINK_STATE_UP;
+	else
+		ngep->link_state = LINK_STATE_DOWN;
+
+	/*
+	 * Reprogram the MAC modes to match
+	 */
+	nge_sync_mac_modes(ngep);
+}
+
+/*
+ * nge_factotum_link_check() -- ask the PHY layer for the current link
+ * state and, unless we are certain nothing changed, run the
+ * link-change handler to resync the softstate and MAC.
+ *
+ * Returns B_TRUE iff the s/w link state changed, so the caller can
+ * notify MAC (mac_link_update()) after dropping the locks.
+ */
+static boolean_t
+nge_factotum_link_check(nge_t *ngep)
+{
+	boolean_t lchg;
+	boolean_t check;
+
+	ASSERT(mutex_owned(ngep->genlock));
+
+	(*ngep->physops->phys_check)(ngep);
+
+	/*
+	 * Initialise <lchg> up front: the default arm of the switch
+	 * below (any state other than UP/DOWN) previously left it
+	 * uninitialised, so the function could return stack garbage.
+	 */
+	lchg = B_FALSE;
+	switch (ngep->link_state) {
+	case LINK_STATE_UP:
+		lchg = (ngep->param_link_up == B_FALSE);
+		check = (ngep->param_link_up == B_FALSE);
+		break;
+
+	case LINK_STATE_DOWN:
+		lchg = (ngep->param_link_up == B_TRUE);
+		check = (ngep->param_link_up == B_TRUE);
+		break;
+
+	default:
+		/* unknown/indeterminate state: always recheck */
+		check = B_TRUE;
+		break;
+	}
+
+	/*
+	 * If <check> is false, we're sure the link hasn't changed.
+	 * If true, it's not yet definitive; run the link state change
+	 * handler to settle the softstate and reprogram the MAC.
+	 */
+	if (check)
+		nge_factotum_link_handler(ngep);
+
+	return (lchg);
+}
+
+/*
+ * Factotum routine to check for Tx stall, using the 'watchdog' counter
+ */
+static boolean_t nge_factotum_stall_check(nge_t *ngep);
+/*
+ * NOTE(review): the forward declaration above immediately precedes
+ * the definition and looks redundant -- elsewhere in this file such
+ * declarations carry a #pragma no_inline, which is absent here.
+ */
+
+static boolean_t
+nge_factotum_stall_check(nge_t *ngep)
+{
+	uint32_t dogval;
+	/*
+	 * Specific check for Tx stall ...
+	 *
+	 * The 'watchdog' counter is incremented whenever a packet
+	 * is queued, reset to 1 when some (but not all) buffers
+	 * are reclaimed, reset to 0 (disabled) when all buffers
+	 * are reclaimed, and shifted left here. If it exceeds the
+	 * threshold value, the chip is assumed to have stalled and
+	 * is put into the ERROR state. The factotum will then reset
+	 * it on the next pass.
+	 *
+	 * All of which should ensure that we don't get into a state
+	 * where packets are left pending indefinitely!
+	 */
+	dogval = nge_atomic_shl32(&ngep->watchdog, 1);
+	/* require 8 consecutive over-threshold checks before declaring a stall */
+	if (dogval < nge_watchdog_count) {
+		ngep->stall_cknum = 0;
+	} else {
+		ngep->stall_cknum++;
+	}
+	if (ngep->stall_cknum < 8) {
+		return (B_FALSE);
+	} else {
+		ngep->stall_cknum = 0;
+		ngep->statistics.sw_statistics.tx_stall++;
+		return (B_TRUE);
+	}
+}
+
+
+
+/*
+ * The factotum is woken up when there's something to do that we'd rather
+ * not do from inside a hardware interrupt handler or high-level cyclic.
+ * Its two main tasks are:
+ *	reset & restart the chip after an error
+ *	check the link status whenever necessary
+ */
+/* ARGSUSED */
+uint_t
+nge_chip_factotum(caddr_t args1, caddr_t args2)
+{
+	uint_t result;
+	nge_t *ngep;
+	boolean_t err;
+	boolean_t linkchg;
+
+	ngep = (nge_t *)args1;
+
+	NGE_TRACE(("nge_chip_factotum($%p)", (void *)ngep));
+
+	/*
+	 * Claim the soft interrupt only if the wakeup flag is set;
+	 * clearing it under softlock consumes this wakeup.
+	 */
+	mutex_enter(ngep->softlock);
+	if (ngep->factotum_flag == 0) {
+		mutex_exit(ngep->softlock);
+		return (DDI_INTR_UNCLAIMED);
+	}
+	ngep->factotum_flag = 0;
+	mutex_exit(ngep->softlock);
+	err = B_FALSE;
+	linkchg = B_FALSE;
+	result = DDI_INTR_CLAIMED;
+
+	mutex_enter(ngep->genlock);
+	switch (ngep->nge_chip_state) {
+	default:
+		break;
+
+	case NGE_CHIP_RUNNING:
+		linkchg = nge_factotum_link_check(ngep);
+		err = nge_factotum_stall_check(ngep);
+		break;
+
+	case NGE_CHIP_FAULT:
+		(void) nge_restart(ngep);
+		NGE_REPORT((ngep, "automatic recovery activated"));
+		break;
+	}
+
+	/* a detected tx stall moves the chip towards reset via chip_stop */
+	if (err)
+		(void) nge_chip_stop(ngep, B_TRUE);
+	mutex_exit(ngep->genlock);
+
+	/*
+	 * If the link state changed, tell the world about it (if
+	 * this version of MAC supports link state notification).
+	 * Note: can't do this while still holding the mutex.
+	 */
+	if (linkchg)
+		mac_link_update(ngep->mh, ngep->link_state);
+
+	return (result);
+
+}
+
+/*
+ * nge_intr_handle() -- service the individual causes recorded in
+ * <pintr_src>: dispatch receive and transmit-recycle work, maintain
+ * the software interrupt statistics, drive the rx polling heuristic
+ * from the soft-timer tick, and wake the factotum on a PHY/MII (mint)
+ * interrupt.
+ */
+static void
+nge_intr_handle(nge_t *ngep, nge_intr_src *pintr_src)
+{
+	boolean_t brx;
+	boolean_t btx;
+	nge_mintr_src mintr_src;
+
+	brx = B_FALSE;
+	btx = B_FALSE;
+	ngep->statistics.sw_statistics.intr_count++;
+	ngep->statistics.sw_statistics.intr_lval = pintr_src->intr_val;
+	/* any rx-related cause (error, miss, complete, soft tick)? */
+	brx = (pintr_src->int_bits.reint | pintr_src->int_bits.miss
+	    | pintr_src->int_bits.rcint | pintr_src->int_bits.stint)
+	    > 0 ? B_TRUE : B_FALSE;
+	if (pintr_src->int_bits.reint)
+		ngep->statistics.sw_statistics.rx_err++;
+	if (pintr_src->int_bits.miss)
+		ngep->statistics.sw_statistics.rx_nobuffer++;
+
+	if (brx)
+		nge_receive(ngep);
+	/* tx error or tx complete: reclaim transmit buffers */
+	btx = (pintr_src->int_bits.teint | pintr_src->int_bits.tcint)
+	    > 0 ? B_TRUE : B_FALSE;
+	if (btx)
+		nge_tx_recycle(ngep, B_TRUE);
+	if (pintr_src->int_bits.teint)
+		ngep->statistics.sw_statistics.tx_stop_err++;
+	if (pintr_src->int_bits.stint) {
+		/*
+		 * Soft-timer tick: polling heuristic.  Enter poll mode
+		 * once a tick sees more than POLL_LWATER packets; while
+		 * polling, count quiet ticks (fewer than INTR_HWATER
+		 * packets) and drop back to interrupt mode after 10 of
+		 * them.  recv_count is reset every tick.
+		 */
+		if ((ngep->poll) &&
+		    (ngep->recv_count < INTR_HWATER)) {
+			ngep->poll_time++;
+		}
+		if ((ngep->recv_count > POLL_LWATER) &&
+		    (!ngep->poll)) {
+			ngep->poll = B_TRUE;
+		}
+
+		if (ngep->poll_time == 10) {
+			ngep->poll = B_FALSE;
+			ngep->poll_time = 0;
+		}
+		ngep->recv_count = 0;
+	}
+	if (pintr_src->int_bits.feint)
+		nge_chip_err(ngep);
+	/* link interrupt, check the link state */
+	if (pintr_src->int_bits.mint) {
+		/* read-and-clear the MII interrupt source register */
+		mintr_src.src_val = nge_reg_get32(ngep, NGE_MINTR_SRC);
+		nge_reg_put32(ngep, NGE_MINTR_SRC, mintr_src.src_val);
+		nge_wake_factotum(ngep);
+	}
+}
+
+/*
+ * nge_chip_intr() -- handle chip interrupts
+ */
+/* ARGSUSED */
+uint_t
+nge_chip_intr(caddr_t arg1, caddr_t arg2)
+{
+	nge_t *ngep = (nge_t *)arg1;
+	nge_intr_src intr_src;
+	nge_intr_mask intr_mask;
+
+	mutex_enter(ngep->genlock);
+
+	/*
+	 * Check whether the chip says it's asserting #INTA;
+	 * if not, don't process or claim the interrupt.
+	 */
+	intr_src.intr_val = nge_reg_get32(ngep, NGE_INTR_SRC);
+	if (intr_src.intr_val == 0) {
+		mutex_exit(ngep->genlock);
+		return (DDI_INTR_UNCLAIMED);
+	}
+	/*
+	 * Ack the interrupt
+	 */
+	nge_reg_put32(ngep, NGE_INTR_SRC, intr_src.intr_val);
+
+	/* claim but ignore interrupts arriving while not RUNNING */
+	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
+		mutex_exit(ngep->genlock);
+		return (DDI_INTR_CLAIMED);
+	}
+	nge_intr_handle(ngep, &intr_src);
+	/*
+	 * In poll mode, mask everything except the soft-timer tick
+	 * (stint); restore the full interrupt mask when polling stops.
+	 */
+	if (ngep->poll && !ngep->ch_intr_mode) {
+		intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
+		intr_mask.mask_val &= ~(ngep->intr_masks);
+		intr_mask.mask_bits.stint = NGE_SET;
+		nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
+		ngep->ch_intr_mode = B_TRUE;
+	} else if ((ngep->ch_intr_mode) && (!ngep->poll)) {
+		nge_reg_put32(ngep, NGE_INTR_MASK, ngep->intr_masks);
+		ngep->ch_intr_mode = B_FALSE;
+	}
+	mutex_exit(ngep->genlock);
+	return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * nge_pp_ioctl() -- handle the NGE_PEEK/NGE_POKE diagnostic ioctls:
+ * validate the request block, select the access routine for the
+ * requested register space (config / MMIO / MII / SEEPROM), and
+ * perform the access.
+ *
+ * Returns IOC_REPLY (peeked data to copy out), IOC_ACK (poke done),
+ * or IOC_INVAL on any validation or access failure.
+ */
+static enum ioc_reply
+nge_pp_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
+{
+	int err;
+	uint64_t sizemask;
+	uint64_t mem_va;
+	uint64_t maxoff;
+	boolean_t peek;
+	nge_peekpoke_t *ppd;
+	int (*ppfn)(nge_t *ngep, nge_peekpoke_t *ppd);
+
+	switch (cmd) {
+	default:
+		return (IOC_INVAL);
+
+	case NGE_PEEK:
+		peek = B_TRUE;
+		break;
+
+	case NGE_POKE:
+		peek = B_FALSE;
+		break;
+	}
+
+	/*
+	 * Validate format of ioctl
+	 */
+	if (iocp->ioc_count != sizeof (nge_peekpoke_t))
+		return (IOC_INVAL);
+	if (mp->b_cont == NULL)
+		return (IOC_INVAL);
+	ppd = (nge_peekpoke_t *)mp->b_cont->b_rptr;
+
+	/*
+	 * Validate request parameters
+	 */
+	switch (ppd->pp_acc_space) {
+	default:
+		return (IOC_INVAL);
+
+	case NGE_PP_SPACE_CFG:
+		/*
+		 * Config space
+		 */
+		sizemask = 8|4|2|1;
+		mem_va = 0;
+		maxoff = PCI_CONF_HDR_SIZE;
+		ppfn = peek ? nge_chip_peek_cfg : nge_chip_poke_cfg;
+		break;
+
+	case NGE_PP_SPACE_REG:
+		/*
+		 * Memory-mapped I/O space
+		 */
+		sizemask = 8|4|2|1;
+		mem_va = 0;
+		maxoff = NGE_REG_SIZE;
+		ppfn = peek ? nge_chip_peek_reg : nge_chip_poke_reg;
+		break;
+
+	case NGE_PP_SPACE_MII:
+		sizemask = 4|2|1;
+		mem_va = 0;
+		maxoff = NGE_MII_SIZE;
+		ppfn = peek ? nge_chip_peek_mii : nge_chip_poke_mii;
+		break;
+
+	case NGE_PP_SPACE_SEEPROM:
+		sizemask = 4|2|1;
+		mem_va = 0;
+		maxoff = NGE_SEEROM_SIZE;
+		ppfn = peek ? nge_chip_peek_seeprom : nge_chip_poke_seeprom;
+		break;
+	}
+
+	/* access size must be a power of two allowed for this space */
+	switch (ppd->pp_acc_size) {
+	default:
+		return (IOC_INVAL);
+
+	case 8:
+	case 4:
+	case 2:
+	case 1:
+		if ((ppd->pp_acc_size & sizemask) == 0)
+			return (IOC_INVAL);
+		break;
+	}
+
+	/* offset must be size-aligned and the access wholly in range */
+	if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
+		return (IOC_INVAL);
+
+	if (ppd->pp_acc_offset >= maxoff)
+		return (IOC_INVAL);
+
+	if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
+		return (IOC_INVAL);
+
+	/*
+	 * All OK - go do it!  Initialise <err> first: previously it was
+	 * tested below without initialisation, so if <ppfn> were ever
+	 * NULL the result would have been indeterminate.
+	 */
+	err = DDI_FAILURE;
+	ppd->pp_acc_offset += mem_va;
+	if (ppfn)
+		err = (*ppfn)(ngep, ppd);
+	if (err != DDI_SUCCESS)
+		return (IOC_INVAL);
+	return (peek ? IOC_REPLY : IOC_ACK);
+}
+
+static enum ioc_reply nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp,
+	struct iocblk *iocp);
+#pragma no_inline(nge_diag_ioctl)
+
+/*
+ * nge_diag_ioctl() -- handle the diagnostic subset of the driver
+ * ioctls.  NGE_PEEK/NGE_POKE are passed on to nge_pp_ioctl();
+ * NGE_PHY_RESET returns IOC_RESTART_ACK, while the other reset and
+ * diag commands are simply acknowledged without action here.
+ */
+static enum ioc_reply
+nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
+{
+	ASSERT(mutex_owned(ngep->genlock));
+
+	switch (cmd) {
+	default:
+		nge_error(ngep, "nge_diag_ioctl: invalid cmd 0x%x", cmd);
+		return (IOC_INVAL);
+
+	case NGE_DIAG:
+		return (IOC_ACK);
+
+	case NGE_PEEK:
+	case NGE_POKE:
+		return (nge_pp_ioctl(ngep, cmd, mp, iocp));
+
+	case NGE_PHY_RESET:
+		return (IOC_RESTART_ACK);
+
+	case NGE_SOFT_RESET:
+	case NGE_HARD_RESET:
+		return (IOC_ACK);
+	}
+
+	/* NOTREACHED */
+}
+
+/*
+ * nge_chip_ioctl() -- top-level dispatcher for the driver-specific
+ * ioctl commands.  Diagnostic commands are honoured only when
+ * NGE_DEBUGGING is set; MII and (conditionally compiled) SEEPROM /
+ * FLASH commands are currently rejected with IOC_INVAL.
+ */
+enum ioc_reply
+nge_chip_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
+{
+	int cmd;
+
+	ASSERT(mutex_owned(ngep->genlock));
+
+	cmd = iocp->ioc_cmd;
+
+	switch (cmd) {
+	default:
+		return (IOC_INVAL);
+
+	case NGE_DIAG:
+	case NGE_PEEK:
+	case NGE_POKE:
+	case NGE_PHY_RESET:
+	case NGE_SOFT_RESET:
+	case NGE_HARD_RESET:
+#if	NGE_DEBUGGING
+		return (nge_diag_ioctl(ngep, cmd, mp, iocp));
+#else
+		return (IOC_INVAL);
+#endif
+
+	case NGE_MII_READ:
+	case NGE_MII_WRITE:
+		return (IOC_INVAL);
+
+#if	NGE_SEE_IO32
+	case NGE_SEE_READ:
+	case NGE_SEE_WRITE:
+		return (IOC_INVAL);
+#endif
+
+#if	NGE_FLASH_IO32
+	case NGE_FLASH_READ:
+	case NGE_FLASH_WRITE:
+		return (IOC_INVAL);
+#endif
+	}
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nge/nge_chip.h Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,1619 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file may contain confidential information of Nvidia
+ * and should not be distributed in source form without approval
+ * from Sun Legal.
+ */
+
+#ifndef _SYS_NGE_CHIP_H
+#define _SYS_NGE_CHIP_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nge.h"
+
+#define VENDOR_ID_NVIDIA 0x10de
+
+#define DEVICE_ID_MCP04_37 0x37
+#define DEVICE_ID_MCP04_38 0x38
+#define DEVICE_ID_CK804_56 0x56
+#define DEVICE_ID_CK804_57 0x57
+#define DEVICE_ID_MCP51_269 0x269
+#define DEVICE_ID_MCP51_268 0x268
+#define DEVICE_ID_MCP55_373 0x373
+#define DEVICE_ID_MCP55_372 0x372
+#define DEVICE_ID_MCP61_3EE 0x3ee
+#define DEVICE_ID_MCP61_3EF 0x3ef
+#define DEVICE_ID_NF3_E6 0xe6
+#define DEVICE_ID_NF3_DF 0xdf
+
+/* Private PCI configuration register for bus config of ck804/mcp55 */
+#define PCI_CONF_HT_INTERNAL 0x4c
+
+typedef union _nge_interbus_conf {
+ uint32_t conf_val;
+ struct {
+ uint32_t unit_id:5;
+ uint32_t resv5_23:19;
+ uint32_t aux_val:3;
+ uint32_t resv27:1;
+ uint32_t msi_off:1;
+ uint32_t msix_off:1; /* mcp55 only */
+ uint32_t resv30_31:2;
+ } conf_bits;
+} nge_interbus_conf;
+
+/* Private PCI configuration register for MSI mask of mcp55 */
+#define PCI_CONF_HT_MSI_MASK 0x60
+
+typedef union _nge_msi_mask_conf {
+ uint32_t msi_mask_conf_val;
+ struct {
+ uint32_t vec0_off:1;
+ uint32_t vec1_off:1;
+ uint32_t vec2_off:1;
+ uint32_t vec3_off:1;
+ uint32_t vec4_off:1;
+ uint32_t vec5_off:1;
+ uint32_t vec6_off:1;
+ uint32_t vec7_off:1;
+ uint32_t resv8_31:24;
+ } msi_mask_bits;
+} nge_msi_mask_conf;
+
+/* Private PCI configuration register for MSI map capability of mcp55 */
+#define PCI_CONF_HT_MSI_MAP_CAP 0x6c
+
+typedef union _nge_msi_map_cap_conf {
+ uint32_t msi_map_cap_conf_val;
+ struct {
+ uint32_t cap_id:8;
+ uint32_t next_ptr:8;
+ uint32_t map_en:1;
+ uint32_t map_fixed:1;
+ uint32_t resv18_26:9;
+ uint32_t cap_type:5;
+ } map_cap_conf_bits;
+} nge_msi_map_cap_conf;
+
+/*
+ * Master interrupt
+ */
+#define NGE_INTR_SRC 0x000
+#define INTR_SRC_ALL 0x00007fff
+typedef union _nge_intr_src {
+ uint32_t intr_val;
+ struct {
+ uint32_t reint:1;
+ uint32_t rcint:1;
+ uint32_t miss:1;
+ uint32_t teint:1;
+ uint32_t tcint:1;
+ uint32_t stint:1;
+ uint32_t mint:1;
+ uint32_t rfint:1;
+ uint32_t tfint:1;
+ uint32_t feint:1;
+ uint32_t resv10:1;
+ uint32_t resv11:1;
+ uint32_t resv12:1;
+ uint32_t resv13:1;
+ uint32_t phyint:1;
+ uint32_t resv15_31:17;
+ } int_bits;
+} nge_intr_src;
+
+/*
+ * Master interrupt Mask
+ */
+#define NGE_INTR_MASK 0x004
+#define NGE_INTR_ALL_EN 0x00007fff
+typedef union _nge_intr_mask {
+ uint32_t mask_val;
+ struct {
+ uint32_t reint:1;
+ uint32_t rcint:1;
+ uint32_t miss:1;
+ uint32_t teint:1;
+ uint32_t tcint:1;
+ uint32_t stint:1;
+ uint32_t mint:1;
+ uint32_t rfint:1;
+ uint32_t tfint:1;
+ uint32_t feint:1;
+ uint32_t resv10:1;
+ uint32_t resv11:1;
+ uint32_t resv12:1;
+ uint32_t resv13:1;
+ uint32_t phyint:1;
+ uint32_t resv15_31:17;
+ } mask_bits;
+} nge_intr_mask;
+
+/*
+ * Software timer control register
+ */
+#define NGE_SWTR_CNTL 0x008
+typedef union _nge_swtr_cntl {
+ uint8_t ctrl_val;
+ struct {
+ uint8_t stren:1;
+ uint8_t sten:1;
+ uint8_t resv2_7:6;
+ } cntl_bits;
+} nge_swtr_cntl;
+
+/*
+ * Software Timer Interval
+ */
+#define NGE_SWTR_ITC 0x00c
+#define POLL_LWATER 0x10
+#define INTR_HWATER 0x5
+#define SWTR_ITC 0x10
+typedef union _nge_itc {
+ uint32_t itc_val;
+ struct {
+ uint32_t sw_intv:16;
+ uint32_t sw_cur_val:16;
+ } itc_bits;
+} nge_itc;
+
+/*
+ * Fatal error register
+ */
+#define NGE_REG010 0x010
+typedef union _nge_reg010 {
+ uint32_t reg010_val;
+ struct {
+ uint32_t resv0:1;
+ uint32_t resv1:1;
+ uint32_t resv2:1;
+ uint32_t resv3:1;
+ uint32_t resv4:1;
+ uint32_t resv5:1;
+ uint32_t resv6:1;
+ uint32_t resv7:1;
+ uint32_t resv8:1;
+ uint32_t resv9:1;
+ uint32_t resv10:1;
+ uint32_t resv11_31:21;
+ } reg010_bits;
+} nge_reg010;
+
+/*
+ * MSI vector map register 0
+ */
+#define NGE_MSI_MAP0 0x020
+typedef union _nge_msi_map0_vec {
+ uint32_t msi_map0_val;
+ struct {
+ uint32_t reint_vec:4;
+ uint32_t rcint_vec:4;
+ uint32_t miss_vec:4;
+ uint32_t teint_vec:4;
+ uint32_t tcint_vec:4;
+ uint32_t stint_vec:4;
+ uint32_t mint_vec:4;
+ uint32_t rfint_vec:4;
+ } vecs_bits;
+} nge_msi_map0_vec;
+
+/*
+ * MSI vector map register 1
+ */
+#define NGE_MSI_MAP1 0x024
+typedef union _nge_msi_map1_vec {
+ uint32_t msi_map1_val;
+ struct {
+ uint32_t tfint_vec:4;
+ uint32_t feint_vec:4;
+ uint32_t resv8_11:4;
+ uint32_t resv12_15:4;
+ uint32_t resv16_19:4;
+ uint32_t resv20_23:4;
+ uint32_t resv24_31:8;
+ } vecs_bits;
+} nge_msi_map1_vec;
+
+
+/*
+ * MSI vector map register 2
+ */
+#define NGE_MSI_MAP2 0x028
+
+/*
+ * MSI vector map register 3
+ */
+#define NGE_MSI_MAP3 0x02c
+
+/*
+ * MSI mask register for mcp55
+ */
+#define NGE_MSI_MASK 0x30
+typedef union _nge_msi_mask {
+ uint32_t msi_mask_val;
+ struct {
+ uint32_t vec0:1;
+ uint32_t vec1:1;
+ uint32_t vec2:1;
+ uint32_t vec3:1;
+ uint32_t vec4:1;
+ uint32_t vec5:1;
+ uint32_t vec6:1;
+ uint32_t vec7:1;
+ uint32_t resv8_31:24;
+ }msi_msk_bits;
+}nge_msi_mask;
+
+/*
+ * Software misc register for mcp51
+ */
+#define NGE_SOFT_MISC 0x034
+typedef union _nge_soft_misc {
+ uint32_t misc_val;
+ struct {
+ uint32_t rx_clk_vx_rst:1;
+ uint32_t tx_clk_vx_rst:1;
+ uint32_t clk12m_vx_rst:1;
+ uint32_t fpci_clk_vx_rst:1;
+ uint32_t rx_clk_vc_rst:1;
+ uint32_t tx_clk_vc_rst:1;
+ uint32_t fs_clk_vc_rst:1;
+ uint32_t rst_ex_m2pintf:1;
+ uint32_t resv8_31:24;
+ } misc_bits;
+} nge_soft_misc;
+
+/*
+ * DMA configuration
+ */
+#define NGE_DMA_CFG 0x040
+typedef union _nge_dma_cfg {
+ uint32_t cfg_val;
+ struct {
+ uint32_t tx_start_pri:3;
+ uint32_t tx_start_pri_flag:1;
+ uint32_t tx_prd_rpri:3;
+ uint32_t tx_prd_rpri_flag:1;
+ uint32_t tx_prd_wpri:3;
+ uint32_t tx_prd_wpri_flag:1;
+ uint32_t rx_start_pri:3;
+ uint32_t rx_start_pri_flag:1;
+ uint32_t rx_prd_rpri:3;
+ uint32_t rx_prd_rpri_flag:1;
+ uint32_t rx_prd_wpri:3;
+ uint32_t rx_prd_wpri_flag:1;
+ uint32_t dma_max_pri:3;
+ uint32_t dma_wrr_disable:1;
+ uint32_t dma_pri_disable:1;
+ } cfg_bits;
+} nge_dma_cfg;
+
+/*
+ * Request DMA configuration
+ */
+#define NGE_DMA_RCFG 0x044
+typedef union _nge_dma_rcfg {
+ uint32_t dma_rcfg_val;
+ struct {
+ uint32_t tx_prd_coh_state:2;
+ uint32_t tx_data_coh_state:2;
+ uint32_t rx_prd_coh_state:2;
+ uint32_t rx_data_coh_state:2;
+ uint32_t max_roffset:5;
+ uint32_t resv13_31:19;
+ } rcfg_bis;
+} nge_dma_rcfg;
+
+/*
+ * Hot DMA configuration
+ */
+#define NGE_DMA_HOT_CFG 0x048
+typedef union _nge_dma_hcfg {
+ uint32_t dma_hcfg_val;
+ struct {
+ uint32_t resv0_3:4;
+ uint32_t noti_wstart_pri:3;
+ uint32_t noti_wstart_pri_flag:1;
+ uint32_t cmd_rstart_pri:3;
+ uint32_t cmd_rstart_pri_flag:1;
+ uint32_t cmd_wstart_pri:3;
+ uint32_t cmd_wstart_pri_flag:1;
+ uint32_t resv16_31:16;
+ } hcfg_bits;
+} nge_dma_hcfg;
+
+/*
+ * PMU control register 0 for mcp51
+ */
+#define NGE_PMU_CNTL0 0x060
+#define NGE_PMU_CORE_SPD10_BUSY 0x8
+#define NGE_PMU_CORE_SPD10_IDLE 0xB
+#define NGE_PMU_CORE_SPD100_BUSY 0x4
+#define NGE_PMU_CORE_SPD100_IDLE 0x7
+#define NGE_PMU_CORE_SPD1000_BUSY 0x0
+#define NGE_PMU_CORE_SPD1000_IDLE 0x3
+
+typedef union _nge_pmu_cntl0 {
+ uint32_t cntl0_val;
+ struct {
+ uint32_t core_spd10_fp:4;
+ uint32_t core_spd10_idle:4;
+ uint32_t core_spd100_fp:4;
+ uint32_t core_spd100_idle:4;
+ uint32_t core_spd1000_fp:4;
+ uint32_t core_spd1000_idle:4;
+ uint32_t core_sts_cur:8;
+ } cntl0_bits;
+} nge_pmu_cntl0;
+
+/*
+ * PMU control register 1 for mcp51
+ */
+#define NGE_PMU_CNTL1 0x064
+typedef union _nge_pmu_cntl1 {
+ uint32_t cntl1_val;
+ struct {
+ uint32_t dev_fp:4;
+ uint32_t dev_idle:4;
+ uint32_t resv8_27:20;
+ uint32_t dev_sts_cur:4;
+ } cntl1_bits;
+} nge_pmu_cntl1;
+
+/*
+ * PMU control register 2 for mcp51
+ */
+#define NGE_PMU_CNTL2 0x068
+typedef union _nge_pmu_cntl2 {
+ uint32_t cntl2_val;
+ struct {
+ uint32_t core_override:4;
+ uint32_t resv4_7:4;
+ uint32_t dev_override:4;
+ uint32_t resv12_15:4;
+ uint32_t core_override_en:1;
+ uint32_t dev_override_en:1;
+ uint32_t core_enable:1;
+ uint32_t dev_enable:1;
+ uint32_t rx_wake_dis:1;
+ uint32_t cidle_timer:1;
+ uint32_t didle_timer:1;
+ uint32_t resv23_31:9;
+ } cntl2_bits;
+} nge_pmu_cntl2;
+
+/*
+ * PMU core idle limit register for mcp51
+ */
+#define NGE_PMU_CIDLE_LIMIT 0x06c
+#define NGE_PMU_CIDLE_LIMIT_DEF 0xffff
+
+/*
+ * PMU device idle limit register for mcp51
+ */
+#define NGE_PMU_DIDLE_LIMIT 0x070
+#define NGE_PMU_DIDLE_LIMIT_DEF 0xffff
+
+/*
+ * PMU core idle count value register for mcp51
+ */
+#define NGE_PMU_CIDLE_COUNT 0x074
+#define NGE_PMU_CIDEL_COUNT_DEF 0xffff
+
+/*
+ * PMU device idle count value register for mcp51
+ */
+#define NGE_PMU_DIDLE_COUNT 0x078
+#define NGE_PMU_DIDEL_COUNT_DEF 0xffff
+
+/*
+ * Transmit control
+ */
+#define NGE_TX_CNTL 0x080
+typedef union _nge_tx_cntl {
+ uint32_t cntl_val;
+ struct {
+ uint32_t paen:1; /* only for mcp55, otherwise reserve */
+ uint32_t resv1:1;
+ uint32_t retry_en:1;
+ uint32_t pad_en:1;
+ uint32_t fappend_en:1;
+ uint32_t two_def_en:1;
+ uint32_t resv6_7:2;
+ uint32_t max_retry:4;
+ uint32_t burst_en:1;
+ uint32_t resv13_15:3;
+ uint32_t retry_emask:1;
+ uint32_t exdef_mask:1;
+ uint32_t def_mask:1;
+ uint32_t lcar_mask:1;
+ uint32_t tlcol_mask:1;
+ uint32_t uflo_err_mask:1;
+ uint32_t resv22_23:2;
+ uint32_t jam_seq_en:1;
+ uint32_t resv25_31:7;
+ } cntl_bits;
+} nge_tx_cntl;
+
+/*
+ * Transmit enable
+ * Note: for ck804 or mcp51, this is an 8-bit register;
+ * for mcp55, it is a 32-bit register.
+ */
+#define NGE_TX_EN 0x084
+typedef union _nge_tx_en {
+ uint8_t val;
+ struct {
+ uint8_t tx_en:1;
+ uint8_t resv1_7:7;
+ } bits;
+} nge_tx_en;
+
+/*
+ * Transmit status
+ */
+#define NGE_TX_STA 0x088
+typedef union _nge_tx_sta {
+ uint32_t sta_val;
+ struct {
+ uint32_t tx_chan_sta:1;
+ uint32_t resv1_15:15;
+ uint32_t retry_err:1;
+ uint32_t exdef:1;
+ uint32_t def:1;
+ uint32_t lcar:1;
+ uint32_t tlcol:1;
+ uint32_t uflo:1;
+ uint32_t resv22_31:10;
+ } sta_bits;
+} nge_tx_sta;
+
+/*
+ * Receive control
+ */
+#define NGE_RX_CNTL0 0x08c
+typedef union _nge_rx_cntrl0 {
+ uint32_t cntl_val;
+ struct {
+ uint32_t resv0:1;
+ uint32_t padsen:1;
+ uint32_t fcsren:1;
+ uint32_t paen:1;
+ uint32_t lben:1;
+ uint32_t afen:1;
+ uint32_t runten:1;
+ uint32_t brdis:1;
+ uint32_t rdfen:1;
+ uint32_t slfb:1;
+ uint32_t resv10_15:6;
+ uint32_t runtm:1;
+ uint32_t rlcolm:1;
+ uint32_t maxerm:1;
+ uint32_t lferm:1;
+ uint32_t crcm:1;
+ uint32_t ofolm:1;
+ uint32_t framerm:1;
+ uint32_t resv23_31:9;
+ } cntl_bits;
+} nge_rx_cntrl0;
+
+/*
+ * Maximum receive Frame size
+ */
+#define NGE_RX_CNTL1 0x090
+typedef union _nge_rx_cntl1 {
+ uint32_t cntl_val;
+ struct {
+ uint32_t length:14;
+ uint32_t resv14_31:18;
+ } cntl_bits;
+} nge_rx_cntl1;
+
+/*
+ * Receive enable register
+ * Note: for ck804 and mcp51, this is an 8-bit register;
+ * for mcp55, it is a 32-bit register.
+ */
+#define NGE_RX_EN 0x094
+typedef union _nge_rx_en {
+ uint8_t val;
+ struct {
+ uint8_t rx_en:1;
+ uint8_t resv1_7:7;
+ } bits;
+} nge_rx_en;
+
+/*
+ * Receive status register
+ */
+#define NGE_RX_STA 0x098
+typedef union _nge_rx_sta {
+ uint32_t sta_val;
+ struct {
+ uint32_t rx_chan_sta:1;
+ uint32_t resv1_15:15;
+ uint32_t runt_sta:1;
+ uint32_t rlcol_sta:1;
+ uint32_t mlen_err:1;
+ uint32_t lf_err:1;
+ uint32_t crc_err:1;
+ uint32_t ofol_err:1;
+ uint32_t fram_err:1;
+ uint32_t resv23_31:9;
+ } sta_bits;
+} nge_rx_sta;
+
+/*
+ * Backoff Control
+ */
+#define NGE_BKOFF_CNTL 0x09c
+#define BKOFF_RSEED 0x8
+#define BKOFF_SLIM_GMII 0x3ff
+#define BKOFF_SLIM_MII 0x7f
+typedef union _nge_bkoff_cntl {
+ uint32_t cntl_val;
+ struct {
+ uint32_t rseed:8;
+ uint32_t sltm:10;
+ uint32_t resv18_30:13;
+ uint32_t leg_bk_en:1;
+ } bkoff_bits;
+} nge_bkoff_cntl;
+
+/*
+ * Transmit deferral timing
+ */
+#define NGE_TX_DEF 0x0a0
+#define TX_TIFG_MII 0x15
+#define TX_IFG_RGMII_1000_FD 0x14
+#define TX_IFG_RGMII_OTHER 0x16
+#define TX_IFG2_MII 0x5
+#define TX_IFG2_RGMII_10_100 0x7
+#define TX_IFG2_RGMII_1000 0x5
+#define TX_IFG2_DEFAULT 0X0
+#define TX_IFG1_DEFAULT 0xf
+typedef union _nge_tx_def {
+ uint32_t def_val;
+ struct {
+ uint32_t ifg1_def:8;
+ uint32_t ifg2_def:8;
+ uint32_t if_def:8;
+ uint32_t resv24_31:8;
+ } def_bits;
+} nge_tx_def;
+
+/*
+ * Receive deferral timing
+ */
+#define NGE_RX_DEf 0x0a4
+#define RX_DEF_DEFAULT 0x16
+typedef union _nge_rx_def {
+ uint8_t def_val;
+ struct {
+ uint8_t rifg;
+ } def_bits;
+} nge_rx_def;
+
+/*
+ * Low 32 bit unicast address
+ */
+#define NGE_UNI_ADDR0 0x0a8
+union {
+ uint32_t addr_val;
+ struct {
+ uint32_t addr;
+ } addr_bits;
+} nge_uni_addr0;
+
+/*
+ * High 32 bit unicast address
+ */
+#define NGE_UNI_ADDR1 0x0ac
+typedef union _nge_uni_addr1 {
+ uint32_t addr_val;
+ struct {
+ uint32_t addr:16;
+ uint32_t resv16_31:16;
+ } addr_bits;
+} nge_uni_addr1;
+
+/*
+ * Low 32 bit multicast address
+ */
+#define NGE_MUL_ADDR0 0x0b0
+union {
+ uint32_t addr_val;
+ struct {
+ uint32_t addr;
+ }addr_bits;
+}nge_mul_addr0;
+
+/*
+ * High 32 bit multicast address
+ */
+#define NGE_MUL_ADDR1 0x0b4
+typedef union _nge_mul_addr1 {
+ uint32_t addr_val;
+ struct {
+ uint32_t addr:16;
+ uint32_t resv16_31:16;
+ }addr_bits;
+}nge_mul_addr1;
+
+/*
+ * Low 32 bit multicast mask
+ */
+#define NGE_MUL_MASK 0x0b8
+union {
+ uint32_t mask_val;
+ struct {
+ uint32_t mask;
+ } mask_bits;
+} nge_mul_mask0;
+
+/*
+ * High 32 bit multicast mask
+ */
+#define NGE_MUL_MASK1 0x0bc
+union {
+ uint32_t mask_val;
+ struct {
+ uint32_t mask:16;
+ uint32_t resv16_31:16;
+ } mask_bits;
+} nge_mul_mask1;
+
+/*
+ * MAC-to-PHY interface
+ */
+#define NGE_MAC2PHY 0x0c0
+#define low_speed 0x0
+#define fast_speed 0x1
+#define giga_speed 0x2
+#define err_speed 0x4
+#define MII_IN 0x0
+#define RGMII_IN 0x1
+#define ERR_IN1 0x3
+#define ERR_IN2 0x4
+typedef union _nge_mac2phy {
+ uint32_t m2p_val;
+ struct {
+ uint32_t speed:2;
+ uint32_t resv2_7:6;
+ uint32_t hdup_en:1;
+ uint32_t resv9:1;
+ uint32_t phyintr:1; /* for mcp55 only */
+ uint32_t phyintrlvl:1; /* for mcp55 only */
+ uint32_t resv12_27:16;
+ uint32_t in_type:2;
+ uint32_t resv30_31:2;
+ } m2p_bits;
+} nge_mac2phy;
+
+/*
+ * Transmit Descriptor Ring address
+ */
+#define NGE_TX_DADR 0x100
+typedef union _nge_tx_addr {
+ uint32_t addr_val;
+ struct {
+ uint32_t resv0_2:3;
+ uint32_t addr:29;
+ } addr_bits;
+} nge_tx_addr;
+
+/*
+ * Receive Descriptor Ring address
+ */
+#define NGE_RX_DADR 0x104
+typedef union _nge_rx_addr {
+ uint32_t addr_val;
+ struct {
+ uint32_t resv0_2:3;
+ uint32_t addr:29;
+ } addr_bits;
+} nge_rx_addr;
+
+/*
+ * Rx/tx descriptor ring length
+ * Note: for mcp55, tdlen/rdlen are 14 bit.
+ */
+#define NGE_RXTX_DLEN 0x108
+typedef union _nge_rxtx_dlen {
+ uint32_t dlen_val;
+ struct {
+ uint32_t tdlen:14;
+ uint32_t resv14_15:2;
+ uint32_t rdlen:14;
+ uint32_t resv30_31:2;
+ } dlen_bits;
+} nge_rxtx_dlen;
+
+/*
+ * Transmit polling register
+ */
+#define NGE_TX_POLL 0x10c
+#define TX_POLL_INTV_1G 10
+#define TX_POLL_INTV_100M 100
+#define TX_POLL_INTV_10M 1000
+
+typedef union _nge_tx_poll {
+ uint32_t poll_val;
+ struct {
+ uint32_t tpi:16;
+ uint32_t tpen:1;
+ uint32_t resv17_31:15;
+ } poll_bits;
+} nge_tx_poll;
+
+/*
+ * Receive polling register
+ */
+#define NGE_RX_POLL 0x110
+#define RX_POLL_INTV_1G 10
+#define RX_POLL_INTV_100M 100
+#define RX_POLL_INTV_10M 1000
+typedef union _nge_rx_poll {
+ uint32_t poll_val;
+ struct {
+ uint32_t rpi:16;
+ uint32_t rpen:1;
+ uint32_t resv17_31:15;
+ } poll_bits;
+} nge_rx_poll;
+
+/*
+ * Transmit polling count
+ */
+#define NGE_TX_PCNT 0x114
+union {
+ uint32_t cnt_val;
+ struct {
+ uint32_t pcnt:32;
+ } cnt_bits;
+} nge_tx_pcnt;
+
+/*
+ * Receive polling count
+ */
+#define NGE_RX_PCNT 0x118
+union {
+ uint32_t cnt_val;
+ struct {
+ uint32_t pcnt:32;
+ } cnt_bits;
+} nge_rx_pcnt;
+
+
+/*
+ * Current tx's descriptor address
+ */
+#define NGE_TX_CUR_DADR 0x11c
+union {
+ uint32_t addr_val;
+ struct {
+ uint32_t resv0_2:3;
+ uint32_t addr:29;
+ } addr_bits;
+} nge_tx_cur_addr;
+
+/*
+ * Current rx's descriptor address
+ */
+#define NGE_RX_CUR_DADR 0x120
+union {
+ uint32_t addr_val;
+ struct {
+ uint32_t resv0_2:3;
+ uint32_t addr:29;
+ } addr_bits;
+} nge_rx_cur_addr;
+
+/*
+ * Current tx's data buffer address
+ */
+#define NGE_TX_CUR_PRD0 0x124
+union {
+ uint32_t prd0_val;
+ struct {
+ uint32_t prd0:32;
+ } prd0_bits;
+} nge_tx_cur_prd0;
+
+/*
+ * Current tx's data buffer status
+ */
+#define NGE_TX_CUR_PRD1 0x128
+union {
+ uint32_t prd1_val;
+ struct {
+ uint32_t rebytes:16;
+ uint32_t status:16;
+ } prd1_bits;
+} nge_tx_cur_prd1;
+
+/*
+ * Current rx's data buffer address
+ */
+#define NGE_RX_CUR_PRD0 0x12c
+union {
+ uint32_t prd0_val;
+ struct {
+ uint32_t prd0:32;
+ }prd0_bits;
+}nge_rx_cur_prd0;
+
+/*
+ * Current rx's data buffer status
+ */
+#define NGE_RX_CUR_PRD1 0x130
+
+/*
+ * Next tx's descriptor address
+ */
+#define NGE_TX_NXT_DADR 0x134
+union {
+ uint32_t dadr_val;
+ struct {
+ uint32_t addr:32;
+ }addr_bits;
+}nge_tx_nxt_dadr;
+
+/*
+ * Next rx's descriptor address
+ */
+#define NGE_RX_NXT_DADR 0x138
+union {
+ uint32_t dadr_val;
+ struct {
+ uint32_t addr:32;
+ } addr_bits;
+} nge_rx_nxt_dadr;
+
+/*
+ * Transmit fifo watermark
+ */
+#define NGE_TX_FIFO_WM 0x13c
+#define TX_FIFO_TBFW 0
+#define TX_FIFO_NOB_WM_MII 1
+#define TX_FIFO_NOB_WM_GMII 8
+#define TX_FIFO_DATA_LWM 0x20
+#define TX_FIFO_PRD_LWM 0x8
+#define TX_FIFO_PRD_HWM 0x38
+typedef union _nge_tx_fifo_wm {
+ uint32_t wm_val;
+ struct {
+ uint32_t data_lwm:9;
+ uint32_t resv8_11:3;
+ uint32_t prd_lwm:6;
+ uint32_t uprd_hwm:6;
+ uint32_t nbfb_wm:4;
+ uint32_t fb_wm:4;
+ } wm_bits;
+} nge_tx_fifo_wm;
+
+/*
+ * Receive fifo watermark
+ */
+#define NGE_RX_FIFO_WM 0x140
+typedef union _nge_rx_fifo_wm {
+ uint32_t wm_val;
+ struct {
+ uint32_t data_hwm:9;
+ uint32_t resv9_11:3;
+ uint32_t prd_lwm:4;
+ uint32_t resv16_17:2;
+ uint32_t prd_hwm:4;
+ uint32_t resv22_31:10;
+ } wm_bits;
+} nge_rx_fifo_wm;
+
+/*
+ * Chip mode control
+ */
+#define NGE_MODE_CNTL 0x144
+#define DESC_MCP1 0x0
+#define DESC_OFFLOAD 0x1
+#define DESC_HOT 0x2
+#define DESC_RESV 0x3
+#define MACHINE_BUSY 0x0
+#define MACHINE_IDLE 0x1
+typedef union _nge_mode_cntl {
+ uint32_t mode_val;
+ struct {
+ uint32_t txdm:1;
+ uint32_t rxdm:1;
+ uint32_t dma_dis:1;
+ uint32_t dma_status:1;
+ uint32_t bm_reset:1;
+ uint32_t resv5:1;
+ uint32_t vlan_strip:1; /* mcp55 chip only */
+ uint32_t vlan_ins:1; /* mcp55 chip only */
+ uint32_t desc_type:2;
+ uint32_t rx_sum_en:1;
+ uint32_t tx_prd_cu_en:1;
+ uint32_t w64_dis:1;
+ uint32_t tx_rcom_en:1;
+ uint32_t rx_filter_en:1;
+ uint32_t resv15:1;
+ uint32_t resv16:1; /* ck804 and mcp51 only */
+ uint32_t resv17:1; /* ck804 and mcp51 only */
+ uint32_t resv18:1; /* ck804 and mcp51 only */
+ uint32_t resv19_21:3;
+ uint32_t tx_fetch_prd:1; /* mcp51/mcp55 only */
+ uint32_t rx_fetch_prd:1; /* mcp51/mcp55 only */
+ uint32_t resv24_29:6;
+ uint32_t rx_status:1;
+ uint32_t tx_status:1;
+ } mode_bits;
+} nge_mode_cntl;
+
+#define NGE_TX_DADR_HI 0x148
+#define NGE_RX_DADR_HI 0x14c
+
+/*
+ * Mii interrupt register
+ * Note: for mcp55, this is a 32-bit register.
+ */
+#define NGE_MINTR_SRC 0x180
+typedef union _nge_mintr_src {
+ uint8_t src_val;
+ struct {
+ uint8_t mrei:1;
+ uint8_t mcc2:1;
+ uint8_t mcc1:1;
+ uint8_t mapi:1;
+ uint8_t mpdi:1;
+ uint8_t resv5_7:3;
+ } src_bits;
+} nge_mintr_src;
+
+/*
+ * Mii interrupt mask
+ * Note: for mcp55, this is a 32-bit register.
+ */
+#define NGE_MINTR_MASK 0x184
+typedef union _nge_mintr_mask {
+ uint8_t mask_val;
+ struct {
+ uint8_t mrei:1;
+ uint8_t mcc2:1;
+ uint8_t mcc1:1;
+ uint8_t mapi:1;
+ uint8_t mpdi:1;
+ uint8_t resv5_7:3;
+ } mask_bits;
+} nge_mintr_mask;
+
+/*
+ * Mii control and status
+ */
+#define NGE_MII_CS 0x188
+#define MII_POLL_INTV 0x4
+typedef union _nge_mii_cs {
+ uint32_t cs_val;
+ struct {
+ uint32_t excap:1;
+ uint32_t jab_dec:1;
+ uint32_t lk_up:1;
+ uint32_t ana_cap:1;
+ uint32_t rfault:1;
+ uint32_t auto_neg:1;
+ uint32_t mfps:1;
+ uint32_t resv7:1;
+ uint32_t exst:1;
+ uint32_t hdup_100m_t2:1;
+ uint32_t fdup_100m_t2:1;
+ uint32_t hdup_10m:1;
+ uint32_t fdup_10m:1;
+ uint32_t hdup_100m_x:1;
+ uint32_t fdup_100m_x:1;
+ uint32_t cap_100m_t4:1;
+ uint32_t ap_intv:4;
+ uint32_t ap_en:1;
+ uint32_t resv21_23:3;
+ uint32_t ap_paddr:5;
+ uint32_t resv29_31:3;
+ } cs_bits;
+} nge_mii_cs;
+
+/*
+ * Mii Clock timer register
+ */
+#define NGE_MII_TM 0x18c
+typedef union _nge_mii_tm {
+ uint16_t tm_val;
+ struct {
+ uint16_t timer_interv:8;
+ uint16_t timer_en:1;
+ uint16_t resv9_14:6;
+ uint16_t timer_status:1;
+ } tm_bits;
+} nge_mii_tm;
+
+/*
+ * Mdio address
+ */
+#define NGE_MDIO_ADR 0x190
+typedef union _nge_mdio_adr {
+ uint16_t adr_val;
+ struct {
+ uint16_t phy_reg:5;
+ uint16_t phy_adr:5;
+ uint16_t mdio_rw:1;
+ uint16_t resv11_14:4;
+ uint16_t mdio_clc:1;
+ } adr_bits;
+} nge_mdio_adr;
+
+/*
+ * Mdio data
+ */
+#define NGE_MDIO_DATA 0x194
+
+/*
+ * Power Management and Control
+ */
+#define NGE_PM_CNTL 0x200
+typedef union _nge_pm_cntl {
+ uint32_t cntl_val;
+ struct {
+ /*
+ * mp_en: Magic Packet Enable
+ * pm_en: Pattern Match Enable
+ * lc_en: Link Change Enable
+ */
+ uint32_t mp_en_d0:1;
+ uint32_t pm_en_d0:1;
+ uint32_t lc_en_d0:1;
+ uint32_t resv3:1;
+ uint32_t mp_en_d1:1;
+ uint32_t pm_en_d1:1;
+ uint32_t lc_en_d1:1;
+ uint32_t resv7:1;
+ uint32_t mp_en_d2:1;
+ uint32_t pm_en_d2:1;
+ uint32_t lc_en_d2:1;
+ uint32_t resv11:1;
+ uint32_t mp_en_d3:1;
+ uint32_t pm_en_d3:1;
+ uint32_t lc_en_d3:1;
+ uint32_t resv15:1;
+ uint32_t pat_match_en:5;
+ uint32_t resv21_23:3;
+ uint32_t pat_match_stat:5;
+ uint32_t magic_status:1;
+ uint32_t netman_status:1;
+ uint32_t resv31:1;
+ } cntl_bits;
+} nge_pm_cntl;
+
+#define NGE_MPT_CRC0 0x204
+#define NGE_PMC_MK00 0x208
+#define NGE_PMC_MK01 0x20C
+#define NGE_PMC_MK02 0x210
+#define NGE_PMC_MK03 0x214
+#define NGE_MPT_CRC1 0x218
+#define NGE_PMC_MK10 0x21c
+#define NGE_PMC_MK11 0x220
+#define NGE_PMC_MK12 0x224
+#define NGE_PMC_MK13 0x228
+#define NGE_MPT_CRC2 0x22c
+#define NGE_PMC_MK20 0x230
+#define NGE_PMC_MK21 0x234
+#define NGE_PMC_MK22 0x238
+#define NGE_PMC_MK23 0x23c
+#define NGE_MPT_CRC3 0x240
+#define NGE_PMC_MK30 0x244
+#define NGE_PMC_MK31 0x248
+#define NGE_PMC_MK32 0x24c
+#define NGE_PMC_MK33 0x250
+#define NGE_MPT_CRC4 0x254
+#define NGE_PMC_MK40 0x258
+#define NGE_PMC_MK41 0x25c
+#define NGE_PMC_MK42 0x260
+#define NGE_PMC_MK43 0x264
+#define NGE_PMC_ALIAS 0x268
+#define NGE_PMCSR_ALIAS 0x26c
+
+/*
+ * Seeprom control
+ */
+#define NGE_EP_CNTL 0x500
+#define EEPROM_CLKDIV 249
+#define EEPROM_WAITCLK 0x7
+typedef union _nge_cp_cntl {
+ uint32_t cntl_val;
+ struct {
+ uint32_t clkdiv:8;
+ uint32_t rom_size:3;
+ uint32_t resv11:1;
+ uint32_t word_wid:1;
+ uint32_t resv13_15:3;
+ uint32_t wait_slots:4;
+ uint32_t resv20_31:12;
+ } cntl_bits;
+} nge_cp_cntl;
+
+/*
+ * Seeprom cmd control
+ */
+#define NGE_EP_CMD 0x504
+#define SEEPROM_CMD_READ 0x0
+#define SEEPROM_CMD_WRITE_ENABLE 0x1
+#define SEEPROM_CMD_ERASE 0x2
+#define SEEPROM_CMD_WRITE 0x3
+#define SEEPROM_CMD_ERALSE_ALL 0x4
+#define SEEPROM_CMD_WRITE_ALL 0x5
+#define SEEPROM_CMD_WRITE_DIS 0x6
+#define SEEPROM_READY 0x1
+typedef union _nge_ep_cmd {
+ uint32_t cmd_val;
+ struct {
+ uint32_t addr:16;
+ uint32_t cmd:3;
+ uint32_t resv19_30:12;
+ uint32_t sts:1;
+ } cmd_bits;
+} nge_ep_cmd;
+
+/*
+ * Seeprom data register
+ */
+#define NGE_EP_DATA 0x508
+typedef union _nge_ep_data {
+ uint32_t data_val;
+ struct {
+ uint32_t data:16;
+ uint32_t resv16_31:16;
+ } data_bits;
+} nge_ep_data;
+
+/*
+ * Power management control 2nd register (since MCP51)
+ */
+#define NGE_PM_CNTL2 0x600
+typedef union _nge_pm_cntl2 {
+ uint32_t cntl_val;
+ struct {
+ uint32_t phy_coma_set:1;
+ uint32_t phy_coma_status:1;
+ uint32_t resv2_3:2;
+ uint32_t resv4:1;
+ uint32_t resv5_7:3;
+ uint32_t resv8_11:4;
+ uint32_t resv12_15:4;
+ uint32_t pmt5_en:1;
+ uint32_t pmt6_en:1;
+ uint32_t pmt7_en:1;
+ uint32_t resv19_23:5;
+ uint32_t pmt5_status:1;
+ uint32_t pmt6_status:1;
+ uint32_t pmt7_status:1;
+ uint32_t resv27_31:5;
+ } cntl_bits;
+} nge_pm_cntl2;
+
+
+/*
+ * ASF RAM 0x800-0xfff
+ */
+
+/*
+ * Hardware-defined Statistics Block Offsets
+ *
+ * These are given in the manual as addresses in NIC memory, starting
+ * from the NIC statistics area base address of 0x0280 (KS_BASE).
+ */
+
+#define KS_BASE 0x0280
+#define KS_ADDR(x) (((x)-KS_BASE)/sizeof (uint32_t))
+
+typedef enum {
+ KS_ifHOutOctets = KS_ADDR(0x0280),
+ KS_ifHOutZeroRetranCount,
+ KS_ifHOutOneRetranCount,
+ KS_ifHOutMoreRetranCount,
+ KS_ifHOutColCount,
+ KS_ifHOutFifoovCount,
+ KS_ifHOutLOCCount,
+ KS_ifHOutExDecCount,
+ KS_ifHOutRetryCount,
+
+ KS_ifHInFrameErrCount,
+ KS_ifHInExtraOctErrCount,
+ KS_ifHInLColErrCount,
+ KS_ifHInRuntCount,
+ KS_ifHInOversizeErrCount,
+ KS_ifHInFovErrCount,
+ KS_ifHInFCSErrCount,
+ KS_ifHInAlignErrCount,
+ KS_ifHInLenErrCount,
+ KS_ifHInUniPktsCount,
+ KS_ifHInBroadPksCount,
+ KS_ifHInMulPksCount,
+ KS_STATS_SIZE = KS_ADDR(0x2d0)
+
+} nge_stats_offset_t;
+
+/*
+ * Hardware-defined Statistics Block
+ *
+ * Another view of the statistics block, as an array and a structure ...
+ */
+
+typedef union {
+ uint32_t a[KS_STATS_SIZE];
+ struct {
+ uint32_t OutOctets;
+ uint32_t OutZeroRetranCount;
+ uint32_t OutOneRetranCount;
+ uint32_t OutMoreRetranCount;
+ uint32_t OutColCount;
+ uint32_t OutFifoovCount;
+ uint32_t OutLOCCount;
+ uint32_t OutExDecCount;
+ uint32_t OutRetryCount;
+
+ uint32_t InFrameErrCount;
+ uint32_t InExtraOctErrCount;
+ uint32_t InLColErrCount;
+ uint32_t InRuntCount;
+ uint32_t InOversizeErrCount;
+ uint32_t InFovErrCount;
+ uint32_t InFCSErrCount;
+ uint32_t InAlignErrCount;
+ uint32_t InLenErrCount;
+ uint32_t InUniPktsCount;
+ uint32_t InBroadPksCount;
+ uint32_t InMulPksCount;
+ } s;
+} nge_hw_statistics_t;
+
+/*
+ * MII (PHY) registers, beyond those already defined in <sys/miiregs.h>
+ */
+
+#define NGE_PHY_NUMBER 32
+#define MII_LP_ASYM_PAUSE 0x0800
+#define MII_LP_PAUSE 0x0400
+
+#define MII_100BASE_T4 0x0200
+#define MII_100BASET_FD 0x0100
+#define MII_100BASET_HD 0x0080
+#define MII_10BASET_FD 0x0040
+#define MII_10BASET_HD 0x0020
+
+#define MII_ID_MARVELL 0x5043
+#define MII_ID_CICADA 0x03f1
+#define MII_IDL_MASK 0xfc00
+#define MII_AN_LPNXTPG 8
+
+
+#define MII_IEEE_EXT_STATUS 15
+
+/*
+ * New bits in the MII_CONTROL register
+ */
+#define MII_CONTROL_1000MB 0x0040
+
+/*
+ * New bits in the MII_AN_ADVERT register
+ */
+#define MII_ABILITY_ASYM_PAUSE 0x0800
+#define MII_ABILITY_PAUSE 0x0400
+
+/*
+ * Values for the <selector> field of the MII_AN_ADVERT register
+ */
+#define MII_AN_SELECTOR_8023 0x0001
+
+/*
+ * Bits in the MII_1000BASE_T_CONTROL register
+ *
+ * The MASTER_CFG bit enables manual configuration of Master/Slave mode
+ * (otherwise, roles are automatically negotiated). When this bit is set,
+ * the MASTER_SEL bit forces Master mode, otherwise Slave mode is forced.
+ */
+#define MII_1000BASE_T_CONTROL 9
+#define MII_1000BT_CTL_MASTER_CFG 0x1000 /* enable role select */
+#define MII_1000BT_CTL_MASTER_SEL 0x0800 /* role select bit */
+#define MII_1000BT_CTL_ADV_FDX 0x0200
+#define MII_1000BT_CTL_ADV_HDX 0x0100
+
+/*
+ * Bits in the MII_1000BASE_T_STATUS register
+ */
+#define MII_1000BASE_T_STATUS 10
+#define MII_1000BT_STAT_MASTER_FAULT 0x8000
+#define MII_1000BT_STAT_MASTER_MODE 0x4000
+#define MII_1000BT_STAT_LCL_RCV_OK 0x2000
+#define MII_1000BT_STAT_RMT_RCV_OK 0x1000
+#define MII_1000BT_STAT_LP_FDX_CAP 0x0800
+#define MII_1000BT_STAT_LP_HDX_CAP 0x0400
+
+#define MII_CICADA_BYPASS_CONTROL MII_VENDOR(2)
+#define CICADA_125MHZ_CLOCK_ENABLE 0x0001
+
+#define MII_CICADA_10BASET_CONTROL MII_VENDOR(6)
+#define MII_CICADA_DISABLE_ECHO_MODE 0x2000
+
+#define MII_CICADA_EXT_CONTROL MII_VENDOR(7)
+#define MII_CICADA_MODE_SELECT_BITS 0xf000
+#define MII_CICADA_MODE_SELECT_RGMII 0x1000
+#define MII_CICADA_POWER_SUPPLY_BITS 0x0e00
+#define MII_CICADA_POWER_SUPPLY_3_3V 0x0000
+#define MII_CICADA_POWER_SUPPLY_2_5V 0x0200
+
+#define MII_CICADA_AUXCTRL_STATUS MII_VENDOR(12)
+#define MII_CICADA_PIN_PRORITY_SETTING 0x0004
+#define MII_CICADA_PIN_PRORITY_DEFAULT 0x0000
+
+
+#define NGE_REG_SIZE 0xfff
+#define NGE_MII_SIZE 0x20
+#define NGE_SEEROM_SIZE 0x800
+/*
+ * Legacy rx's bd which does not support
+ * any hardware offload
+ */
+typedef struct _legacy_rx_bd {
+ uint32_t host_buf_addr;
+ union {
+ uint32_t cntl_val;
+ struct {
+ uint32_t bcnt:16;
+ uint32_t end:1;
+ uint32_t miss:1;
+ uint32_t extra:1;
+ uint32_t inten:1;
+ uint32_t bam:1;
+ uint32_t mam:1;
+ uint32_t pam:1;
+ uint32_t runt:1;
+ uint32_t lcol:1;
+ uint32_t max:1;
+ uint32_t lfer:1;
+ uint32_t crc:1;
+ uint32_t ofol:1;
+ uint32_t fram:1;
+ uint32_t err:1;
+ uint32_t own:1;
+ } cntl_bits;
+ } cntl_status;
+} legacy_rx_bd, *plegacy_rx_bd;
+
+/*
+ * Standard offload rx's bd which supports hardware checksum
+ * for tcp/ip
+ */
+#define CK8G_NO_HSUM 0x0
+#define CK8G_TCP_SUM_ERR 0x1
+#define CK8G_UDP_SUM_ERR 0x2
+#define CK8G_IP_HSUM_ERR 0x3
+#define CK8G_IP_HSUM 0x4
+#define CK8G_TCP_SUM 0x5
+#define CK8G_UDP_SUM 0x6
+#define CK8G_RESV 0x7
+typedef struct _sum_rx_bd {
+ uint32_t host_buf_addr;
+ union {
+ uint32_t cntl_val;
+ struct {
+ uint32_t bcnt:14;
+ uint32_t resv14_29:16;
+ uint32_t inten:1;
+ uint32_t own:1;
+ } control_bits;
+ struct {
+ uint32_t bcnt:14;
+ uint32_t resv14:1;
+ uint32_t bam:1;
+ uint32_t mam:1;
+ uint32_t pam:1;
+ uint32_t runt:1;
+ uint32_t lcol:1;
+ uint32_t max:1;
+ uint32_t lfer:1;
+ uint32_t crc:1;
+ uint32_t ofol:1;
+ uint32_t fram:1;
+ uint32_t extra:1;
+ uint32_t l3_l4_sum:3;
+ uint32_t rend:1;
+ uint32_t err:1;
+ uint32_t own:1;
+ } status_bits;
+ } cntl_status;
+} sum_rx_bd, *psum_rx_bd;
+/*
+ * Hot offload rx's bd which supports 64-bit access and
+ * full TCP hardware offload
+ */
+typedef struct _hot_rx_bd {
+ uint32_t host_buf_addr_hi;
+ uint32_t host_buf_addr_lo;
+ uint32_t sw_tag;
+ union {
+ uint32_t cntl_val;
+ struct {
+ uint32_t bcnt:14;
+ uint32_t resv14_29:16;
+ uint32_t inten:1;
+ uint32_t own:1;
+ } control_bits;
+
+ struct {
+ uint32_t bcnt:14;
+ uint32_t ctmach_rd:1;
+ uint32_t bam:1;
+ uint32_t mam:1;
+ uint32_t pam:1;
+ uint32_t runt:1;
+ uint32_t lcol:1;
+ uint32_t max:1;
+ uint32_t lfer:1;
+ uint32_t crc:1;
+ uint32_t ofol:1;
+ uint32_t fram:1;
+ uint32_t extra:1;
+ uint32_t l3_l4_sum:3;
+ uint32_t rend:1;
+ uint32_t err:1;
+ uint32_t own:1;
+ } status_bits_legacy;
+ } cntl_status;
+} hot_rx_bd, *phot_rx_bd;
+
+/*
+ * Legacy tx's bd which does not support
+ * any hardware offload
+ */
+typedef struct _legacy_tx_bd {
+ uint32_t host_buf_addr;
+ union {
+ uint32_t cntl_val;
+ struct {
+ uint32_t bcnt:16;
+ uint32_t end:1;
+ uint32_t resv17_23:7;
+ uint32_t inten:1;
+ uint32_t resv25_30:6;
+ uint32_t own:1;
+ } control_bits;
+
+ struct {
+ uint32_t bcnt:16;
+ uint32_t end:1;
+ uint32_t rtry:1;
+ uint32_t trc:4;
+ uint32_t inten:1;
+ uint32_t exdef:1;
+ uint32_t def:1;
+ uint32_t lcar:1;
+ uint32_t lcol:1;
+ uint32_t uflo:1;
+ uint32_t err:1;
+ uint32_t own:1;
+ } status_bits;
+ } cntl_status;
+} legacy_tx_bd, *plegacy_tx_bd;
+
+/*
+ * Standard offload tx's bd which supports hardware checksum
+ * for tcp/ip
+ */
+typedef struct _sum_tx_bd {
+ uint32_t host_buf_addr;
+ union {
+ uint32_t cntl_val;
+ struct {
+ uint32_t bcnt:14;
+ uint32_t resv14_25:12;
+ uint32_t tcp_hsum:1;
+ uint32_t ip_hsum:1;
+ uint32_t segen:1;
+ uint32_t end:1;
+ uint32_t inten:1;
+ uint32_t own:1;
+ } control_sum_bits;
+
+ struct {
+ uint32_t bcnt:14;
+ uint32_t mss:14;
+ uint32_t segen:1;
+ uint32_t end:1;
+ uint32_t inten:1;
+ uint32_t own:1;
+ } control_tso_bits;
+
+ struct {
+ uint32_t bcnt:14;
+ uint32_t resv14_17:4;
+ uint32_t rtry:1;
+ uint32_t trc:4;
+ uint32_t inten:1;
+ uint32_t exdef:1;
+ uint32_t def:1;
+ uint32_t lcar:1;
+ uint32_t lcol:1;
+ uint32_t uflo:1;
+ uint32_t end:1;
+ uint32_t err:1;
+ uint32_t own:1;
+ } status_bits;
+ } control_status;
+} sum_tx_bd, *psum_tx_bd;
+
+/*
+ * Hot offload tx's bd which supports 64-bit access and
+ * full TCP hardware offload
+ */
+
+typedef struct _hot_tx_bd {
+ uint32_t host_buf_addr_hi;
+ uint32_t host_buf_addr_lo;
+ union {
+ uint32_t parm_val;
+ struct {
+ uint32_t resv0_15:16;
+ uint32_t resv16:1;
+ uint32_t resv17:1;
+ uint32_t resv18_31:14;
+ } parm_bits;
+ } hot_parms;
+
+ union {
+ uint32_t cntl_val;
+ struct {
+ uint32_t bcnt:14;
+ uint32_t resv14_25:12;
+ uint32_t tcp_hsum:1;
+ uint32_t ip_hsum:1;
+ uint32_t segen:1;
+ uint32_t end:1;
+ uint32_t inten:1;
+ uint32_t own:1;
+ } control_sum_bits;
+
+ struct {
+ uint32_t bcnt:14;
+ uint32_t mss:14;
+ uint32_t segen:1;
+ uint32_t end:1;
+ uint32_t inten:1;
+ uint32_t own:1;
+ } control_tso_bits;
+
+ struct {
+ uint32_t bcnt:14;
+ uint32_t resv14_17:4;
+ uint32_t rtry:1;
+ uint32_t trc:4;
+ uint32_t inten:1;
+ uint32_t exdef:1;
+ uint32_t def:1;
+ uint32_t lcar:1;
+ uint32_t lcol:1;
+ uint32_t uflo:1;
+ uint32_t end:1;
+ uint32_t err:1;
+ uint32_t own:1;
+ } status_bits;
+ } control_status;
+} hot_tx_bd, *phot_tx_bd;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_NGE_CHIP_H */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nge/nge_kstats.c Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,535 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file may contain confidential information of Nvidia
+ * and should not be distributed in source form without approval
+ * from Sun Legal.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "nge.h"
+
+#undef NGE_DBG
+#define NGE_DBG NGE_DBG_STATS /* debug flag for this code */
+
+/*
+ * Table of Hardware-defined Statistics Block Offsets and Names
+ */
+#define KS_NAME(s) { KS_ ## s, #s }
+
+const nge_ksindex_t nge_statistics[] = {
+
+ KS_NAME(ifHOutOctets),
+ KS_NAME(ifHOutZeroRetranCount),
+ KS_NAME(ifHOutOneRetranCount),
+ KS_NAME(ifHOutMoreRetranCount),
+ KS_NAME(ifHOutColCount),
+ KS_NAME(ifHOutFifoovCount),
+ KS_NAME(ifHOutLOCCount),
+ KS_NAME(ifHOutExDecCount),
+ KS_NAME(ifHOutRetryCount),
+ KS_NAME(ifHInFrameErrCount),
+ KS_NAME(ifHInExtraOctErrCount),
+ KS_NAME(ifHInLColErrCount),
+ KS_NAME(ifHInOversizeErrCount),
+ KS_NAME(ifHInFovErrCount),
+ KS_NAME(ifHInFCSErrCount),
+ KS_NAME(ifHInAlignErrCount),
+ KS_NAME(ifHInLenErrCount),
+ KS_NAME(ifHInUniPktsCount),
+ KS_NAME(ifHInBroadPksCount),
+ KS_NAME(ifHInMulPksCount),
+ { KS_STATS_SIZE, NULL }
+};
+
+/*
+ * kstat update callback: accumulate the h/w statistics registers into ks_data
+ */
+static int
+nge_statistics_update(kstat_t *ksp, int flag)
+{
+ uint32_t regno;
+ nge_t *ngep;
+ nge_statistics_t *istp;
+ nge_hw_statistics_t *hw_stp;
+ kstat_named_t *knp;
+ const nge_ksindex_t *ksip;
+
+ if (flag != KSTAT_READ)
+ return (EACCES);
+
+ ngep = ksp->ks_private;
+ istp = &ngep->statistics;
+ hw_stp = &istp->hw_statistics;
+ knp = ksp->ks_data;
+
+ /*
+ * Transfer the statistics values from the hardware statistics regs
+ */
+ for (ksip = nge_statistics; ksip->name != NULL; ++knp, ++ksip) {
+ regno = KS_BASE + ksip->index * sizeof (uint32_t);
+ hw_stp->a[ksip->index] = nge_reg_get32(ngep, regno);
+ knp->value.ui64 += hw_stp->a[ksip->index];
+ }
+
+ return (0);
+}
+
+
+static const nge_ksindex_t nge_chipinfo[] = {
+ { 0, "businfo" },
+ { 1, "command" },
+ { 2, "vendor_id" },
+ { 3, "device_id" },
+ { 4, "subsystem_vendor_id" },
+ { 5, "subsystem_device_id" },
+ { 6, "revision_id" },
+ { 7, "cache_line_size" },
+ { 8, "latency_timer" },
+ { 9, "phy_mode" },
+ { 10, "phy_id" },
+ { 11, "hw_mac_addr" },
+ { 12, "&bus_type" },
+ { 13, "&bus_speed" },
+ { 14, "&bus_size" },
+ { -1, NULL }
+};
+
+static const nge_ksindex_t nge_debuginfo[] = {
+ { 0, "rx_realloc" },
+ { 1, "rx_realloc_fails" },
+ { 2, "rx_realloc_DMA_fails" },
+ { 3, "rx_realloc_MP_fails" },
+ { 4, "rx_rcfree" },
+ { 5, "context_switch" },
+ { 6, "ip_hsum_err" },
+ { 7, "tcp_hsum_err" },
+ { 8, "tc_next" },
+ { 9, "tx_next" },
+ { 10, "tx_free" },
+ { 11, "tx_flow" },
+ { 12, "rx_prod" },
+ { 13, "rx_hold" },
+ { 14, "rx_nobuf" },
+ { 15, "rx_err" },
+ {16, "tx_err" },
+ {17, "tx_stall" },
+ { -1, NULL }
+};
+
+static int
+nge_chipinfo_update(kstat_t *ksp, int flag)
+{
+ nge_t *ngep;
+ kstat_named_t *knp;
+ chip_info_t *infop;
+
+ if (flag != KSTAT_READ)
+ return (EACCES);
+
+ ngep = ksp->ks_private;
+ infop = &ngep->chipinfo;
+ knp = ksp->ks_data;
+
+ (knp++)->value.ui64 = infop->businfo;
+ (knp++)->value.ui64 = infop->command;
+ (knp++)->value.ui64 = infop->vendor;
+ (knp++)->value.ui64 = infop->device;
+ (knp++)->value.ui64 = infop->subven;
+ (knp++)->value.ui64 = infop->subdev;
+ (knp++)->value.ui64 = infop->revision;
+ (knp++)->value.ui64 = infop->clsize;
+ (knp++)->value.ui64 = infop->latency;
+ (knp++)->value.ui64 = ngep->phy_mode;
+ (knp++)->value.ui64 = ngep->phy_id;
+ (knp++)->value.ui64 = infop->hw_mac_addr;
+ return (0);
+}
+
+static int
+nge_debuginfo_update(kstat_t *ksp, int flag)
+{
+ nge_t *ngep;
+ kstat_named_t *knp;
+ nge_sw_statistics_t *sw_stp;
+
+ if (flag != KSTAT_READ)
+ return (EACCES);
+
+ ngep = ksp->ks_private;
+ sw_stp = &ngep->statistics.sw_statistics;
+ knp = ksp->ks_data;
+
+ (knp++)->value.ui64 = sw_stp->recv_realloc;
+ (knp++)->value.ui64 = sw_stp->kmem_alloc_err;
+ (knp++)->value.ui64 = sw_stp->dma_alloc_err;
+ (knp++)->value.ui64 = sw_stp->mp_alloc_err;
+ (knp++)->value.ui64 = sw_stp->recy_free;
+ (knp++)->value.ui64 = sw_stp->load_context;
+ (knp++)->value.ui64 = sw_stp->ip_hwsum_err;
+ (knp++)->value.ui64 = sw_stp->tcp_hwsum_err;
+ (knp++)->value.ui64 = ngep->send->tc_next;
+ (knp++)->value.ui64 = ngep->send->tx_next;
+ (knp++)->value.ui64 = ngep->send->tx_free;
+ (knp++)->value.ui64 = ngep->send->tx_flow;
+ (knp++)->value.ui64 = ngep->recv->prod_index;
+ (knp++)->value.ui64 = ngep->buff->rx_hold;
+ (knp++)->value.ui64 = sw_stp->rx_nobuffer;
+ (knp++)->value.ui64 = sw_stp->rx_err;
+ (knp++)->value.ui64 = sw_stp->tx_stop_err;
+ (knp++)->value.ui64 = sw_stp->tx_stall;
+ return (0);
+}
+
+static kstat_t *
+nge_setup_named_kstat(nge_t *ngep, int instance, char *name,
+ const nge_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
+{
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ char *np;
+ int type;
+
+ size /= sizeof (nge_ksindex_t);
+ ksp = kstat_create(NGE_DRIVER_NAME, instance, name, "net",
+ KSTAT_TYPE_NAMED, size-1, KSTAT_FLAG_PERSISTENT);
+ if (ksp == NULL)
+ return (NULL);
+
+ ksp->ks_private = ngep;
+ ksp->ks_update = update;
+ for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
+ switch (*np) {
+ default:
+ type = KSTAT_DATA_UINT64;
+ break;
+ case '%':
+ np += 1;
+ type = KSTAT_DATA_UINT32;
+ break;
+
+ case '$':
+ np ++;
+ type = KSTAT_DATA_STRING;
+ break;
+ case '&':
+ np ++;
+ type = KSTAT_DATA_CHAR;
+ break;
+ }
+ kstat_named_init(knp, np, type);
+ }
+ kstat_install(ksp);
+
+ return (ksp);
+}
+
+/*
+ * Create kstats corresponding to NDD parameters
+ */
+static kstat_t *
+nge_setup_params_kstat(nge_t *ngep, int instance, char *name,
+ int (*update)(kstat_t *, int))
+{
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ int i;
+
+ ksp = kstat_create(NGE_DRIVER_NAME, instance, name, "net",
+ KSTAT_TYPE_NAMED, PARAM_COUNT, KSTAT_FLAG_PERSISTENT);
+ if (ksp != NULL) {
+ ksp->ks_private = ngep;
+ ksp->ks_update = update;
+ for (knp = ksp->ks_data, i = 0; i < PARAM_COUNT; ++knp, ++i)
+ kstat_named_init(knp, ngep->nd_params[i].ndp_name+1,
+ KSTAT_DATA_UINT64);
+ kstat_install(ksp);
+ }
+
+ return (ksp);
+}
+
+void
+nge_init_kstats(nge_t *ngep, int instance)
+{
+ const nge_ksindex_t *ksip;
+
+ NGE_TRACE(("nge_init_kstats($%p, %d)", (void *)ngep, instance));
+ for (ksip = nge_statistics; ksip->name != NULL; ++ksip) {
+ (void) nge_reg_get32(ngep,
+ (nge_regno_t)(KS_BASE + sizeof (uint32_t)*ksip->index));
+ }
+
+ ngep->nge_kstats[NGE_KSTAT_STATS] = nge_setup_named_kstat(ngep,
+ instance, "statistics", nge_statistics,
+ sizeof (nge_statistics), nge_statistics_update);
+
+ ngep->nge_kstats[NGE_KSTAT_CHIPID] = nge_setup_named_kstat(ngep,
+ instance, "chipinfo", nge_chipinfo,
+ sizeof (nge_chipinfo), nge_chipinfo_update);
+
+ ngep->nge_kstats[NGE_KSTAT_DEBUG] = nge_setup_named_kstat(ngep,
+ instance, "driver-debug", nge_debuginfo,
+ sizeof (nge_debuginfo), nge_debuginfo_update);
+
+}
+
+void
+nge_fini_kstats(nge_t *ngep)
+{
+ int i;
+
+ NGE_TRACE(("nge_fini_kstats($%p)", (void *)ngep));
+ for (i = NGE_KSTAT_COUNT; --i >= 0; )
+ if (ngep->nge_kstats[i] != NULL)
+ kstat_delete(ngep->nge_kstats[i]);
+}
+
+int
+nge_m_stat(void *arg, uint_t stat, uint64_t *val)
+{
+ nge_t *ngep = arg;
+ nge_statistics_t *nstp = &ngep->statistics;
+ nge_hw_statistics_t *hw_stp = &nstp->hw_statistics;
+ nge_sw_statistics_t *sw_stp = &nstp->sw_statistics;
+
+ switch (stat) {
+ case MAC_STAT_IFSPEED:
+ *val = ngep->param_link_speed * 1000000ull;
+ break;
+
+ case MAC_STAT_MULTIRCV:
+ *val = hw_stp->s.InMulPksCount;
+ break;
+
+ case MAC_STAT_BRDCSTRCV:
+ *val = hw_stp->s.InBroadPksCount;
+ break;
+
+ case MAC_STAT_NORCVBUF:
+ *val = sw_stp->rx_nobuffer;
+ break;
+
+ case MAC_STAT_IERRORS:
+ *val = hw_stp->s.InFrameErrCount +
+ hw_stp->s.InExtraOctErrCount +
+ hw_stp->s.InLColErrCount +
+ hw_stp->s.InOversizeErrCount +
+ hw_stp->s.InFovErrCount +
+ hw_stp->s.InFCSErrCount +
+ hw_stp->s.InAlignErrCount +
+ hw_stp->s.InLenErrCount;
+ break;
+
+ case MAC_STAT_OERRORS:
+ *val = hw_stp->s.OutFifoovCount +
+ hw_stp->s.OutLOCCount +
+ hw_stp->s.OutExDecCount +
+ hw_stp->s.OutRetryCount;
+ break;
+
+ case MAC_STAT_COLLISIONS:
+ *val = hw_stp->s.OutColCount;
+ break;
+
+ case MAC_STAT_RBYTES:
+ *val = sw_stp->rbytes;
+ break;
+
+ case MAC_STAT_IPACKETS:
+ *val = sw_stp->recv_count;
+ break;
+
+ case MAC_STAT_OBYTES:
+ *val = sw_stp->obytes;
+ break;
+
+ case MAC_STAT_OPACKETS:
+ *val = sw_stp->xmit_count;
+ break;
+
+ case ETHER_STAT_ALIGN_ERRORS:
+ *val = hw_stp->s.InAlignErrCount;
+ break;
+
+ case ETHER_STAT_FCS_ERRORS:
+ *val = hw_stp->s.InFCSErrCount;
+ break;
+
+ case ETHER_STAT_FIRST_COLLISIONS:
+ *val = hw_stp->s.OutZeroRetranCount;
+ break;
+
+ case ETHER_STAT_MULTI_COLLISIONS:
+ *val = hw_stp->s.OutOneRetranCount +
+ hw_stp->s.OutMoreRetranCount;
+ break;
+
+ case ETHER_STAT_DEFER_XMTS:
+ *val = hw_stp->s.OutExDecCount;
+ break;
+
+ case ETHER_STAT_TX_LATE_COLLISIONS:
+ *val = hw_stp->s.OutColCount;
+ break;
+
+ case ETHER_STAT_EX_COLLISIONS:
+ *val = hw_stp->s.OutRetryCount;
+ break;
+
+ case ETHER_STAT_CARRIER_ERRORS:
+ *val = hw_stp->s.OutLOCCount;
+ break;
+
+ case ETHER_STAT_TOOLONG_ERRORS:
+ *val = hw_stp->s.InOversizeErrCount;
+ break;
+
+ case ETHER_STAT_XCVR_ADDR:
+ *val = ngep->phy_xmii_addr;
+ break;
+
+ case ETHER_STAT_XCVR_ID:
+ *val = ngep->phy_id;
+ break;
+
+ case ETHER_STAT_XCVR_INUSE:
+ *val = XCVR_1000T;
+ break;
+
+ case ETHER_STAT_CAP_1000FDX:
+ *val = 1;
+ break;
+
+ case ETHER_STAT_CAP_1000HDX:
+ *val = 0;
+ break;
+
+ case ETHER_STAT_CAP_100FDX:
+ *val = 1;
+ break;
+
+ case ETHER_STAT_CAP_100HDX:
+ *val = 1;
+ break;
+
+ case ETHER_STAT_CAP_10FDX:
+ *val = 1;
+ break;
+
+ case ETHER_STAT_CAP_10HDX:
+ *val = 1;
+ break;
+
+ case ETHER_STAT_CAP_ASMPAUSE:
+ *val = 1;
+ break;
+
+ case ETHER_STAT_CAP_PAUSE:
+ *val = 1;
+ break;
+
+ case ETHER_STAT_CAP_AUTONEG:
+ *val = 1;
+ break;
+
+ case ETHER_STAT_ADV_CAP_1000FDX:
+ *val = ngep->param_adv_1000fdx;
+ break;
+
+ case ETHER_STAT_ADV_CAP_1000HDX:
+ *val = ngep->param_adv_1000hdx;
+ break;
+
+ case ETHER_STAT_ADV_CAP_100FDX:
+ *val = ngep->param_adv_100fdx;
+ break;
+
+ case ETHER_STAT_ADV_CAP_100HDX:
+ *val = ngep->param_adv_100hdx;
+ break;
+
+ case ETHER_STAT_ADV_CAP_10FDX:
+ *val = ngep->param_adv_10fdx;
+ break;
+
+ case ETHER_STAT_ADV_CAP_10HDX:
+ *val = ngep->param_adv_10hdx;
+ break;
+
+ case ETHER_STAT_ADV_CAP_ASMPAUSE:
+ *val = ngep->param_adv_asym_pause;
+ break;
+
+ case ETHER_STAT_ADV_CAP_PAUSE:
+ *val = ngep->param_adv_pause;
+ break;
+
+ case ETHER_STAT_ADV_CAP_AUTONEG:
+ *val = ngep->param_adv_autoneg;
+ break;
+
+ case ETHER_STAT_LP_CAP_1000FDX:
+ *val = ngep->param_lp_1000fdx;
+ break;
+
+ case ETHER_STAT_LP_CAP_1000HDX:
+ *val = ngep->param_lp_1000hdx;
+ break;
+
+ case ETHER_STAT_LP_CAP_100FDX:
+ *val = ngep->param_lp_100fdx;
+ break;
+
+ case ETHER_STAT_LP_CAP_100HDX:
+ *val = ngep->param_lp_100hdx;
+ break;
+
+ case ETHER_STAT_LP_CAP_10FDX:
+ *val = ngep->param_lp_10fdx;
+ break;
+
+ case ETHER_STAT_LP_CAP_10HDX:
+ *val = ngep->param_lp_10hdx;
+ break;
+
+ case ETHER_STAT_LP_CAP_ASMPAUSE:
+ *val = ngep->param_lp_asym_pause;
+ break;
+
+ case ETHER_STAT_LP_CAP_PAUSE:
+ *val = ngep->param_lp_pause;
+ break;
+
+ case ETHER_STAT_LP_CAP_AUTONEG:
+ *val = ngep->param_lp_autoneg;
+ break;
+
+ case ETHER_STAT_LINK_ASMPAUSE:
+ *val = ngep->param_adv_asym_pause &&
+ ngep->param_lp_asym_pause &&
+ ngep->param_adv_pause != ngep->param_lp_pause;
+ break;
+
+ case ETHER_STAT_LINK_PAUSE:
+ *val = ngep->param_link_rx_pause;
+ break;
+
+ case ETHER_STAT_LINK_AUTONEG:
+ *val = ngep->param_link_autoneg;
+ break;
+
+ case ETHER_STAT_LINK_DUPLEX:
+ *val = ngep->param_link_duplex;
+ break;
+
+ default:
+ return (ENOTSUP);
+ }
+
+ return (0);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nge/nge_log.c Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file may contain confidential information of Nvidia
+ * and should not be distributed in source form without approval
+ * from Sun Legal.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "nge.h"
+
+
+/*
+ * Global variable for default debug flags
+ */
+uint32_t nge_debug;
+
+/*
+ * Global mutex used by logging routines below
+ */
+kmutex_t nge_log_mutex[1];
+
+/*
+ * Static data used by logging routines; protected by <nge_log_mutex>
+ */
+static struct {
+ const char *who; /* interface name used as the message prefix */
+ const char *fmt; /* cmn_err() wrapper format: prefix + message */
+ int level; /* cmn_err() severity (CE_NOTE/CE_WARN/CE_CONT) */
+} nge_log_data;
+
+
+/*
+ * Backend print routine for all the routines below
+ *
+ * Formats the caller's message into a local buffer (truncated at
+ * 128 bytes by vsnprintf) and emits it via cmn_err() using the
+ * prefix/format/severity previously staged in nge_log_data by the
+ * caller, which must hold nge_log_mutex.
+ */
+static void
+nge_vprt(const char *fmt, va_list args)
+{
+ char buf[128];
+
+ ASSERT(mutex_owned(nge_log_mutex));
+
+ (void) vsnprintf(buf, sizeof (buf), fmt, args);
+ cmn_err(nge_log_data.level, nge_log_data.fmt, nge_log_data.who, buf);
+}
+
+
+/*
+ * Log a run-time event (CE_NOTE, log only)
+ *
+ * The leading "!" in the format sends the message to the system
+ * log only (not the console). Serialised via nge_log_mutex.
+ */
+void
+nge_log(nge_t *ngep, const char *fmt, ...)
+{
+ va_list args;
+
+ mutex_enter(nge_log_mutex);
+ nge_log_data.who = ngep->ifname;
+ nge_log_data.fmt = "!%s: %s";
+ nge_log_data.level = CE_NOTE;
+
+ va_start(args, fmt);
+ nge_vprt(fmt, args);
+ va_end(args);
+
+ mutex_exit(nge_log_mutex);
+}
+
+/*
+ * Log a run-time problem (CE_WARN, log only)
+ *
+ * Same staging/locking scheme as nge_log(), but at CE_WARN
+ * severity for operational problems.
+ */
+void
+nge_problem(nge_t *ngep, const char *fmt, ...)
+{
+ va_list args;
+
+ mutex_enter(nge_log_mutex);
+ nge_log_data.who = ngep->ifname;
+ nge_log_data.fmt = "!%s: %s";
+ nge_log_data.level = CE_WARN;
+
+ va_start(args, fmt);
+ nge_vprt(fmt, args);
+ va_end(args);
+
+ mutex_exit(nge_log_mutex);
+}
+
+/*
+ * Log a programming error (CE_WARN, log only)
+ *
+ * Identical mechanics to nge_problem(); kept separate so call
+ * sites distinguish driver bugs from operational problems.
+ */
+void
+nge_error(nge_t *ngep, const char *fmt, ...)
+{
+ va_list args;
+
+ mutex_enter(nge_log_mutex);
+ nge_log_data.who = ngep->ifname;
+ nge_log_data.fmt = "!%s: %s";
+ nge_log_data.level = CE_WARN;
+
+ va_start(args, fmt);
+ nge_vprt(fmt, args);
+ va_end(args);
+
+ mutex_exit(nge_log_mutex);
+}
+
+/*
+ * Map an NGE error class code to a human-readable message.
+ * Unknown class codes map to "none".
+ *
+ * Fixes: the NGE_HW_TCHAN message previously said "rx's channel"
+ * (copy-paste of the RCHAN case), and the multi-line string
+ * literals lacked spaces at the concatenation points.
+ */
+static const char *
+nge_class_string(uint8_t class_id)
+{
+ const char *msg;
+ switch (class_id) {
+ default:
+ msg = "none";
+ break;
+
+ case NGE_HW_ERR:
+ msg = "Hardware fatal error. Hardware will be reset";
+ break;
+
+ case NGE_HW_LINK:
+ msg = "the link is broken, please check the connection";
+ break;
+
+ case NGE_HW_BM:
+ msg = "Reset the hardware buffer management fails,"
+ " need to power off/power on system. It is hardware bug";
+ break;
+
+ case NGE_HW_RCHAN:
+ msg = "Reset rx's channel fails. Need to power off/power"
+ " on system";
+ break;
+
+ case NGE_HW_TCHAN:
+ msg = "Reset tx's channel fails. Need to power off/power"
+ " on system";
+ break;
+
+ case NGE_HW_ROM:
+ msg = "Unlock eeprom lock fails.";
+ break;
+
+ case NGE_SW_PROBLEM_ID:
+ msg = "Refill rx's bd fails";
+ break;
+ }
+ return (msg);
+}
+
+/*
+ * Report a classified error through nge_error().
+ *
+ * The message is passed via an explicit "%s" format so that any
+ * '%' character in a (future) message text cannot be treated as a
+ * printf conversion specifier by the varargs logging path.
+ */
+void
+nge_report(nge_t *ngep, uint8_t error_id)
+{
+ const char *err_msg;
+
+ err_msg = nge_class_string(error_id);
+ nge_error(ngep, "%s", err_msg);
+}
+/*
+ * Final-stage printer returned by nge_gdb()/nge_db().
+ *
+ * NOTE: the locking here is deliberately asymmetric —
+ * nge_gdb()/nge_db() acquire nge_log_mutex and stage the
+ * prefix/format/level; this routine prints the message and then
+ * releases that mutex.
+ */
+static void
+nge_prt(const char *fmt, ...)
+{
+ va_list args;
+
+ ASSERT(mutex_owned(nge_log_mutex));
+
+ va_start(args, fmt);
+ nge_vprt(fmt, args);
+ va_end(args);
+
+ mutex_exit(nge_log_mutex);
+}
+
+/*
+ * Return a printf-like debug routine for use when no per-instance
+ * state is available: stages a CE_CONT "?"-prefixed message with
+ * the generic "nge" prefix. Acquires nge_log_mutex here; the
+ * returned nge_prt() releases it after printing.
+ */
+void
+(*nge_gdb(void))(const char *fmt, ...)
+{
+ mutex_enter(nge_log_mutex);
+
+ nge_log_data.who = "nge";
+ nge_log_data.fmt = "?%s: %s\n";
+ nge_log_data.level = CE_CONT;
+
+ return (nge_prt);
+}
+
+/*
+ * Per-instance variant of nge_gdb(): stages a CE_CONT message
+ * prefixed with the interface name. Acquires nge_log_mutex here;
+ * the returned nge_prt() releases it after printing.
+ */
+void
+(*nge_db(nge_t *ngep))(const char *fmt, ...)
+{
+ mutex_enter(nge_log_mutex);
+
+ nge_log_data.who = ngep->ifname;
+ nge_log_data.fmt = "?%s: %s\n";
+ nge_log_data.level = CE_CONT;
+
+ return (nge_prt);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nge/nge_main.c Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,2286 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file may contain confidential information of Nvidia
+ * and should not be distributed in source form without approval
+ * from Sun Legal.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "nge.h"
+
+/*
+ * Describes the chip's DMA engine
+ */
+
+/* 40-bit addressing, single cookie: descriptor rings / rx buffers */
+static ddi_dma_attr_t hot_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr version */
+ 0x0000000000000000ull, /* dma_attr_addr_lo */
+ 0x000000FFFFFFFFFFull, /* dma_attr_addr_hi */
+ 0x000000007FFFFFFFull, /* dma_attr_count_max */
+ 0x0000000000000010ull, /* dma_attr_align */
+ 0x00000FFF, /* dma_attr_burstsizes */
+ 0x00000001, /* dma_attr_minxfer */
+ 0x000000000000FFFFull, /* dma_attr_maxxfer */
+ 0x000000FFFFFFFFFFull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 0x00000001, /* dma_attr_granular */
+ 0
+};
+
+/* 40-bit addressing, up to NGE_MAX_COOKIES: tx payload binding */
+static ddi_dma_attr_t hot_tx_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr version */
+ 0x0000000000000000ull, /* dma_attr_addr_lo */
+ 0x000000FFFFFFFFFFull, /* dma_attr_addr_hi */
+ 0x0000000000003FFFull, /* dma_attr_count_max */
+ 0x0000000000000010ull, /* dma_attr_align */
+ 0x00000FFF, /* dma_attr_burstsizes */
+ 0x00000001, /* dma_attr_minxfer */
+ 0x0000000000003FFFull, /* dma_attr_maxxfer */
+ 0x000000FFFFFFFFFFull, /* dma_attr_seg */
+ NGE_MAX_COOKIES, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0
+};
+
+/* 32-bit addressing variant, used by the "sum" descriptor format */
+static ddi_dma_attr_t sum_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr version */
+ 0x0000000000000000ull, /* dma_attr_addr_lo */
+ 0x00000000FFFFFFFFull, /* dma_attr_addr_hi */
+ 0x000000007FFFFFFFull, /* dma_attr_count_max */
+ 0x0000000000000010ull, /* dma_attr_align */
+ 0x00000FFF, /* dma_attr_burstsizes */
+ 0x00000001, /* dma_attr_minxfer */
+ 0x000000000000FFFFull, /* dma_attr_maxxfer */
+ 0x00000000FFFFFFFFull, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen */
+ 0x00000001, /* dma_attr_granular */
+ 0
+};
+
+/* 32-bit addressing tx-binding variant for the "sum" format */
+static ddi_dma_attr_t sum_tx_dma_attr = {
+ DMA_ATTR_V0, /* dma_attr version */
+ 0x0000000000000000ull, /* dma_attr_addr_lo */
+ 0x00000000FFFFFFFFull, /* dma_attr_addr_hi */
+ 0x0000000000003FFFull, /* dma_attr_count_max */
+ 0x0000000000000010ull, /* dma_attr_align */
+ 0x00000FFF, /* dma_attr_burstsizes */
+ 0x00000001, /* dma_attr_minxfer */
+ 0x0000000000003FFFull, /* dma_attr_maxxfer */
+ 0x00000000FFFFFFFFull, /* dma_attr_seg */
+ NGE_MAX_COOKIES, /* dma_attr_sgllen */
+ 1, /* dma_attr_granular */
+ 0
+};
+
+/*
+ * DMA access attributes for data.
+ */
+ddi_device_acc_attr_t nge_data_accattr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC,
+ DDI_DEFAULT_ACC
+};
+
+/*
+ * DMA access attributes for descriptors.
+ */
+static ddi_device_acc_attr_t nge_desc_accattr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC,
+ DDI_DEFAULT_ACC
+};
+
+/*
+ * PIO access attributes for registers
+ */
+static ddi_device_acc_attr_t nge_reg_accattr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_STRUCTURE_LE_ACC,
+ DDI_STRICTORDER_ACC,
+ DDI_DEFAULT_ACC
+};
+
+/*
+ * NIC DESC MODE 2
+ *
+ * Descriptor sizes, DMA attributes and rxd/txd fill/check ops for
+ * the "sum" (DESC_OFFLOAD) format; selected by nge_check_desc_prop().
+ */
+
+static const nge_desc_attr_t nge_sum_desc = {
+
+ sizeof (sum_rx_bd),
+ sizeof (sum_tx_bd),
+ &sum_dma_attr,
+ &sum_tx_dma_attr,
+ nge_sum_rxd_fill,
+ nge_sum_rxd_check,
+ nge_sum_txd_fill,
+ nge_sum_txd_check,
+};
+
+/*
+ * NIC DESC MODE 3
+ *
+ * Equivalent table for the "hot" (DESC_HOT) descriptor format.
+ */
+
+static const nge_desc_attr_t nge_hot_desc = {
+
+ sizeof (hot_rx_bd),
+ sizeof (hot_tx_bd),
+ &hot_dma_attr,
+ &hot_tx_dma_attr,
+ nge_hot_rxd_fill,
+ nge_hot_rxd_check,
+ nge_hot_txd_fill,
+ nge_hot_txd_check,
+};
+
+/* Driver identification and .conf / device property name strings */
+static char nge_ident[] = "nVidia 1Gb Ethernet %I%";
+static char clsize_propname[] = "cache-line-size";
+static char latency_propname[] = "latency-timer";
+static char debug_propname[] = "nge-debug-flags";
+static char rx_data_hw[] = "rx-data-hw";
+static char rx_prd_lw[] = "rx-prd-lw";
+static char rx_prd_hw[] = "rx-prd-hw";
+static char sw_intr_intv[] = "sw-intr-intvl";
+static char nge_desc_mode[] = "desc-mode";
+static char default_mtu[] = "default_mtu";
+static char low_memory_mode[] = "minimal-memory-usage";
+extern kmutex_t nge_log_mutex[1];
+
+/* GLDv3 (mac) entry points implemented in this file */
+static int nge_m_start(void *);
+static void nge_m_stop(void *);
+static int nge_m_promisc(void *, boolean_t);
+static int nge_m_multicst(void *, boolean_t, const uint8_t *);
+static int nge_m_unicst(void *, const uint8_t *);
+static void nge_m_resources(void *);
+static void nge_m_ioctl(void *, queue_t *, mblk_t *);
+static boolean_t nge_m_getcapab(void *, mac_capab_t, void *);
+
+#define NGE_M_CALLBACK_FLAGS (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
+
+/* Callback vector registered with the GLDv3 mac layer */
+static mac_callbacks_t nge_m_callbacks = {
+ NGE_M_CALLBACK_FLAGS,
+ nge_m_stat,
+ nge_m_start,
+ nge_m_stop,
+ nge_m_promisc,
+ nge_m_multicst,
+ nge_m_unicst,
+ nge_m_tx,
+ nge_m_resources,
+ nge_m_ioctl,
+ nge_m_getcapab
+};
+
+static int nge_add_intrs(nge_t *, int);
+static void nge_rem_intrs(nge_t *);
+static int nge_register_intrs_and_init_locks(nge_t *);
+
+/*
+ * NGE MSI tunable:
+ */
+boolean_t nge_enable_msi = B_FALSE;
+
+/*
+ * Validate and record a requested loopback mode.
+ *
+ * Returns IOC_ACK when the mode is unchanged, IOC_INVAL for an
+ * unknown mode, or IOC_RESTART_ACK to tell the caller to reprogram
+ * the PHY and/or MAC for the new mode.
+ */
+static enum ioc_reply
+nge_set_loop_mode(nge_t *ngep, uint32_t mode)
+{
+ /*
+ * If the mode isn't being changed, there's nothing to do ...
+ */
+ if (mode == ngep->param_loop_mode)
+ return (IOC_ACK);
+
+ /*
+ * Validate the requested mode and prepare a suitable message
+ * to explain the link down/up cycle that the change will
+ * probably induce ...
+ */
+ switch (mode) {
+ default:
+ return (IOC_INVAL);
+
+ case NGE_LOOP_NONE:
+ case NGE_LOOP_EXTERNAL_100:
+ case NGE_LOOP_EXTERNAL_10:
+ case NGE_LOOP_INTERNAL_PHY:
+ break;
+ }
+
+ /*
+ * All OK; tell the caller to reprogram
+ * the PHY and/or MAC for the new mode ...
+ */
+ ngep->param_loop_mode = mode;
+ return (IOC_RESTART_ACK);
+}
+
+#undef NGE_DBG
+#define NGE_DBG NGE_DBG_INIT
+
+/*
+ * Utility routine to carve a slice off a chunk of allocated memory,
+ * updating the chunk descriptor accordingly. The size of the slice
+ * is given by the product of the <qty> and <size> parameters.
+ */
+void
+nge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
+ uint32_t qty, uint32_t size)
+{
+ size_t totsize;
+
+ totsize = qty*size;
+ ASSERT(size > 0);
+ ASSERT(totsize <= chunk->alength);
+
+ /* the slice inherits the chunk's handles and current position */
+ *slice = *chunk;
+ slice->nslots = qty;
+ slice->size = size;
+ slice->alength = totsize;
+
+ /*
+ * Advance the chunk past the slice. Adjusting the cookie
+ * directly relies on the chunk having been bound with a single
+ * DMA cookie (enforced in nge_alloc_dma_mem()).
+ */
+ chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
+ chunk->alength -= totsize;
+ chunk->offset += totsize;
+ chunk->cookie.dmac_laddress += totsize;
+ chunk->cookie.dmac_size -= totsize;
+}
+
+/*
+ * Allocate an area of memory and a DMA handle for accessing it
+ *
+ * On DDI_SUCCESS, <dma_p> holds the handle, the memory and a
+ * single-cookie binding. On DDI_FAILURE any partially-built state
+ * has been released. A multi-cookie binding is rejected.
+ */
+int
+nge_alloc_dma_mem(nge_t *ngep, size_t memsize, ddi_device_acc_attr_t *attr_p,
+ uint_t dma_flags, dma_area_t *dma_p)
+{
+ int err;
+ caddr_t va;
+
+ NGE_TRACE(("nge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
+ (void *)ngep, memsize, attr_p, dma_flags, dma_p));
+ /*
+ * Allocate handle
+ */
+ err = ddi_dma_alloc_handle(ngep->devinfo, ngep->desc_attr.dma_attr,
+ DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
+ if (err != DDI_SUCCESS)
+ goto fail;
+
+ /*
+ * Allocate memory
+ */
+ err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
+ dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
+ DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, &dma_p->acc_hdl);
+ if (err != DDI_SUCCESS)
+ goto fail;
+
+ /*
+ * Bind the two together
+ */
+ dma_p->mem_va = va;
+ err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
+ va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
+ &dma_p->cookie, &dma_p->ncookies);
+
+ /* this driver cannot use a multi-cookie binding */
+ if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
+ goto fail;
+
+ /* sentinels; real values are set later by nge_slice_chunk() */
+ dma_p->nslots = ~0U;
+ dma_p->size = ~0U;
+ dma_p->offset = 0;
+
+ return (DDI_SUCCESS);
+
+fail:
+ /* safe on partial state: frees only what was actually set up */
+ nge_free_dma_mem(dma_p);
+ NGE_DEBUG(("nge_alloc_dma_mem: fail to alloc dma memory!"));
+
+ return (DDI_FAILURE);
+}
+
+/*
+ * Release one area of DMAable memory: unbind the handle if it was
+ * bound, free the backing memory, then free the handle itself.
+ * Safe to call on a partially-constructed dma_area_t.
+ */
+void
+nge_free_dma_mem(dma_area_t *dma_p)
+{
+ if (dma_p->dma_hdl != NULL && dma_p->ncookies) {
+ (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
+ dma_p->ncookies = 0;
+ }
+
+ if (dma_p->acc_hdl != NULL) {
+ ddi_dma_mem_free(&dma_p->acc_hdl);
+ dma_p->acc_hdl = NULL;
+ }
+
+ if (dma_p->dma_hdl != NULL) {
+ ddi_dma_free_handle(&dma_p->dma_hdl);
+ dma_p->dma_hdl = NULL;
+ }
+}
+
+/* progress flags for partial-allocation cleanup in nge_alloc_bufs() */
+#define ALLOC_TX_BUF 0x1
+#define ALLOC_TX_DESC 0x2
+#define ALLOC_RX_DESC 0x4
+
+/*
+ * Allocate all the DMA memory a running interface needs: the split
+ * TX copy buffers, the RX descriptor ring and the TX descriptor
+ * ring. On failure, everything allocated so far is released.
+ *
+ * Fix: a failure partway through the TX-buffer split loop
+ * previously leaked the splits that had already succeeded (the
+ * ALLOC_TX_BUF flag was only set after the whole loop).
+ */
+int
+nge_alloc_bufs(nge_t *ngep)
+{
+ int err;
+ int split;
+ int progress;
+ size_t txbuffsize;
+ size_t rxdescsize;
+ size_t txdescsize;
+
+ txbuffsize = ngep->tx_desc * ngep->buf_size;
+ rxdescsize = ngep->rx_desc;
+ txdescsize = ngep->tx_desc;
+ rxdescsize *= ngep->desc_attr.rxd_size;
+ txdescsize *= ngep->desc_attr.txd_size;
+ progress = 0;
+
+ NGE_TRACE(("nge_alloc_bufs($%p)", (void *)ngep));
+ /*
+ * Allocate memory & handles for TX buffers
+ */
+ ASSERT((txbuffsize % ngep->nge_split) == 0);
+ for (split = 0; split < ngep->nge_split; ++split) {
+ err = nge_alloc_dma_mem(ngep, txbuffsize/ngep->nge_split,
+ &nge_data_accattr, DDI_DMA_WRITE | NGE_DMA_MODE,
+ &ngep->send->buf[split]);
+ if (err != DDI_SUCCESS) {
+ /* free the splits that did succeed */
+ while (--split >= 0)
+ nge_free_dma_mem(&ngep->send->buf[split]);
+ return (DDI_FAILURE);
+ }
+ }
+
+ progress |= ALLOC_TX_BUF;
+
+ /*
+ * Allocate memory & handles for receive return rings and
+ * buffer (producer) descriptor rings
+ */
+ err = nge_alloc_dma_mem(ngep, rxdescsize, &nge_desc_accattr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->recv->desc);
+ if (err != DDI_SUCCESS)
+ goto fail;
+ progress |= ALLOC_RX_DESC;
+
+ /*
+ * Allocate memory & handles for TX descriptor rings,
+ */
+ err = nge_alloc_dma_mem(ngep, txdescsize, &nge_desc_accattr,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &ngep->send->desc);
+ if (err != DDI_SUCCESS)
+ goto fail;
+ return (DDI_SUCCESS);
+
+fail:
+ if (progress & ALLOC_RX_DESC)
+ nge_free_dma_mem(&ngep->recv->desc);
+ if (progress & ALLOC_TX_BUF) {
+ for (split = 0; split < ngep->nge_split; ++split)
+ nge_free_dma_mem(&ngep->send->buf[split]);
+ }
+
+ return (DDI_FAILURE);
+}
+
+/*
+ * This routine frees the transmit and receive buffers and descriptors.
+ * Make sure the chip is stopped before calling it!
+ */
+void
+nge_free_bufs(nge_t *ngep)
+{
+ int idx;
+
+ NGE_TRACE(("nge_free_bufs($%p)", (void *)ngep));
+
+ /* descriptor rings first, then every split of the TX buffers */
+ nge_free_dma_mem(&ngep->recv->desc);
+ nge_free_dma_mem(&ngep->send->desc);
+
+ for (idx = ngep->nge_split; --idx >= 0; )
+ nge_free_dma_mem(&ngep->send->buf[idx]);
+}
+
+/*
+ * Clean up initialisation done above before the memory is freed
+ *
+ * Unbinds and frees every preallocated TX DMA handle, resets the
+ * handle free list, and releases the s/w send descriptor array.
+ */
+static void
+nge_fini_send_ring(nge_t *ngep)
+{
+ uint32_t slot;
+ size_t dmah_num;
+ send_ring_t *srp;
+ sw_tx_sbd_t *ssbdp;
+
+ srp = ngep->send;
+ ssbdp = srp->sw_sbds;
+
+ NGE_TRACE(("nge_fini_send_ring($%p)", (void *)ngep));
+
+ dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
+
+ for (slot = 0; slot < dmah_num; ++slot) {
+ if (srp->dmahndl[slot].hndl) {
+ (void) ddi_dma_unbind_handle(srp->dmahndl[slot].hndl);
+ ddi_dma_free_handle(&srp->dmahndl[slot].hndl);
+ srp->dmahndl[slot].hndl = NULL;
+ srp->dmahndl[slot].next = NULL;
+ }
+ }
+
+ srp->dmah_free.head = NULL;
+ srp->dmah_free.tail = NULL;
+
+ kmem_free(ssbdp, srp->desc.nslots*sizeof (*ssbdp));
+
+}
+
+/*
+ * Initialise the specified Send Ring, using the information in the
+ * <dma_area> descriptors that it contains to set up all the other
+ * fields. This routine should be called only once for each ring.
+ *
+ * Carves the TX descriptor area and each TX buffer split into
+ * per-slot slices, then preallocates the pool of TX DMA handles
+ * chained onto the dmah free list. Returns DDI_SUCCESS/DDI_FAILURE.
+ */
+static int
+nge_init_send_ring(nge_t *ngep)
+{
+ size_t dmah_num;
+ uint32_t nslots;
+ uint32_t err;
+ uint32_t slot;
+ uint32_t split;
+ send_ring_t *srp;
+ sw_tx_sbd_t *ssbdp;
+ dma_area_t desc;
+ dma_area_t pbuf;
+
+ srp = ngep->send;
+ srp->desc.nslots = ngep->tx_desc;
+ nslots = srp->desc.nslots;
+
+ NGE_TRACE(("nge_init_send_ring($%p)", (void *)ngep));
+ /*
+ * Other one-off initialisation of per-ring data
+ */
+ srp->ngep = ngep;
+
+ /*
+ * Allocate the array of s/w Send Buffer Descriptors
+ */
+ ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
+ srp->sw_sbds = ssbdp;
+
+ /*
+ * Now initialise each array element once and for all
+ */
+ desc = srp->desc;
+ for (split = 0; split < ngep->nge_split; ++split) {
+ pbuf = srp->buf[split];
+ for (slot = 0; slot < nslots/ngep->nge_split; ++ssbdp, ++slot) {
+ nge_slice_chunk(&ssbdp->desc, &desc, 1,
+ ngep->desc_attr.txd_size);
+ nge_slice_chunk(&ssbdp->pbuf, &pbuf, 1,
+ ngep->buf_size);
+ }
+ ASSERT(pbuf.alength == 0);
+ }
+ ASSERT(desc.alength == 0);
+
+ dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
+
+ /* preallocate dma handles for tx buffer */
+ for (slot = 0; slot < dmah_num; ++slot) {
+
+ err = ddi_dma_alloc_handle(ngep->devinfo,
+ ngep->desc_attr.tx_dma_attr, DDI_DMA_DONTWAIT,
+ NULL, &srp->dmahndl[slot].hndl);
+
+ if (err != DDI_SUCCESS) {
+ /* releases handles allocated so far plus sw_sbds */
+ nge_fini_send_ring(ngep);
+ nge_error(ngep,
+ "nge_init_send_ring: alloc dma handle fails");
+ return (DDI_FAILURE);
+ }
+ srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
+ }
+
+ srp->dmah_free.head = srp->dmahndl;
+ srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
+ srp->dmah_free.tail->next = NULL;
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Intialize the tx recycle pointer and tx sending pointer of tx ring
+ * and set the type of tx's data descriptor by default.
+ *
+ * Rebuilds the TX DMA handle free list, marks every s/w send
+ * descriptor HOST_OWN, then zeroes and syncs the h/w descriptors.
+ */
+static void
+nge_reinit_send_ring(nge_t *ngep)
+{
+ size_t dmah_num;
+ uint32_t slot;
+ send_ring_t *srp;
+ sw_tx_sbd_t *ssbdp;
+
+ srp = ngep->send;
+
+ /*
+ * Reinitialise control variables ...
+ */
+
+ srp->tx_hwmark = NGE_DESC_MIN;
+ srp->tx_lwmark = NGE_DESC_MIN;
+
+ srp->tx_next = 0;
+ srp->tx_free = srp->desc.nslots;
+ srp->tc_next = 0;
+
+ dmah_num = sizeof (srp->dmahndl) / sizeof (srp->dmahndl[0]);
+
+ /* re-chain all handles onto the free list (terminated below) */
+ for (slot = 0; slot < dmah_num; ++slot)
+ srp->dmahndl[slot].next = srp->dmahndl + slot + 1;
+
+ srp->dmah_free.head = srp->dmahndl;
+ srp->dmah_free.tail = srp->dmahndl + dmah_num - 1;
+ srp->dmah_free.tail->next = NULL;
+
+ /*
+ * Zero and sync all the h/w Send Buffer Descriptors
+ */
+ for (slot = 0; slot < srp->desc.nslots; ++slot) {
+ ssbdp = &srp->sw_sbds[slot];
+ ssbdp->flags = HOST_OWN;
+ }
+
+ DMA_ZERO(srp->desc);
+ DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
+}
+
+/*
+ * Record the number of receive descriptors and attach the receive
+ * ring to its owning driver instance.
+ */
+static void
+nge_init_recv_ring(nge_t *ngep)
+{
+ recv_ring_t *rrp = ngep->recv;
+
+ rrp->desc.nslots = ngep->rx_desc;
+ rrp->ngep = ngep;
+}
+
+/*
+ * Intialize the rx recycle pointer and rx sending pointer of rx ring
+ */
+static void
+nge_reinit_recv_ring(nge_t *ngep)
+{
+ recv_ring_t *rrp;
+
+ rrp = ngep->recv;
+
+ /*
+ * Reinitialise control variables ...
+ */
+ rrp->prod_index = 0;
+ /*
+ * Zero and sync all the h/w Receive Buffer Descriptors
+ * (this is the recv ring, despite the original "Send" comment)
+ */
+ DMA_ZERO(rrp->desc);
+ DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORDEV);
+}
+
+/*
+ * Clean up initialisation done above before the memory is freed
+ *
+ * Bumps <buf_sign> under recycle_lock — presumably so buffers still
+ * loaned to the stack (whose recorded signature no longer matches)
+ * are not returned to this ring by nge_recv_recycle(); TODO confirm
+ * against nge_recv_recycle(). Then frees every buffer reachable
+ * from the s/w descriptors, the free list and the recycle list,
+ * and finally the s/w descriptor array itself.
+ */
+static void
+nge_fini_buff_ring(nge_t *ngep)
+{
+ uint32_t i;
+ buff_ring_t *brp;
+ dma_area_t *bufp;
+ sw_rx_sbd_t *bsbdp;
+
+ brp = ngep->buff;
+ bsbdp = brp->sw_rbds;
+
+ NGE_DEBUG(("nge_fini_buff_ring($%p)", (void *)ngep));
+
+ mutex_enter(brp->recycle_lock);
+ brp->buf_sign++;
+ mutex_exit(brp->recycle_lock);
+ for (i = 0; i < ngep->rx_desc; i++, ++bsbdp) {
+ if (bsbdp->bufp) {
+ if (bsbdp->bufp->mp)
+ freemsg(bsbdp->bufp->mp);
+ nge_free_dma_mem(bsbdp->bufp);
+ kmem_free(bsbdp->bufp, sizeof (dma_area_t));
+ bsbdp->bufp = NULL;
+ }
+ }
+ while (brp->free_list != NULL) {
+ bufp = brp->free_list;
+ brp->free_list = bufp->next;
+ bufp->next = NULL;
+ if (bufp->mp)
+ freemsg(bufp->mp);
+ nge_free_dma_mem(bufp);
+ kmem_free(bufp, sizeof (dma_area_t));
+ }
+ while (brp->recycle_list != NULL) {
+ bufp = brp->recycle_list;
+ brp->recycle_list = bufp->next;
+ bufp->next = NULL;
+ if (bufp->mp)
+ freemsg(bufp->mp);
+ nge_free_dma_mem(bufp);
+ kmem_free(bufp, sizeof (dma_area_t));
+ }
+
+
+ kmem_free(brp->sw_rbds, (ngep->rx_desc * sizeof (*bsbdp)));
+ brp->sw_rbds = NULL;
+}
+
+/*
+ * Intialize the Rx's data ring and free ring
+ *
+ * Builds the free list of rx DMA buffers (each with a desballoc'd
+ * mblk whose free routine is nge_recv_recycle), then slices the h/w
+ * descriptor area and pairs each s/w descriptor with a buffer.
+ *
+ * Fix: the desballoc-failure path previously leaked the current
+ * buffer's DMA memory and its dma_area_t.
+ */
+static int
+nge_init_buff_ring(nge_t *ngep)
+{
+ uint32_t err;
+ uint32_t slot;
+ uint32_t nslots_buff;
+ uint32_t nslots_recv;
+ buff_ring_t *brp;
+ recv_ring_t *rrp;
+ dma_area_t desc;
+ dma_area_t *bufp;
+ sw_rx_sbd_t *bsbdp;
+
+ rrp = ngep->recv;
+ brp = ngep->buff;
+ brp->nslots = ngep->rx_buf;
+ brp->rx_bcopy = B_FALSE;
+ nslots_recv = rrp->desc.nslots;
+ nslots_buff = brp->nslots;
+ brp->ngep = ngep;
+
+ NGE_TRACE(("nge_init_buff_ring($%p)", (void *)ngep));
+
+ /*
+ * Allocate the array of s/w Recv Buffer Descriptors
+ */
+ bsbdp = kmem_zalloc(nslots_recv *sizeof (*bsbdp), KM_SLEEP);
+ brp->sw_rbds = bsbdp;
+ brp->free_list = NULL;
+ brp->recycle_list = NULL;
+ for (slot = 0; slot < nslots_buff; ++slot) {
+ bufp = kmem_zalloc(sizeof (dma_area_t), KM_SLEEP);
+ err = nge_alloc_dma_mem(ngep, (ngep->buf_size
+ + NGE_HEADROOM),
+ &nge_data_accattr, DDI_DMA_READ | NGE_DMA_MODE, bufp);
+ if (err != DDI_SUCCESS) {
+ kmem_free(bufp, sizeof (dma_area_t));
+ return (DDI_FAILURE);
+ }
+
+ bufp->alength -= NGE_HEADROOM;
+ bufp->offset += NGE_HEADROOM;
+ bufp->private = (caddr_t)ngep;
+ bufp->rx_recycle.free_func = nge_recv_recycle;
+ bufp->rx_recycle.free_arg = (caddr_t)bufp;
+ bufp->signature = brp->buf_sign;
+ bufp->rx_delivered = B_FALSE;
+ bufp->mp = desballoc(DMA_VPTR(*bufp),
+ ngep->buf_size + NGE_HEADROOM,
+ 0, &bufp->rx_recycle);
+
+ if (bufp->mp == NULL) {
+ /*
+ * Fix: free this buffer's DMA memory and its
+ * dma_area_t (previously leaked).
+ * NOTE(review): buffers already queued on
+ * free_list are left for the caller's cleanup
+ * path — confirm callers release them.
+ */
+ nge_free_dma_mem(bufp);
+ kmem_free(bufp, sizeof (dma_area_t));
+ return (DDI_FAILURE);
+ }
+ bufp->next = brp->free_list;
+ brp->free_list = bufp;
+ }
+
+ /*
+ * Now initialise each array element once and for all
+ */
+ desc = rrp->desc;
+ for (slot = 0; slot < nslots_recv; ++slot, ++bsbdp) {
+ nge_slice_chunk(&bsbdp->desc, &desc, 1,
+ ngep->desc_attr.rxd_size);
+ bufp = brp->free_list;
+ brp->free_list = bufp->next;
+ bsbdp->bufp = bufp;
+ bsbdp->flags = CONTROLER_OWN;
+ bufp->next = NULL;
+ }
+
+ ASSERT(desc.alength == 0);
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Fill the host address of data in rx' descriptor
+ * and initialize free pointers of rx free ring
+ *
+ * Returns DDI_FAILURE if any slot has no buffer attached (see the
+ * comment in the loop); the caller must treat the reinit as failed.
+ */
+static int
+nge_reinit_buff_ring(nge_t *ngep)
+{
+ uint32_t slot;
+ uint32_t nslots_recv;
+ buff_ring_t *brp;
+ recv_ring_t *rrp;
+ sw_rx_sbd_t *bsbdp;
+ void *hw_bd_p;
+
+ brp = ngep->buff;
+ rrp = ngep->recv;
+ bsbdp = brp->sw_rbds;
+ nslots_recv = rrp->desc.nslots;
+ for (slot = 0; slot < nslots_recv; ++bsbdp, ++slot) {
+ hw_bd_p = DMA_VPTR(bsbdp->desc);
+ /*
+ * There is a scenario: When the traffic of small tcp
+ * packet is heavy, suspending the tcp traffic will
+ * cause the preallocated buffers for rx not to be
+ * released in time by tcp taffic and cause rx's buffer
+ * pointers not to be refilled in time.
+ *
+ * At this point, if we reinitialize the driver, the bufp
+ * pointer for rx's traffic will be NULL.
+ * So the result of the reinitializion fails.
+ */
+ if (bsbdp->bufp == NULL)
+ return (DDI_FAILURE);
+
+ ngep->desc_attr.rxd_fill(hw_bd_p, &bsbdp->bufp->cookie,
+ bsbdp->bufp->alength);
+ }
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Initialise the mutexes protecting the send ring and the rx
+ * buffer recycle list. All are created at the interrupt priority
+ * in <intr_pri> so they can be taken from the interrupt handler.
+ */
+static void
+nge_init_ring_param_lock(nge_t *ngep)
+{
+ buff_ring_t *brp;
+ send_ring_t *srp;
+
+ srp = ngep->send;
+ brp = ngep->buff;
+
+ /* Init the locks for send ring */
+ mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(ngep->intr_pri));
+ mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(ngep->intr_pri));
+ mutex_init(&srp->dmah_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(ngep->intr_pri));
+
+ /* Init parameters of buffer ring */
+ brp->free_list = NULL;
+ brp->recycle_list = NULL;
+ brp->rx_hold = 0;
+ brp->buf_sign = 0;
+
+ /* Init recycle list lock */
+ mutex_init(brp->recycle_lock, NULL, MUTEX_DRIVER,
+ DDI_INTR_PRI(ngep->intr_pri));
+}
+
+/*
+ * Set up the send, receive and buffer rings. On buffer-ring
+ * failure the already-initialised send ring is torn down again;
+ * the receive ring needs no explicit teardown.
+ */
+int
+nge_init_rings(nge_t *ngep)
+{
+ uint32_t err;
+
+ if ((err = nge_init_send_ring(ngep)) != DDI_SUCCESS)
+ return (err);
+
+ nge_init_recv_ring(ngep);
+
+ if ((err = nge_init_buff_ring(ngep)) != DDI_SUCCESS) {
+ nge_fini_send_ring(ngep);
+ return (DDI_FAILURE);
+ }
+
+ return (err);
+}
+
+/*
+ * Re-initialise all three rings for a restart. Only the buffer
+ * ring can fail, so its status is the overall result.
+ */
+static int
+nge_reinit_ring(nge_t *ngep)
+{
+ nge_reinit_recv_ring(ngep);
+ nge_reinit_send_ring(ngep);
+
+ return (nge_reinit_buff_ring(ngep));
+}
+
+
+/*
+ * Tear down the buffer and send rings. The receive ring holds no
+ * resources of its own, so it needs no teardown.
+ */
+void
+nge_fini_rings(nge_t *ngep)
+{
+ nge_fini_buff_ring(ngep);
+ nge_fini_send_ring(ngep);
+}
+
+/*
+ * Loopback ioctl code
+ */
+
+/* Loopback modes advertised to the LB_GET_INFO ioctl */
+static lb_property_t loopmodes[] = {
+ { normal, "normal", NGE_LOOP_NONE },
+ { external, "100Mbps", NGE_LOOP_EXTERNAL_100 },
+ { external, "10Mbps", NGE_LOOP_EXTERNAL_10 },
+ { internal, "PHY", NGE_LOOP_INTERNAL_PHY },
+};
+
+/*
+ * Loopback ioctl dispatcher: checks the payload size of each
+ * subcommand, then reads or writes the loopback state. Returns
+ * an ioc_reply code for the generic ioctl completion path.
+ */
+enum ioc_reply
+nge_loop_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
+{
+ int cmd;
+ uint32_t *lbmp;
+ lb_info_sz_t *lbsp;
+ lb_property_t *lbpp;
+
+ /*
+ * Validate format of ioctl
+ */
+ if (mp->b_cont == NULL)
+ return (IOC_INVAL);
+
+ cmd = iocp->ioc_cmd;
+
+ switch (cmd) {
+ default:
+ return (IOC_INVAL);
+
+ case LB_GET_INFO_SIZE:
+ if (iocp->ioc_count != sizeof (lb_info_sz_t))
+ return (IOC_INVAL);
+ lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
+ *lbsp = sizeof (loopmodes);
+ return (IOC_REPLY);
+
+ case LB_GET_INFO:
+ if (iocp->ioc_count != sizeof (loopmodes))
+ return (IOC_INVAL);
+ lbpp = (lb_property_t *)mp->b_cont->b_rptr;
+ bcopy(loopmodes, lbpp, sizeof (loopmodes));
+ return (IOC_REPLY);
+
+ case LB_GET_MODE:
+ if (iocp->ioc_count != sizeof (uint32_t))
+ return (IOC_INVAL);
+ lbmp = (uint32_t *)mp->b_cont->b_rptr;
+ *lbmp = ngep->param_loop_mode;
+ return (IOC_REPLY);
+
+ case LB_SET_MODE:
+ if (iocp->ioc_count != sizeof (uint32_t))
+ return (IOC_INVAL);
+ lbmp = (uint32_t *)mp->b_cont->b_rptr;
+ return (nge_set_loop_mode(ngep, *lbmp));
+ }
+}
+
+#undef NGE_DBG
+#define NGE_DBG NGE_DBG_NEMO
+
+
+/*
+ * Sanitise the configured descriptor mode and select the matching
+ * descriptor attribute table ("sum"/offload vs "hot").
+ */
+static void
+nge_check_desc_prop(nge_t *ngep)
+{
+ switch (ngep->desc_mode) {
+ case DESC_OFFLOAD:
+ ngep->desc_attr = nge_sum_desc;
+ break;
+ case DESC_HOT:
+ ngep->desc_attr = nge_hot_desc;
+ break;
+ default:
+ /* unknown mode: fall back to the hot descriptor type */
+ ngep->desc_mode = DESC_HOT;
+ ngep->desc_attr = nge_hot_desc;
+ break;
+ }
+}
+
+/*
+ * nge_get_props -- get the parameters to tune the driver
+ *
+ * Reads tunables from device properties / the .conf file, derives
+ * buffer and ring geometry from the configured MTU (jumbo frames
+ * permitting), and finally selects the descriptor format. The
+ * numeric defaults (0x20, 0x4, 0xc, ...) look like h/w watermark
+ * values; their exact meaning is not confirmable from this file.
+ */
+static void
+nge_get_props(nge_t *ngep)
+{
+ chip_info_t *infop;
+ dev_info_t *devinfo;
+ nge_dev_spec_param_t *dev_param_p;
+
+ devinfo = ngep->devinfo;
+ infop = (chip_info_t *)&ngep->chipinfo;
+ dev_param_p = &ngep->dev_spec_param;
+
+ infop->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
+ DDI_PROP_DONTPASS, clsize_propname, 32);
+
+ infop->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
+ DDI_PROP_DONTPASS, latency_propname, 64);
+ ngep->rx_datahwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
+ DDI_PROP_DONTPASS, rx_data_hw, 0x20);
+ ngep->rx_prdlwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
+ DDI_PROP_DONTPASS, rx_prd_lw, 0x4);
+ ngep->rx_prdhwm = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
+ DDI_PROP_DONTPASS, rx_prd_hw, 0xc);
+
+ ngep->sw_intr_intv = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
+ DDI_PROP_DONTPASS, sw_intr_intv, SWTR_ITC);
+ ngep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
+ DDI_PROP_DONTPASS, debug_propname, NGE_DBG_CHIP);
+ ngep->desc_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
+ DDI_PROP_DONTPASS, nge_desc_mode, dev_param_p->desc_type);
+ ngep->lowmem_mode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
+ DDI_PROP_DONTPASS, low_memory_mode, 0);
+
+ /* only jumbo-capable chips may configure an MTU above default */
+ if (dev_param_p->jumbo) {
+ ngep->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
+ DDI_PROP_DONTPASS, default_mtu, ETHERMTU);
+ } else
+ ngep->default_mtu = ETHERMTU;
+
+ /* derive buffer size and ring geometry from the MTU band */
+ if (ngep->default_mtu > ETHERMTU &&
+ ngep->default_mtu <= NGE_MTU_2500) {
+ ngep->buf_size = NGE_JB2500_BUFSZ;
+ ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
+ ngep->rx_desc = NGE_RECV_JB2500_SLOTS_DESC;
+ ngep->rx_buf = NGE_RECV_JB2500_SLOTS_DESC * 2;
+ ngep->nge_split = NGE_SPLIT_256;
+ } else if (ngep->default_mtu > NGE_MTU_2500 &&
+ ngep->default_mtu <= NGE_MTU_4500) {
+ ngep->buf_size = NGE_JB4500_BUFSZ;
+ ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
+ ngep->rx_desc = NGE_RECV_JB4500_SLOTS_DESC;
+ ngep->rx_buf = NGE_RECV_JB4500_SLOTS_DESC * 2;
+ ngep->nge_split = NGE_SPLIT_256;
+ } else if (ngep->default_mtu > NGE_MTU_4500 &&
+ ngep->default_mtu <= NGE_MAX_MTU) {
+ ngep->buf_size = NGE_JB9000_BUFSZ;
+ ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
+ ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
+ ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
+ ngep->nge_split = NGE_SPLIT_256;
+ } else if (ngep->default_mtu > NGE_MAX_MTU) {
+ /* clamp over-large configured MTU to the maximum */
+ ngep->default_mtu = NGE_MAX_MTU;
+ ngep->buf_size = NGE_JB9000_BUFSZ;
+ ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
+ ngep->rx_desc = NGE_RECV_JB9000_SLOTS_DESC;
+ ngep->rx_buf = NGE_RECV_JB9000_SLOTS_DESC * 2;
+ ngep->nge_split = NGE_SPLIT_256;
+ } else if (ngep->lowmem_mode != 0) {
+ ngep->default_mtu = ETHERMTU;
+ ngep->buf_size = NGE_STD_BUFSZ;
+ ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
+ ngep->rx_desc = NGE_RECV_LOWMEM_SLOTS_DESC;
+ ngep->rx_buf = NGE_RECV_LOWMEM_SLOTS_DESC * 2;
+ ngep->nge_split = NGE_SPLIT_32;
+ } else {
+ ngep->default_mtu = ETHERMTU;
+ ngep->buf_size = NGE_STD_BUFSZ;
+ ngep->tx_desc = dev_param_p->tx_desc_num;
+ ngep->rx_desc = dev_param_p->rx_desc_num;
+ ngep->rx_buf = dev_param_p->rx_desc_num * 2;
+ ngep->nge_split = dev_param_p->nge_split;
+ }
+
+ nge_check_desc_prop(ngep);
+}
+
+
+/*
+ * Reset the chip and reinitialise the rings and driver soft state.
+ *
+ * Caller must hold genlock; both TX ring locks are taken so no
+ * send-side activity can observe the rings mid-rebuild. Returns
+ * DDI_SUCCESS or DDI_FAILURE.
+ *
+ * Fix: removed a redundant second reset of <resched_needed>
+ * (it was assigned B_FALSE and then 0 a few lines later).
+ */
+static int
+nge_reset(nge_t *ngep)
+{
+ int err;
+ send_ring_t *srp = ngep->send;
+
+ ASSERT(mutex_owned(ngep->genlock));
+ mutex_enter(srp->tc_lock);
+ mutex_enter(srp->tx_lock);
+
+ nge_tx_recycle_all(ngep);
+ err = nge_reinit_ring(ngep);
+ if (err == DDI_FAILURE) {
+ mutex_exit(srp->tx_lock);
+ mutex_exit(srp->tc_lock);
+ return (err);
+ }
+ err = nge_chip_reset(ngep);
+ mutex_exit(srp->tx_lock);
+ mutex_exit(srp->tc_lock);
+ if (err == DDI_FAILURE)
+ return (err);
+ ngep->watchdog = 0;
+ ngep->resched_needed = B_FALSE;
+ ngep->promisc = B_FALSE;
+ ngep->param_loop_mode = NGE_LOOP_NONE;
+ ngep->factotum_flag = 0;
+ ngep->nge_mac_state = NGE_MAC_RESET;
+ ngep->max_sdu = ngep->default_mtu + ETHER_HEAD_LEN + ETHERFCSL;
+ ngep->max_sdu += VTAG_SIZE;
+ ngep->rx_def = 0x16; /* NOTE(review): magic h/w value — unconfirmed */
+ return (DDI_SUCCESS);
+}
+
+/*
+ * nge_m_stop() -- stop transmitting/receiving
+ *
+ * GLDv3 entry point. Stops the chip, recycles all outstanding TX
+ * descriptors and releases the rings and DMA buffers. A no-op when
+ * suspended (the adapter is already stopped in that state).
+ */
+static void
+nge_m_stop(void *arg)
+{
+ nge_t *ngep = arg; /* private device info */
+
+ NGE_TRACE(("nge_m_stop($%p)", arg));
+
+ /*
+ * If suspended, adapter is already stopped, just return.
+ */
+ if (ngep->suspended) {
+ ASSERT(ngep->nge_mac_state == NGE_MAC_STOPPED);
+ return;
+ }
+
+ /*
+ * Just stop processing, then record new MAC state
+ */
+ mutex_enter(ngep->genlock);
+ rw_enter(ngep->rwlock, RW_WRITER);
+
+ (void) nge_chip_stop(ngep, B_FALSE);
+ /* Try to wait all the buffer post to upper layer be released */
+ ngep->nge_mac_state = NGE_MAC_STOPPED;
+
+ /* Recycle all the TX BD */
+ nge_tx_recycle_all(ngep);
+ nge_fini_rings(ngep);
+ nge_free_bufs(ngep);
+
+ NGE_DEBUG(("nge_m_stop($%p) done", arg));
+
+ rw_exit(ngep->rwlock);
+ mutex_exit(ngep->genlock);
+}
+
+/*
+ * nge_m_start() -- start transmitting/receiving
+ *
+ * GLDv3 entry point. Allocates the DMA buffers, initialises the
+ * rings and (re)starts the chip. Fails immediately while
+ * suspended; resume calls back in with the flag cleared.
+ */
+static int
+nge_m_start(void *arg)
+{
+ int err;
+ nge_t *ngep = arg;
+
+ NGE_TRACE(("nge_m_start($%p)", arg));
+ /*
+ * If suspended, don't start, as the resume processing
+ * will recall this function with the suspended flag off.
+ */
+ if (ngep->suspended)
+ return (DDI_FAILURE);
+ /*
+ * Start processing and record new MAC state
+ */
+ mutex_enter(ngep->genlock);
+ rw_enter(ngep->rwlock, RW_WRITER);
+ err = nge_alloc_bufs(ngep);
+ if (err != DDI_SUCCESS) {
+ nge_problem(ngep, "nge_m_start: DMA buffer allocation failed");
+ goto finish;
+ }
+ err = nge_init_rings(ngep);
+ if (err != DDI_SUCCESS) {
+ nge_free_bufs(ngep);
+ /* fix: the %x conversion previously had no argument */
+ nge_problem(ngep, "nge_init_rings() failed,err=%x", err);
+ goto finish;
+ }
+ err = nge_restart(ngep);
+
+ NGE_DEBUG(("nge_m_start($%p) done", arg));
+ finish:
+ rw_exit(ngep->rwlock);
+ mutex_exit(ngep->genlock);
+
+ return (err);
+}
+
+/*
+ * nge_m_unicst() -- set the primary unicast MAC address
+ *
+ * GLDv3 entry point. Records the new address in the soft state
+ * and, unless suspended, pushes it to the chip; when suspended the
+ * resume path installs the recorded address later.
+ */
+static int
+nge_m_unicst(void *arg, const uint8_t *macaddr)
+{
+ nge_t *ngep = arg;
+
+ NGE_TRACE(("nge_m_unicst($%p)", arg));
+ /*
+ * Remember the new current address in the driver state
+ * Sync the chip's idea of the address too ...
+ */
+ mutex_enter(ngep->genlock);
+
+ ethaddr_copy(macaddr, ngep->cur_uni_addr.addr);
+ ngep->cur_uni_addr.set = 1;
+
+ /*
+ * If we are suspended, we want to quit now, and not update
+ * the chip. Doing so might put it in a bad state, but the
+ * resume will get the unicast address installed.
+ *
+ * Fix: the early return previously leaked genlock.
+ */
+ if (ngep->suspended) {
+ mutex_exit(ngep->genlock);
+ return (DDI_SUCCESS);
+ }
+
+ nge_chip_sync(ngep);
+
+ NGE_DEBUG(("nge_m_unicst($%p) done", arg));
+ mutex_exit(ngep->genlock);
+
+ return (0);
+}
+
+/*
+ * nge_m_promisc() -- enable/disable promiscuous mode
+ *
+ * GLDv3 entry point. Records the requested mode and pushes it to
+ * the chip; fails while suspended because the setting could not be
+ * restored correctly on resume. A request matching the current
+ * state is a successful no-op.
+ */
+static int
+nge_m_promisc(void *arg, boolean_t on)
+{
+ nge_t *ngep = arg;
+
+ NGE_TRACE(("nge_m_promisc($%p)", arg));
+ /*
+ * If suspended, we don't do anything, even record the promiscuious
+ * mode, as we won't properly set it on resume. Just fail.
+ */
+ if (ngep->suspended)
+ return (DDI_FAILURE);
+
+ /*
+ * Store specified mode and pass to chip layer to update h/w
+ */
+ mutex_enter(ngep->genlock);
+ if (ngep->promisc == on) {
+ mutex_exit(ngep->genlock);
+ NGE_DEBUG(("nge_m_promisc($%p) done", arg));
+ return (0);
+ }
+ ngep->promisc = on;
+ nge_chip_sync(ngep);
+ NGE_DEBUG(("nge_m_promisc($%p) done", arg));
+ mutex_exit(ngep->genlock);
+
+ return (0);
+}
+
+/*
+ * nge_mulparam() -- recompute the hardware multicast address/mask
+ * pair from the driver's list of active multicast addresses.
+ *
+ * cur_mul_addr gets the bit-value common to every listed address;
+ * cur_mul_mask gets a 1 in every bit position where all listed
+ * addresses agree (AND | ~OR), so the chip matches any of them.
+ */
+static void nge_mulparam(nge_t *ngep)
+{
+	uint8_t number;
+	ether_addr_t pand;
+	ether_addr_t por;
+	mul_item *plist;
+
+	/*
+	 * pand accumulates with &=, so it must start all-ones
+	 * (initialising it to 0x00 would force the result to zero);
+	 * por accumulates with |=, so it starts all-zeros.
+	 */
+	for (number = 0; number < ETHERADDRL; number++) {
+		pand[number] = 0xff;
+		por[number] = 0x00;
+	}
+	for (plist = ngep->pcur_mulist; plist != NULL; plist = plist->next) {
+		for (number = 0; number < ETHERADDRL; number++) {
+			pand[number] &= plist->mul_addr[number];
+			por[number] |= plist->mul_addr[number];
+		}
+	}
+	for (number = 0; number < ETHERADDRL; number++) {
+		ngep->cur_mul_addr.addr[number]
+		    = pand[number] & por[number];
+		ngep->cur_mul_mask.addr[number]
+		    = pand[number] | (~por[number]);
+	}
+}
+/*
+ * nge_m_multicst() -- enable/disable reception of a multicast address
+ *
+ * Maintains the driver's singly-linked list of active multicast
+ * addresses (one entry per distinct address, reference-counted on
+ * add) and reprograms the chip's address/mask filter when the list
+ * changes.  Always returns 0.
+ */
+static int
+nge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
+{
+	boolean_t update;
+	boolean_t b_eq;
+	nge_t *ngep = arg;
+	mul_item *plist;
+	mul_item *plist_prev;
+	mul_item *pitem;
+
+	NGE_TRACE(("nge_m_multicst($%p, %s, %s)", arg,
+	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));
+
+	update = B_FALSE;
+	plist = plist_prev = NULL;
+	mutex_enter(ngep->genlock);
+	if (add) {
+		/* Already listed?  Just bump the reference count. */
+		if (ngep->pcur_mulist != NULL) {
+			for (plist = ngep->pcur_mulist; plist != NULL;
+			    plist = plist->next) {
+				b_eq = ether_eq(plist->mul_addr, mca);
+				if (b_eq) {
+					plist->ref_cnt++;
+					break;
+				}
+				plist_prev = plist;
+			}
+		}
+
+		/* Not found: append a new entry to the list */
+		if (plist == NULL) {
+			pitem = kmem_zalloc(sizeof (mul_item), KM_SLEEP);
+			ether_copy(mca, pitem->mul_addr);
+			pitem ->ref_cnt++;
+			pitem ->next = NULL;
+			if (plist_prev == NULL)
+				ngep->pcur_mulist = pitem;
+			else
+				plist_prev->next = pitem;
+			update = B_TRUE;
+		}
+	} else {
+		/*
+		 * Remove: unlink and free the matching entry.
+		 * NOTE(review): ref_cnt is incremented on duplicate adds
+		 * but never decremented here -- a single remove frees the
+		 * entry no matter how many adds preceded it.  Confirm
+		 * this is the intended semantic.
+		 */
+		if (ngep->pcur_mulist != NULL) {
+			for (plist = ngep->pcur_mulist; plist != NULL;
+			    plist = plist->next) {
+				b_eq = ether_eq(plist->mul_addr, mca);
+				if (b_eq) {
+					update = B_TRUE;
+					break;
+				}
+				plist_prev = plist;
+			}
+
+			if (update) {
+				if ((plist_prev == NULL) &&
+				    (plist->next == NULL))
+					ngep->pcur_mulist = NULL;
+				else if ((plist_prev == NULL) &&
+				    (plist->next != NULL))
+					ngep->pcur_mulist = plist->next;
+				else
+					plist_prev->next = plist->next;
+				kmem_free(plist, sizeof (mul_item));
+			}
+		}
+	}
+
+	/*
+	 * Recompute and program the hardware filter.
+	 * NOTE(review): this condition reads "list changed OR not
+	 * suspended", so the chip is re-synced on every call while
+	 * running, and touched while suspended when the list changed;
+	 * "update && !ngep->suspended" looks like the intent -- confirm.
+	 */
+	if (update || !ngep->suspended) {
+		nge_mulparam(ngep);
+		nge_chip_sync(ngep);
+	}
+	NGE_DEBUG(("nge_m_multicst($%p) done", arg));
+	mutex_exit(ngep->genlock);
+
+	return (0);
+}
+
+/*
+ * nge_m_ioctl() -- process an ioctl request from the MAC layer
+ *
+ * Validates the command, enforces the net_config privilege where
+ * required, dispatches to the chip/loopback/NDD handlers under
+ * genlock, reprograms PHY+MAC when the handler requests a restart,
+ * and finally acks/naks/replies on the write queue.
+ */
+static void
+nge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
+{
+	int err;
+	int cmd;
+	nge_t *ngep = arg;
+	struct iocblk *iocp;
+	enum ioc_reply status;
+	boolean_t need_privilege;
+
+	/*
+	 * If suspended, we might actually be able to do some of
+	 * these ioctls, but it is harder to make sure they occur
+	 * without actually putting the hardware in an undesirable
+	 * state. So just NAK it.
+	 */
+	if (ngep->suspended) {
+		miocnak(wq, mp, 0, EINVAL);
+		return;
+	}
+
+	/*
+	 * Validate the command before bothering with the mutex ...
+	 */
+	iocp = (struct iocblk *)mp->b_rptr;
+	iocp->ioc_error = 0;
+	need_privilege = B_TRUE;
+	cmd = iocp->ioc_cmd;
+
+	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x", cmd));
+	switch (cmd) {
+	default:
+		NGE_LDB(NGE_DBG_BADIOC,
+		    ("nge_m_ioctl: unknown cmd 0x%x", cmd));
+
+		miocnak(wq, mp, 0, EINVAL);
+		return;
+
+	case NGE_MII_READ:
+	case NGE_MII_WRITE:
+	case NGE_SEE_READ:
+	case NGE_SEE_WRITE:
+	case NGE_DIAG:
+	case NGE_PEEK:
+	case NGE_POKE:
+	case NGE_PHY_RESET:
+	case NGE_SOFT_RESET:
+	case NGE_HARD_RESET:
+		break;
+
+	/* Read-only loopback queries need no privilege */
+	case LB_GET_INFO_SIZE:
+	case LB_GET_INFO:
+	case LB_GET_MODE:
+		need_privilege = B_FALSE;
+		break;
+	case LB_SET_MODE:
+		break;
+
+	/* ND_GET is a read-only query; ND_SET remains privileged */
+	case ND_GET:
+		need_privilege = B_FALSE;
+		break;
+	case ND_SET:
+		break;
+	}
+
+	if (need_privilege) {
+		/*
+		 * Check for specific net_config privilege.
+		 */
+		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
+		if (err != 0) {
+			NGE_DEBUG(("nge_m_ioctl: rejected cmd 0x%x, err %d",
+			    cmd, err));
+			miocnak(wq, mp, 0, err);
+			return;
+		}
+	}
+
+	mutex_enter(ngep->genlock);
+
+	/* Dispatch to the appropriate handler family */
+	switch (cmd) {
+	default:
+		_NOTE(NOTREACHED)
+		status = IOC_INVAL;
+		break;
+
+	case NGE_MII_READ:
+	case NGE_MII_WRITE:
+	case NGE_SEE_READ:
+	case NGE_SEE_WRITE:
+	case NGE_DIAG:
+	case NGE_PEEK:
+	case NGE_POKE:
+	case NGE_PHY_RESET:
+	case NGE_SOFT_RESET:
+	case NGE_HARD_RESET:
+		status = nge_chip_ioctl(ngep, mp, iocp);
+		break;
+
+	case LB_GET_INFO_SIZE:
+	case LB_GET_INFO:
+	case LB_GET_MODE:
+	case LB_SET_MODE:
+		status = nge_loop_ioctl(ngep, mp, iocp);
+		break;
+
+	case ND_GET:
+	case ND_SET:
+		status = nge_nd_ioctl(ngep, wq, mp, iocp);
+		break;
+
+	}
+
+	/*
+	 * Do we need to reprogram the PHY and/or the MAC?
+	 * Do it now, while we still have the mutex.
+	 *
+	 * Note: update the PHY first, 'cos it controls the
+	 * speed/duplex parameters that the MAC code uses.
+	 */
+
+	NGE_DEBUG(("nge_m_ioctl: cmd 0x%x status %d", cmd, status));
+
+	switch (status) {
+	case IOC_RESTART_REPLY:
+	case IOC_RESTART_ACK:
+		(*ngep->physops->phys_update)(ngep);
+		nge_chip_sync(ngep);
+		break;
+
+	default:
+		break;
+	}
+
+	mutex_exit(ngep->genlock);
+
+	/*
+	 * Finally, decide how to reply
+	 */
+	switch (status) {
+
+	default:
+	case IOC_INVAL:
+		/* NAK with the handler's errno, or EINVAL if none set */
+		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
+		    EINVAL : iocp->ioc_error);
+		break;
+
+	case IOC_DONE:
+		/* The handler already consumed/replied to the message */
+		break;
+
+	case IOC_RESTART_ACK:
+	case IOC_ACK:
+		miocack(wq, mp, 0, 0);
+		break;
+
+	case IOC_RESTART_REPLY:
+	case IOC_REPLY:
+		/* Send the prepared reply, as an ACK or a NAK */
+		mp->b_datap->db_type = iocp->ioc_error == 0 ?
+		    M_IOCACK : M_IOCNAK;
+		qreply(wq, mp);
+		break;
+	}
+}
+
+/*
+ * Receive-side interrupt blanking callback registered with the MAC
+ * layer (mrf_blank).  Deliberately a no-op: this driver does not
+ * adjust its interrupt coalescing parameters dynamically.
+ */
+static void
+nge_chip_blank(void *arg, time_t ticks, uint_t count)
+{
+	_NOTE(ARGUNUSED(arg, ticks, count));
+}
+
+/*
+ * nge_m_resources() -- describe driver resources to the MAC layer
+ *
+ * Registers the single receive ring as a MAC_RX_FIFO resource and
+ * keeps the returned resource handle for later reference.
+ */
+static void
+nge_m_resources(void *arg)
+{
+	nge_t *ngep = arg;
+	recv_ring_t *ring;
+	mac_rx_fifo_t rx_fifo;
+
+	mutex_enter(ngep->genlock);
+
+	/* Fill in the RX FIFO descriptor for this ring */
+	rx_fifo.mrf_type = MAC_RX_FIFO;
+	rx_fifo.mrf_arg = (void *)ngep;
+	rx_fifo.mrf_blank = nge_chip_blank;
+	rx_fifo.mrf_normal_pkt_count = NGE_RX_PKT_CNT;
+	rx_fifo.mrf_normal_blank_time = NGE_TICKS_CNT;
+
+	/* Register it and remember the MAC resource handle */
+	ring = ngep->recv;
+	ring->handle = mac_resource_add(ngep->mh, (mac_resource_t *)&rx_fifo);
+
+	mutex_exit(ngep->genlock);
+}
+
+/*
+ * nge_m_getcapab() -- report device capabilities to the MAC layer
+ *
+ * Returns B_TRUE if the capability is supported (filling in
+ * cap_data where the capability defines any), B_FALSE otherwise.
+ */
+/* ARGSUSED */
+static boolean_t
+nge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
+{
+	nge_t *ngep = arg;
+	nge_dev_spec_param_t *param_p = &ngep->dev_spec_param;
+	uint32_t *txflags;
+
+	switch (cap) {
+	case MAC_CAPAB_HCKSUM:
+		/*
+		 * Advertise hardware TX checksum only when the
+		 * device-specific parameters say it is available.
+		 */
+		if (param_p->tx_hw_checksum == 0)
+			return (B_FALSE);
+		txflags = cap_data;
+		*txflags = param_p->tx_hw_checksum;
+		return (B_TRUE);
+
+	case MAC_CAPAB_POLL:
+		/*
+		 * There's nothing for us to fill in; returning B_TRUE
+		 * alone states that polling is supported.
+		 */
+		return (B_TRUE);
+
+	default:
+		return (B_FALSE);
+	}
+}
+
+#undef NGE_DBG
+#define NGE_DBG NGE_DBG_INIT /* debug flag for this code */
+/*
+ * nge_restart() -- reset and (re)start the chip
+ *
+ * Records the resulting MAC state and returns DDI_SUCCESS or
+ * DDI_FAILURE.  Caller holds the appropriate locks.
+ */
+int
+nge_restart(nge_t *ngep)
+{
+	int err;
+
+	/*
+	 * Reset first; don't try to start the chip if the reset
+	 * itself failed, since the hardware is then in an unknown
+	 * state (the old code summed the two return codes and
+	 * called nge_chip_start() unconditionally).
+	 */
+	err = nge_reset(ngep);
+	if (err == 0)
+		err = nge_chip_start(ngep);
+
+	if (err) {
+		ngep->nge_mac_state = NGE_MAC_STOPPED;
+		return (DDI_FAILURE);
+	} else {
+		ngep->nge_mac_state = NGE_MAC_STARTED;
+		return (DDI_SUCCESS);
+	}
+}
+
+/*
+ * nge_wake_factotum() -- trigger the factotum soft interrupt
+ *
+ * factotum_flag (protected by softlock) ensures at most one
+ * softint is pending at a time.
+ */
+void
+nge_wake_factotum(nge_t *ngep)
+{
+	mutex_enter(ngep->softlock);
+	if (ngep->factotum_flag != 0) {
+		/* A softint is already pending; nothing to do */
+		mutex_exit(ngep->softlock);
+		return;
+	}
+	ngep->factotum_flag = 1;
+	(void) ddi_intr_trigger_softint(ngep->factotum_hdl, NULL);
+	mutex_exit(ngep->softlock);
+}
+
+/*
+ * High-level cyclic handler
+ *
+ * This routine schedules a (low-level) softint callback to the
+ * factotum.
+ */
+
+static void
+nge_chip_cyclic(void *arg)
+{
+	nge_t *ngep = (nge_t *)arg;
+
+	/*
+	 * Only poke the factotum for chip states it can usefully
+	 * service; in any other state there is nothing to do.
+	 */
+	switch (ngep->nge_chip_state) {
+	case NGE_CHIP_RUNNING:
+	case NGE_CHIP_FAULT:
+	case NGE_CHIP_ERROR:
+		nge_wake_factotum(ngep);
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * nge_unattach() -- undo the effects of nge_attach()
+ *
+ * Tears down only the stages recorded in ngep->progress, so it is
+ * safe to call from the attach failure path as well as from a full
+ * detach.  Frees the soft state itself on the way out, so ngep is
+ * invalid on return.
+ */
+static void
+nge_unattach(nge_t *ngep)
+{
+	send_ring_t *srp;
+	buff_ring_t *brp;
+
+	srp = ngep->send;
+	brp = ngep->buff;
+	NGE_TRACE(("nge_unattach($%p)", (void *)ngep));
+
+	/*
+	 * Flag that no more activity may be initiated
+	 */
+	ngep->progress &= ~PROGRESS_READY;
+	ngep->nge_mac_state = NGE_MAC_UNATTACH;
+
+	/*
+	 * Quiesce the PHY and MAC (leave it reset but still powered).
+	 * Clean up and free all NGE data structures
+	 */
+	if (ngep->periodic_id != NULL) {
+		ddi_periodic_delete(ngep->periodic_id);
+		ngep->periodic_id = NULL;
+	}
+
+	if (ngep->progress & PROGRESS_KSTATS)
+		nge_fini_kstats(ngep);
+
+	if (ngep->progress & PROGRESS_NDD)
+		nge_nd_cleanup(ngep);
+
+	/* Restore the factory MAC address and stop the chip */
+	if (ngep->progress & PROGRESS_HWINT) {
+		mutex_enter(ngep->genlock);
+		nge_restore_mac_addr(ngep);
+		(void) nge_chip_stop(ngep, B_FALSE);
+		mutex_exit(ngep->genlock);
+	}
+
+	if (ngep->progress & PROGRESS_SWINT)
+		nge_rem_intrs(ngep);
+
+	if (ngep->progress & PROGRESS_FACTOTUM)
+		(void) ddi_intr_remove_softint(ngep->factotum_hdl);
+
+	if (ngep->progress & PROGRESS_RESCHED)
+		(void) ddi_intr_remove_softint(ngep->resched_hdl);
+
+	/* Locks exist only once interrupt registration completed */
+	if (ngep->progress & PROGRESS_INTR) {
+		mutex_destroy(srp->tx_lock);
+		mutex_destroy(srp->tc_lock);
+		mutex_destroy(&srp->dmah_lock);
+		mutex_destroy(brp->recycle_lock);
+
+		mutex_destroy(ngep->genlock);
+		mutex_destroy(ngep->softlock);
+		rw_destroy(ngep->rwlock);
+	}
+
+	if (ngep->progress & PROGRESS_REGS)
+		ddi_regs_map_free(&ngep->io_handle);
+
+	if (ngep->progress & PROGRESS_CFG)
+		pci_config_teardown(&ngep->cfg_handle);
+
+	ddi_remove_minor_node(ngep->devinfo, NULL);
+
+	kmem_free(ngep, sizeof (*ngep));
+}
+
+/*
+ * nge_resume() -- handle DDI_RESUME for this instance
+ *
+ * Re-reads the PCI config space, clears the suspended flag and
+ * restarts the controller via nge_m_start().  Always returns
+ * DDI_SUCCESS: the system remains usable even if this controller
+ * fails to restart.
+ */
+static int
+nge_resume(dev_info_t *devinfo)
+{
+	nge_t *ngep;
+	chip_info_t *infop;
+
+	ASSERT(devinfo != NULL);
+
+	ngep = ddi_get_driver_private(devinfo);
+	/*
+	 * If there are state inconsistencies, this is bad. Returning
+	 * DDI_FAILURE here will eventually cause the machine to panic,
+	 * so it is best done here so that there is a possibility of
+	 * debugging the problem.
+	 */
+	if (ngep == NULL)
+		cmn_err(CE_PANIC,
+		    "nge: ngep returned from ddi_get_driver_private was NULL");
+	infop = (chip_info_t *)&ngep->chipinfo;
+
+	if (ngep->devinfo != devinfo)
+		cmn_err(CE_PANIC,
+		    "nge: passed devinfo not the same as saved definfo");
+
+	/* NOTE(review): cleared without genlock held -- confirm safe */
+	ngep->suspended = B_FALSE;
+
+	/*
+	 * Fetch the config space. Even though we have most of it cached,
+	 * some values *might* change across a suspend/resume.
+	 */
+	nge_chip_cfg_init(ngep, infop, B_FALSE);
+
+	/*
+	 * Start the controller. In this case (and probably most GLDv3
+	 * devices), it is sufficient to call nge_m_start().
+	 */
+	if (nge_m_start((void *)ngep) != DDI_SUCCESS) {
+		/*
+		 * We note the failure, but return success, as the
+		 * system is still usable without this controller.
+		 */
+		cmn_err(CE_WARN, "nge: resume: failed to restart controller");
+	}
+	return (DDI_SUCCESS);
+}
+
+/*
+ * attach(9E) -- Attach a device to the system
+ *
+ * Called once for each board successfully probed.  Sets up, in
+ * order: soft state, PCI config access, register mapping,
+ * interrupts and locks, chip reset, NDD parameters, kstats, GLDv3
+ * registration and the periodic cyclic.  Each completed stage is
+ * recorded in ngep->progress so nge_unattach() can unwind on
+ * failure.
+ */
+static int
+nge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
+{
+	int err;
+	int i;
+	int instance;
+	caddr_t regs;
+	nge_t *ngep;
+	chip_info_t *infop;
+	mac_register_t *macp;
+
+	switch (cmd) {
+	default:
+		return (DDI_FAILURE);
+
+	case DDI_RESUME:
+		return (nge_resume(devinfo));
+
+	case DDI_ATTACH:
+		break;
+	}
+
+	/* Allocate and link the per-instance soft state */
+	ngep = kmem_zalloc(sizeof (*ngep), KM_SLEEP);
+	instance = ddi_get_instance(devinfo);
+	ddi_set_driver_private(devinfo, ngep);
+	ngep->devinfo = devinfo;
+
+	(void) snprintf(ngep->ifname, sizeof (ngep->ifname), "%s%d",
+	    NGE_DRIVER_NAME, instance);
+	/* Map PCI config space and fetch chip/device properties */
+	err = pci_config_setup(devinfo, &ngep->cfg_handle);
+	if (err != DDI_SUCCESS) {
+		nge_problem(ngep, "nge_attach: pci_config_setup() failed");
+		goto attach_fail;
+	}
+	infop = (chip_info_t *)&ngep->chipinfo;
+	nge_chip_cfg_init(ngep, infop, B_FALSE);
+	nge_init_dev_spec_param(ngep);
+	nge_get_props(ngep);
+	ngep->progress |= PROGRESS_CFG;
+
+	/* Map the chip's operating registers */
+	err = ddi_regs_map_setup(devinfo, NGE_PCI_OPREGS_RNUMBER,
+	    &regs, 0, 0, &nge_reg_accattr, &ngep->io_handle);
+	if (err != DDI_SUCCESS) {
+		nge_problem(ngep, "nge_attach: ddi_regs_map_setup() failed");
+		goto attach_fail;
+	}
+	ngep->io_regs = regs;
+	ngep->progress |= PROGRESS_REGS;
+
+	err = nge_register_intrs_and_init_locks(ngep);
+	if (err != DDI_SUCCESS) {
+		nge_problem(ngep, "nge_attach:"
+		    " register intrs and init locks failed");
+		goto attach_fail;
+	}
+	nge_init_ring_param_lock(ngep);
+	ngep->progress |= PROGRESS_INTR;
+
+	mutex_enter(ngep->genlock);
+
+	/*
+	 * Initialise link state variables
+	 * Stop, reset & reinitialise the chip.
+	 * Initialise the (internal) PHY.
+	 */
+	nge_phys_init(ngep);
+	err = nge_chip_reset(ngep);
+	if (err != DDI_SUCCESS) {
+		nge_problem(ngep, "nge_attach: nge_chip_reset() failed");
+		mutex_exit(ngep->genlock);
+		goto attach_fail;
+	}
+	nge_chip_sync(ngep);
+
+	/*
+	 * Now that mutex locks are initialized, enable interrupts.
+	 */
+	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
+		/* Call ddi_intr_block_enable() for MSI interrupts */
+		(void) ddi_intr_block_enable(ngep->htable,
+		    ngep->intr_actual_cnt);
+	} else {
+		/* Call ddi_intr_enable for MSI or FIXED interrupts */
+		for (i = 0; i < ngep->intr_actual_cnt; i++) {
+			(void) ddi_intr_enable(ngep->htable[i]);
+		}
+	}
+
+	ngep->link_state = LINK_STATE_UNKNOWN;
+	ngep->progress |= PROGRESS_HWINT;
+
+	/*
+	 * Register NDD-tweakable parameters
+	 */
+	if (nge_nd_init(ngep)) {
+		nge_problem(ngep, "nge_attach: nge_nd_init() failed");
+		mutex_exit(ngep->genlock);
+		goto attach_fail;
+	}
+	ngep->progress |= PROGRESS_NDD;
+
+	/*
+	 * Create & initialise named kstats
+	 */
+	nge_init_kstats(ngep, instance);
+	ngep->progress |= PROGRESS_KSTATS;
+
+	mutex_exit(ngep->genlock);
+
+	/* Describe this instance to the GLDv3 MAC layer */
+	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
+		goto attach_fail;
+	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
+	macp->m_driver = ngep;
+	macp->m_dip = devinfo;
+	macp->m_src_addr = infop->vendor_addr.addr;
+	macp->m_callbacks = &nge_m_callbacks;
+	macp->m_min_sdu = 0;
+	macp->m_max_sdu = ngep->default_mtu;
+	/*
+	 * Finally, we're ready to register ourselves with the mac
+	 * interface; if this succeeds, we're all ready to start()
+	 */
+	err = mac_register(macp, &ngep->mh);
+	mac_free(macp);
+	if (err != 0)
+		goto attach_fail;
+
+	/*
+	 * Register a periodical handler.
+	 * nge_chip_cyclic() is invoked in kernel context.
+	 */
+	ngep->periodic_id = ddi_periodic_add(nge_chip_cyclic, ngep,
+	    NGE_CYCLIC_PERIOD, DDI_IPL_0);
+
+	ngep->progress |= PROGRESS_READY;
+	return (DDI_SUCCESS);
+
+attach_fail:
+	/* Unwind whatever stages completed, then free the soft state */
+	nge_unattach(ngep);
+	return (DDI_FAILURE);
+}
+
+/*
+ * detach(9E) -- Detach a device from the system
+ *
+ * DDI_SUSPEND simply stops the chip (all state is rebuilt on
+ * resume).  DDI_DETACH waits for loaned-out receive buffers to be
+ * returned, frees the multicast list, unregisters from the MAC
+ * layer and then tears everything down via nge_unattach().
+ */
+static int
+nge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
+{
+	int i;
+	nge_t *ngep;
+	mul_item *p, *nextp;
+	buff_ring_t *brp;
+
+	NGE_GTRACE(("nge_detach($%p, %d)", (void *)devinfo, cmd));
+
+	ngep = ddi_get_driver_private(devinfo);
+	brp = ngep->buff;
+
+	switch (cmd) {
+	default:
+		return (DDI_FAILURE);
+
+	case DDI_SUSPEND:
+		/*
+		 * Stop the NIC
+		 * I suspect that we can actually suspend if the stop
+		 * routine returns a failure, as the resume will
+		 * effectively fully reset the hardware (i.e. we don't
+		 * really save any hardware state). However, nge_m_stop
+		 * doesn't return an error code.
+		 * Note: This driver doesn't currently support WOL, but
+		 *	should it in the future, it is important to
+		 *	make sure the PHY remains powered so that the
+		 *	wakeup packet can actually be received.
+		 */
+		nge_m_stop(ngep);
+		ngep->suspended = B_TRUE;
+		return (DDI_SUCCESS);
+
+	case DDI_DETACH:
+		break;
+	}
+
+	/* Wait (up to ~1s) for buffers loaned to the upper layer to return */
+	for (i = 0; i < 1000; i++) {
+		if (brp->rx_hold == 0)
+			break;
+		drv_usecwait(1000);
+	}
+
+	/* If any buffer is still posted upstream, refuse to detach */
+	if (brp->rx_hold != 0)
+		return (DDI_FAILURE);
+
+	/* Free the multicast address list */
+	for (p = ngep->pcur_mulist; p != NULL; p = nextp) {
+		nextp = p->next;
+		kmem_free(p, sizeof (mul_item));
+	}
+	ngep->pcur_mulist = NULL;
+
+	/*
+	 * Unregister from the GLD subsystem. This can fail, in
+	 * particular if there are DLPI style-2 streams still open -
+	 * in which case we just return failure without shutting
+	 * down chip operations.
+	 */
+	if (mac_unregister(ngep->mh) != DDI_SUCCESS)
+		return (DDI_FAILURE);
+
+	/*
+	 * All activity stopped, so we can clean up & exit
+	 */
+	nge_unattach(ngep);
+	return (DDI_SUCCESS);
+}
+
+
+/*
+ * ========== Module Loading Data & Entry Points ==========
+ */
+
+/*
+ * Standard DDI/STREAMS device operations: no probe (nulldev),
+ * attach/detach as defined above, no power management (nodev).
+ */
+DDI_DEFINE_STREAM_OPS(nge_dev_ops, nulldev, nulldev, nge_attach, nge_detach,
+	nodev, NULL, D_MP, NULL);
+
+
+static struct modldrv nge_modldrv = {
+	&mod_driverops,		/* Type of module. This one is a driver */
+	nge_ident,		/* short description */
+	&nge_dev_ops		/* driver specific ops */
+};
+
+/* Linkage structure handed to mod_install()/mod_remove() */
+static struct modlinkage modlinkage = {
+	MODREV_1, (void *)&nge_modldrv, NULL
+};
+
+
+/*
+ * _info(9E) -- return module information via the linkage above.
+ */
+int
+_info(struct modinfo *modinfop)
+{
+	int rv;
+
+	rv = mod_info(&modlinkage, modinfop);
+	return (rv);
+}
+
+/*
+ * _init(9E) -- module installation
+ *
+ * Initialises the MAC ops and the global logging mutex, then
+ * installs the module.  On failure both are torn down again.
+ */
+int
+_init(void)
+{
+	int status;
+
+	mac_init_ops(&nge_dev_ops, "nge");
+
+	/*
+	 * Initialise the logging mutex BEFORE mod_install(): once the
+	 * module is installed, attach (and hence logging) can run
+	 * immediately and would race a later mutex_init().
+	 */
+	mutex_init(nge_log_mutex, NULL, MUTEX_DRIVER, NULL);
+
+	status = mod_install(&modlinkage);
+	if (status != DDI_SUCCESS) {
+		/* Install failed: undo the ops and mutex setup */
+		mac_fini_ops(&nge_dev_ops);
+		mutex_destroy(nge_log_mutex);
+	}
+
+	return (status);
+}
+
+/*
+ * _fini(9E) -- module removal
+ *
+ * On successful removal, releases the MAC ops and tears down the
+ * logging mutex.
+ */
+int
+_fini(void)
+{
+	int status;
+
+	status = mod_remove(&modlinkage);
+	if (status != DDI_SUCCESS)
+		return (status);
+
+	mac_fini_ops(&nge_dev_ops);
+	mutex_destroy(nge_log_mutex);
+
+	return (status);
+}
+
+/*
+ * ============ Init MSI/Fixed/SoftInterrupt routines ==============
+ */
+
+/*
+ * Register interrupts and initialize each mutex and condition variables
+ *
+ * Adds the resched/factotum soft interrupts, registers MSI
+ * (preferred, with MCP55-specific vector remapping) or FIXED
+ * hardware interrupts, then initialises genlock/softlock/rwlock at
+ * the interrupt priorities just discovered.  Progress flags record
+ * completed stages for nge_unattach() to unwind.
+ */
+static int
+nge_register_intrs_and_init_locks(nge_t *ngep)
+{
+	int err;
+	int intr_types;
+	uint_t soft_prip;
+	nge_msi_mask msi_mask;
+	nge_msi_map0_vec map0_vec;
+	nge_msi_map1_vec map1_vec;
+
+	/*
+	 * Add the softint handlers:
+	 *
+	 * Both of these handlers are used to avoid restrictions on the
+	 * context and/or mutexes required for some operations. In
+	 * particular, the hardware interrupt handler and its subfunctions
+	 * can detect a number of conditions that we don't want to handle
+	 * in that context or with that set of mutexes held. So, these
+	 * softints are triggered instead:
+	 *
+	 * the <resched> softint is triggered if we have previously
+	 * had to refuse to send a packet because of resource shortage
+	 * (we've run out of transmit buffers), but the send completion
+	 * interrupt handler has now detected that more buffers have
+	 * become available. Its only purpose is to call gld_sched()
+	 * to retry the pending transmits (we're not allowed to hold
+	 * driver-defined mutexes across gld_sched()).
+	 *
+	 * the <factotum> is triggered if the h/w interrupt handler
+	 * sees the <link state changed> or <error> bits in the status
+	 * block. It's also triggered periodically to poll the link
+	 * state, just in case we aren't getting link status change
+	 * interrupts ...
+	 */
+	err = ddi_intr_add_softint(ngep->devinfo, &ngep->resched_hdl,
+	    DDI_INTR_SOFTPRI_MIN, nge_reschedule, (caddr_t)ngep);
+	if (err != DDI_SUCCESS) {
+		nge_problem(ngep,
+		    "nge_attach: add nge_reschedule softintr failed");
+
+		return (DDI_FAILURE);
+	}
+	ngep->progress |= PROGRESS_RESCHED;
+	err = ddi_intr_add_softint(ngep->devinfo, &ngep->factotum_hdl,
+	    DDI_INTR_SOFTPRI_MIN, nge_chip_factotum, (caddr_t)ngep);
+	if (err != DDI_SUCCESS) {
+		nge_problem(ngep,
+		    "nge_attach: add nge_chip_factotum softintr failed!");
+
+		return (DDI_FAILURE);
+	}
+	if (ddi_intr_get_softint_pri(ngep->factotum_hdl, &soft_prip)
+	    != DDI_SUCCESS) {
+		nge_problem(ngep, "nge_attach: get softintr priority failed\n");
+
+		return (DDI_FAILURE);
+	}
+	ngep->soft_pri = soft_prip;
+
+	ngep->progress |= PROGRESS_FACTOTUM;
+	/* Get supported interrupt types */
+	if (ddi_intr_get_supported_types(ngep->devinfo, &intr_types)
+	    != DDI_SUCCESS) {
+		nge_error(ngep, "ddi_intr_get_supported_types failed\n");
+
+		return (DDI_FAILURE);
+	}
+
+	NGE_DEBUG(("ddi_intr_get_supported_types() returned: %x",
+	    intr_types));
+
+	if ((intr_types & DDI_INTR_TYPE_MSI) && nge_enable_msi) {
+
+		/* MSI Configurations for mcp55 chipset */
+		if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
+		    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
+
+
+			/* Enable the 8 vectors */
+			msi_mask.msi_mask_val =
+			    nge_reg_get32(ngep, NGE_MSI_MASK);
+			msi_mask.msi_msk_bits.vec0 = NGE_SET;
+			msi_mask.msi_msk_bits.vec1 = NGE_SET;
+			msi_mask.msi_msk_bits.vec2 = NGE_SET;
+			msi_mask.msi_msk_bits.vec3 = NGE_SET;
+			msi_mask.msi_msk_bits.vec4 = NGE_SET;
+			msi_mask.msi_msk_bits.vec5 = NGE_SET;
+			msi_mask.msi_msk_bits.vec6 = NGE_SET;
+			msi_mask.msi_msk_bits.vec7 = NGE_SET;
+			nge_reg_put32(ngep, NGE_MSI_MASK,
+			    msi_mask.msi_mask_val);
+
+			/*
+			 * Remap MSI MAP0 and MAP1: by default the MCP55
+			 * maps every interrupt source to vector 0, so
+			 * software must remap them here.
+			 * This mapping is the same as on the CK804.
+			 */
+			map0_vec.msi_map0_val =
+			    nge_reg_get32(ngep, NGE_MSI_MAP0);
+			map1_vec.msi_map1_val =
+			    nge_reg_get32(ngep, NGE_MSI_MAP1);
+			map0_vec.vecs_bits.reint_vec = 0;
+			map0_vec.vecs_bits.rcint_vec = 0;
+			map0_vec.vecs_bits.miss_vec = 3;
+			map0_vec.vecs_bits.teint_vec = 5;
+			map0_vec.vecs_bits.tcint_vec = 5;
+			map0_vec.vecs_bits.stint_vec = 2;
+			map0_vec.vecs_bits.mint_vec = 6;
+			map0_vec.vecs_bits.rfint_vec = 0;
+			map1_vec.vecs_bits.tfint_vec = 5;
+			map1_vec.vecs_bits.feint_vec = 6;
+			map1_vec.vecs_bits.resv8_11 = 3;
+			map1_vec.vecs_bits.resv12_15 = 1;
+			map1_vec.vecs_bits.resv16_19 = 0;
+			map1_vec.vecs_bits.resv20_23 = 7;
+			map1_vec.vecs_bits.resv24_31 = 0xff;
+			nge_reg_put32(ngep, NGE_MSI_MAP0,
+			    map0_vec.msi_map0_val);
+			nge_reg_put32(ngep, NGE_MSI_MAP1,
+			    map1_vec.msi_map1_val);
+		}
+		if (nge_add_intrs(ngep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
+			NGE_DEBUG(("MSI registration failed, "
+			    "trying FIXED interrupt type\n"));
+		} else {
+			nge_log(ngep, "Using MSI interrupt type\n");
+
+			ngep->intr_type = DDI_INTR_TYPE_MSI;
+			ngep->progress |= PROGRESS_SWINT;
+		}
+	}
+
+	/* Fall back to FIXED interrupts if MSI was not set up */
+	if (!(ngep->progress & PROGRESS_SWINT) &&
+	    (intr_types & DDI_INTR_TYPE_FIXED)) {
+		if (nge_add_intrs(ngep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
+			nge_error(ngep, "FIXED interrupt "
+			    "registration failed\n");
+
+			return (DDI_FAILURE);
+		}
+
+		nge_log(ngep, "Using FIXED interrupt type\n");
+
+		ngep->intr_type = DDI_INTR_TYPE_FIXED;
+		ngep->progress |= PROGRESS_SWINT;
+	}
+
+
+	if (!(ngep->progress & PROGRESS_SWINT)) {
+		nge_error(ngep, "No interrupts registered\n");
+
+		return (DDI_FAILURE);
+	}
+	/*
+	 * Initialise the driver locks at the interrupt priorities
+	 * discovered above.
+	 */
+	mutex_init(ngep->genlock, NULL, MUTEX_DRIVER,
+	    DDI_INTR_PRI(ngep->intr_pri));
+	mutex_init(ngep->softlock, NULL, MUTEX_DRIVER,
+	    DDI_INTR_PRI(ngep->soft_pri));
+	rw_init(ngep->rwlock, NULL, RW_DRIVER,
+	    DDI_INTR_PRI(ngep->intr_pri));
+
+	return (DDI_SUCCESS);
+}
+
+/*
+ * nge_add_intrs:
+ *
+ * Register FIXED or MSI interrupts.
+ *
+ * Allocates the interrupt handle array, attaches nge_chip_intr to
+ * every vector and records count/priority/capability information
+ * in the soft state.  Any failure unwinds whatever had been set
+ * up and returns DDI_FAILURE.
+ */
+static int
+nge_add_intrs(nge_t *ngep, int intr_type)
+{
+	dev_info_t *dip = ngep->devinfo;
+	int avail, actual, intr_size, count = 0;
+	int i, j, flag, ret;
+
+	NGE_DEBUG(("nge_add_intrs: interrupt type 0x%x\n", intr_type));
+
+	/* Get number of interrupts */
+	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
+	if ((ret != DDI_SUCCESS) || (count == 0)) {
+		nge_error(ngep, "ddi_intr_get_nintrs() failure, ret: %d, "
+		    "count: %d", ret, count);
+
+		return (DDI_FAILURE);
+	}
+
+	/* Get number of available interrupts */
+	ret = ddi_intr_get_navail(dip, intr_type, &avail);
+	if ((ret != DDI_SUCCESS) || (avail == 0)) {
+		nge_error(ngep, "ddi_intr_get_navail() failure, "
+		    "ret: %d, avail: %d\n", ret, avail);
+
+		return (DDI_FAILURE);
+	}
+
+	if (avail < count) {
+		NGE_DEBUG(("nitrs() returned %d, navail returned %d\n",
+		    count, avail));
+	}
+	flag = DDI_INTR_ALLOC_NORMAL;
+
+	/* Allocate an array of interrupt handles */
+	intr_size = count * sizeof (ddi_intr_handle_t);
+	ngep->htable = kmem_alloc(intr_size, KM_SLEEP);
+
+	/* Call ddi_intr_alloc() */
+	ret = ddi_intr_alloc(dip, ngep->htable, intr_type, 0,
+	    count, &actual, flag);
+
+	if ((ret != DDI_SUCCESS) || (actual == 0)) {
+		nge_error(ngep, "ddi_intr_alloc() failed %d\n", ret);
+
+		kmem_free(ngep->htable, intr_size);
+		return (DDI_FAILURE);
+	}
+
+	if (actual < count) {
+		NGE_DEBUG(("Requested: %d, Received: %d\n",
+		    count, actual));
+	}
+
+	ngep->intr_actual_cnt = actual;
+	ngep->intr_req_cnt = count;
+
+	/*
+	 * Get priority for first msi, assume remaining are all the same
+	 */
+	if ((ret = ddi_intr_get_pri(ngep->htable[0], &ngep->intr_pri)) !=
+	    DDI_SUCCESS) {
+		nge_error(ngep, "ddi_intr_get_pri() failed %d\n", ret);
+
+		/* Free already allocated intr */
+		for (i = 0; i < actual; i++) {
+			(void) ddi_intr_free(ngep->htable[i]);
+		}
+
+		kmem_free(ngep->htable, intr_size);
+
+		return (DDI_FAILURE);
+	}
+	/* Test for high level mutex */
+	if (ngep->intr_pri >= ddi_intr_get_hilevel_pri()) {
+		nge_error(ngep, "nge_add_intrs:"
+		    "Hi level interrupt not supported");
+
+		for (i = 0; i < actual; i++)
+			(void) ddi_intr_free(ngep->htable[i]);
+
+		kmem_free(ngep->htable, intr_size);
+
+		return (DDI_FAILURE);
+	}
+
+
+	/* Call ddi_intr_add_handler() */
+	for (i = 0; i < actual; i++) {
+		if ((ret = ddi_intr_add_handler(ngep->htable[i], nge_chip_intr,
+		    (caddr_t)ngep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
+			nge_error(ngep, "ddi_intr_add_handler() "
+			    "failed %d\n", ret);
+
+			/*
+			 * Fix: detach the handlers added on earlier
+			 * iterations before freeing the interrupts
+			 * (the original cleanup reused 'i' and freed
+			 * handles whose handlers were still attached).
+			 */
+			for (j = 0; j < i; j++)
+				(void) ddi_intr_remove_handler(
+				    ngep->htable[j]);
+
+			/* Free all allocated intr */
+			for (j = 0; j < actual; j++)
+				(void) ddi_intr_free(ngep->htable[j]);
+
+			kmem_free(ngep->htable, intr_size);
+
+			return (DDI_FAILURE);
+		}
+	}
+
+	if ((ret = ddi_intr_get_cap(ngep->htable[0], &ngep->intr_cap))
+	    != DDI_SUCCESS) {
+		nge_error(ngep, "ddi_intr_get_cap() failed %d\n", ret);
+
+		/* Unwind: remove every handler, free every interrupt */
+		for (i = 0; i < actual; i++) {
+			(void) ddi_intr_remove_handler(ngep->htable[i]);
+			(void) ddi_intr_free(ngep->htable[i]);
+		}
+
+		kmem_free(ngep->htable, intr_size);
+
+		return (DDI_FAILURE);
+	}
+
+	return (DDI_SUCCESS);
+}
+
+/*
+ * nge_rem_intrs:
+ *
+ * Unregister FIXED or MSI interrupts: disable every vector,
+ * detach the handlers, free the handles and release the handle
+ * array allocated by nge_add_intrs().
+ */
+static void
+nge_rem_intrs(nge_t *ngep)
+{
+	int i;
+
+	NGE_DEBUG(("nge_rem_intrs\n"));
+
+	/*
+	 * Disable all interrupts first: block-disable when the
+	 * implementation supports it, one vector at a time otherwise.
+	 */
+	if (ngep->intr_cap & DDI_INTR_FLAG_BLOCK) {
+		(void) ddi_intr_block_disable(ngep->htable,
+		    ngep->intr_actual_cnt);
+	} else {
+		for (i = 0; i < ngep->intr_actual_cnt; i++)
+			(void) ddi_intr_disable(ngep->htable[i]);
+	}
+
+	/* Detach the handlers and release the interrupt handles */
+	for (i = 0; i < ngep->intr_actual_cnt; i++) {
+		(void) ddi_intr_remove_handler(ngep->htable[i]);
+		(void) ddi_intr_free(ngep->htable[i]);
+	}
+
+	kmem_free(ngep->htable,
+	    ngep->intr_req_cnt * sizeof (ddi_intr_handle_t));
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nge/nge_ndd.c Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,435 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file may contain confidential information of Nvidia
+ * and should not be distributed in source form without approval
+ * from Sun Legal.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "nge.h"
+
+#undef NGE_DBG
+#define NGE_DBG NGE_DBG_NDD
+
+static char transfer_speed_propname[] = "transfer-speed";
+static char speed_propname[] = "speed";
+static char duplex_propname[] = "full-duplex";
+
+/*
+ * Notes:
+ * The first character of the <name> field encodes the read/write
+ * status of the parameter:
+ * '=' => read-only,
+ * '-' => read-only and forced to 0 on serdes
+ * '+' => read/write,
+ * '?' => read/write on copper, read-only and 0 on serdes
+ * '!' => invisible!
+ *
+ * For writable parameters, we check for a driver property with the
+ * same name; if found, and its value is in range, we initialise
+ * the parameter from the property, overriding the default in the
+ * table below.
+ *
+ * A NULL in the <name> field terminates the array.
+ *
+ * The <info> field is used here to provide the index of the
+ * parameter to be initialised; thus it doesn't matter whether
+ * this table is kept ordered or not.
+ *
+ * The <info> field in the per-instance copy, on the other hand,
+ * is used to count assignments so that we can tell when a magic
+ * parameter has been set via ndd (see nge_param_set()).
+ */
+static const nd_param_t nd_template[] = {
+/*	info		min	max	init	r/w+name		*/
+
+/* Our hardware capabilities */
+{	PARAM_AUTONEG_CAP,	    0,	  1,	1,	"=autoneg_cap"	},
+{	PARAM_PAUSE_CAP,	    0,	  1,	1,	"=pause_cap"	},
+{	PARAM_ASYM_PAUSE_CAP,	    0,	  1,	1,	"=asym_pause_cap" },
+{	PARAM_1000FDX_CAP,	    0,	  1,	1,	"=1000fdx_cap"	},
+{	PARAM_1000HDX_CAP,	    0,	  1,	0,	"=1000hdx_cap"	},
+{	PARAM_100T4_CAP,	    0,	  1,	0,	"=100T4_cap"	},
+{	PARAM_100FDX_CAP,	    0,	  1,	1,	"-100fdx_cap"	},
+{	PARAM_100HDX_CAP,	    0,	  1,	1,	"-100hdx_cap"	},
+{	PARAM_10FDX_CAP,	    0,	  1,	1,	"-10fdx_cap"	},
+{	PARAM_10HDX_CAP,	    0,	  1,	1,	"-10hdx_cap"	},
+
+/* Our advertised capabilities */
+{	PARAM_ADV_AUTONEG_CAP,	    0,	  1,	1,	"+adv_autoneg_cap" },
+{	PARAM_ADV_PAUSE_CAP,	    0,	  1,	1,	"+adv_pause_cap" },
+{	PARAM_ADV_ASYM_PAUSE_CAP,   0,	  1,	1,	"+adv_asym_pause_cap" },
+{	PARAM_ADV_1000FDX_CAP,	    0,	  1,	1,	"+adv_1000fdx_cap" },
+{	PARAM_ADV_1000HDX_CAP,	    0,	  1,	0,	"=adv_1000hdx_cap" },
+{	PARAM_ADV_100T4_CAP,	    0,	  1,	0,	"=adv_100T4_cap" },
+{	PARAM_ADV_100FDX_CAP,	    0,	  1,	1,	"?adv_100fdx_cap" },
+{	PARAM_ADV_100HDX_CAP,	    0,	  1,	1,	"?adv_100hdx_cap" },
+{	PARAM_ADV_10FDX_CAP,	    0,	  1,	1,	"?adv_10fdx_cap" },
+{	PARAM_ADV_10HDX_CAP,	    0,	  1,	1,	"?adv_10hdx_cap" },
+
+/* Partner's advertised capabilities */
+{	PARAM_LP_AUTONEG_CAP,	    0,	  1,	0,	"-lp_autoneg_cap" },
+{	PARAM_LP_PAUSE_CAP,	    0,	  1,	0,	"-lp_pause_cap"	},
+{	PARAM_LP_ASYM_PAUSE_CAP,    0,	  1,	0,	"-lp_asym_pause_cap" },
+{	PARAM_LP_1000FDX_CAP,	    0,	  1,	0,	"-lp_1000fdx_cap" },
+{	PARAM_LP_1000HDX_CAP,	    0,	  1,	0,	"-lp_1000hdx_cap" },
+{	PARAM_LP_100T4_CAP,	    0,	  1,	0,	"-lp_100T4_cap"	},
+{	PARAM_LP_100FDX_CAP,	    0,	  1,	0,	"-lp_100fdx_cap" },
+{	PARAM_LP_100HDX_CAP,	    0,	  1,	0,	"-lp_100hdx_cap" },
+{	PARAM_LP_10FDX_CAP,	    0,	  1,	0,	"-lp_10fdx_cap"	},
+{	PARAM_LP_10HDX_CAP,	    0,	  1,	0,	"-lp_10hdx_cap"	},
+
+/* Current operating modes */
+{	PARAM_LINK_STATUS,	    0,	  1,	0,	"-link_status"	},
+/* link_speed is reported in Mb/s (10/100/1000; see nge_nd_init()) */
+{	PARAM_LINK_SPEED,	    0,	  1000,	0,	"-link_speed"	},
+{	PARAM_LINK_DUPLEX,	   -1,	  1,	-1,	"-link_duplex"	},
+
+{	PARAM_LINK_AUTONEG,	    0,	  1,	0,	"-link_autoneg"	},
+{	PARAM_LINK_RX_PAUSE,	    0,	  1,	0,	"-link_rx_pause" },
+{	PARAM_LINK_TX_PAUSE,	    0,	  1,	0,	"-link_tx_pause" },
+
+/* Loopback status */
+{	PARAM_LOOP_MODE,	    0,	  5,	0,	"-loop_mode"	},
+
+/* TX Bcopy threshold */
+{	PARAM_TXBCOPY_THRESHOLD,    0,	  NGE_MAX_SDU,	NGE_TX_COPY_SIZE,
+"+tx_bcopy_threshold" },
+
+/* RX Bcopy threshold */
+{	PARAM_RXBCOPY_THRESHOLD,    0,	  NGE_MAX_SDU,	NGE_RX_COPY_SIZE,
+"+rx_bcopy_threshold" },
+
+/* Max packet received per interrupt */
+{	PARAM_RECV_MAX_PACKET,	0,	  NGE_RECV_SLOTS_DESC_1024,	32,
+"+recv_max_packet" },
+/* Terminator (NULL name ends the scan in nge_param_register()) */
+{	PARAM_COUNT,		    0,	  0,	0,	NULL }
+};
+
+
+/* ============== NDD Support Functions =============== */
+
+/*
+ * Extracts the value from the nge parameter array and prints
+ * the parameter value. cp points to the required parameter.
+ */
+static int
+nge_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
+{
+	nd_param_t *ndp;
+
+	_NOTE(ARGUNUSED(q, credp))
+	/* cp is the nd_param_t registered with nd_load() for this name */
+	ndp = (nd_param_t *)cp;
+	(void) mi_mpprintf(mp, "%d", ndp->ndp_val);
+
+	return (0);
+}
+
+/*
+ * Validates the request to set a NGE parameter to a specific value.
+ * If the request is OK, the parameter is set. Also the <info> field
+ * is incremented to show that the parameter was touched, even though
+ * it may have been set to the same value it already had.
+ */
+static int
+nge_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
+{
+	nd_param_t *ndp;
+	long new_value;
+	char *end;
+
+	_NOTE(ARGUNUSED(q, mp, credp))
+	ndp = (nd_param_t *)cp;
+	new_value = mi_strtol(value, &end, 10);
+	/* end == value means no digits were consumed: not a number */
+	if (end == value)
+		return (EINVAL);
+	if (new_value < ndp->ndp_min || new_value > ndp->ndp_max)
+		return (EINVAL);
+
+	ndp->ndp_val = new_value;
+	/* count the assignment so "magic" parameter writes can be detected */
+	ndp->ndp_info += 1;
+	return (0);
+}
+
+/*
+ * Initialise the per-instance parameter array from the global prototype,
+ * and register each element with the named dispatch handler using nd_load()
+ */
+static int
+nge_param_register(nge_t *ngep)
+{
+	const nd_param_t *tmplp;
+	dev_info_t *dip;
+	nd_param_t *ndp;
+	caddr_t *nddpp;
+	pfi_t setfn;
+	char *nm;
+	int pval;
+
+	dip = ngep->devinfo;
+	nddpp = &ngep->nd_data_p;
+	ASSERT(*nddpp == NULL);
+
+	NGE_TRACE(("nge_param_register($%p)", (void *)ngep));
+
+	for (tmplp = nd_template; tmplp->ndp_name != NULL; ++tmplp) {
+		/*
+		 * Copy the template from nd_template[] into the
+		 * proper slot in the per-instance parameters,
+		 * then register the parameter with nd_load()
+		 */
+		ndp = &ngep->nd_params[tmplp->ndp_info];
+		*ndp = *tmplp;
+		nm = &ndp->ndp_name[0];
+		setfn = nge_param_set;
+		/*
+		 * The first character of the name encodes read/write
+		 * status (see the table header comment above).
+		 */
+		switch (*nm) {
+		default:
+		case '!':
+			continue;	/* invisible: not registered at all */
+
+		case '+':
+		case '?':
+			break;		/* writable: keep nge_param_set */
+
+		case '=':
+		case '-':
+			setfn = NULL;	/* read-only: no set function */
+			break;
+		}
+
+		/* ++nm skips the prefix char so ndd sees the bare name */
+		if (!nd_load(nddpp, ++nm, nge_param_get, setfn, (caddr_t)ndp))
+			goto nd_fail;
+
+		/*
+		 * If the parameter is writable, and there's a property
+		 * with the same name, and its value is in range, we use
+		 * it to initialise the parameter.  If it exists but is
+		 * out of range, it's ignored.
+		 */
+		if (setfn && NGE_PROP_EXISTS(dip, nm)) {
+			pval = NGE_PROP_GET_INT(dip, nm);
+			if (pval >= ndp->ndp_min && pval <= ndp->ndp_max)
+				ndp->ndp_val = pval;
+		}
+	}
+	return (DDI_SUCCESS);
+
+nd_fail:
+	/* tear down any entries already registered */
+	nd_free(nddpp);
+	return (DDI_FAILURE);
+}
+
+int
+nge_nd_init(nge_t *ngep)
+{
+	int duplex;
+	int speed;
+	dev_info_t *dip;
+
+	NGE_TRACE(("nge_nd_init($%p)", (void *)ngep));
+	/*
+	 * Register all the per-instance properties, initialising
+	 * them from the table above or from driver properties set
+	 * in the .conf file
+	 */
+	if (nge_param_register(ngep) != DDI_SUCCESS)
+		return (-1);
+
+	/*
+	 * The link speed may be forced to 10, 100 or 1000 Mbps using
+	 * the property "transfer-speed". This may be done in OBP by
+	 * using the command "apply transfer-speed=<speed> <device>".
+	 * The speed may be 10, 100 or 1000 - any other value will be
+	 * ignored.  Note that this does *enable* autonegotiation, but
+	 * restricts it to the speed specified by the property.
+	 */
+	dip = ngep->devinfo;
+	if (NGE_PROP_EXISTS(dip, transfer_speed_propname)) {
+
+		speed = NGE_PROP_GET_INT(dip, transfer_speed_propname);
+		nge_log(ngep, "%s property is %d",
+		    transfer_speed_propname, speed);
+
+		/* advertise only the capabilities matching the forced speed */
+		switch (speed) {
+		case 1000:
+			ngep->param_adv_autoneg = 1;
+			ngep->param_adv_1000fdx = 1;
+			ngep->param_adv_1000hdx = 0;
+			ngep->param_adv_100fdx = 0;
+			ngep->param_adv_100hdx = 0;
+			ngep->param_adv_10fdx = 0;
+			ngep->param_adv_10hdx = 0;
+			break;
+
+		case 100:
+			ngep->param_adv_autoneg = 1;
+			ngep->param_adv_1000fdx = 0;
+			ngep->param_adv_1000hdx = 0;
+			ngep->param_adv_100fdx = 1;
+			ngep->param_adv_100hdx = 1;
+			ngep->param_adv_10fdx = 0;
+			ngep->param_adv_10hdx = 0;
+			break;
+
+		case 10:
+			ngep->param_adv_autoneg = 1;
+			ngep->param_adv_1000fdx = 0;
+			ngep->param_adv_1000hdx = 0;
+			ngep->param_adv_100fdx = 0;
+			ngep->param_adv_100hdx = 0;
+			ngep->param_adv_10fdx = 1;
+			ngep->param_adv_10hdx = 1;
+			break;
+
+		default:
+			/* out-of-range value: silently ignored, as documented */
+			break;
+		}
+	}
+
+	/*
+	 * Also check the "speed" and "full-duplex" properties. Setting
+	 * these properties will override all other settings and *disable*
+	 * autonegotiation, so both should be specified if either one is.
+	 * Otherwise, the unspecified parameter will be set to a default
+	 * value (1000Mb/s, full-duplex).
+	 */
+	if (NGE_PROP_EXISTS(dip, speed_propname) ||
+	    NGE_PROP_EXISTS(dip, duplex_propname)) {
+
+		/* start from "everything except 1000hdx", then narrow down */
+		ngep->param_adv_autoneg = 0;
+		ngep->param_adv_1000fdx = 1;
+		ngep->param_adv_1000hdx = 0;
+		ngep->param_adv_100fdx = 1;
+		ngep->param_adv_100hdx = 1;
+		ngep->param_adv_10fdx = 1;
+		ngep->param_adv_10hdx = 1;
+
+		speed = NGE_PROP_GET_INT(dip, speed_propname);
+		duplex = NGE_PROP_GET_INT(dip, duplex_propname);
+		nge_log(ngep, "%s property is %d",
+		    speed_propname, speed);
+		nge_log(ngep, "%s property is %d",
+		    duplex_propname, duplex);
+
+		/* keep only the capabilities for the selected speed ... */
+		switch (speed) {
+		case 1000:
+		default:
+			ngep->param_adv_100fdx = 0;
+			ngep->param_adv_100hdx = 0;
+			ngep->param_adv_10fdx = 0;
+			ngep->param_adv_10hdx = 0;
+			break;
+
+		case 100:
+			ngep->param_adv_1000fdx = 0;
+			ngep->param_adv_1000hdx = 0;
+			ngep->param_adv_10fdx = 0;
+			ngep->param_adv_10hdx = 0;
+			break;
+
+		case 10:
+			ngep->param_adv_1000fdx = 0;
+			ngep->param_adv_1000hdx = 0;
+			ngep->param_adv_100fdx = 0;
+			ngep->param_adv_100hdx = 0;
+			break;
+		}
+
+		/* ... and then only those for the selected duplex */
+		switch (duplex) {
+		default:
+		case 1:
+			ngep->param_adv_1000hdx = 0;
+			ngep->param_adv_100hdx = 0;
+			ngep->param_adv_10hdx = 0;
+			break;
+
+		case 0:
+			ngep->param_adv_1000fdx = 0;
+			ngep->param_adv_100fdx = 0;
+			ngep->param_adv_10fdx = 0;
+			break;
+		}
+	}
+
+	return (0);
+}
+
+enum ioc_reply
+nge_nd_ioctl(nge_t *ngep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
+{
+	boolean_t ok;
+	int cmd;
+	NGE_TRACE(("nge_nd_ioctl($%p, $%p, $%p, $%p)",
+	    (void *)ngep, (void *)wq, (void *)mp, (void *)iocp));
+
+	/* caller must already hold genlock (top-level ioctl path) */
+	ASSERT(mutex_owned(ngep->genlock));
+
+	cmd = iocp->ioc_cmd;
+	switch (cmd) {
+	default:
+		/* only ND_GET/ND_SET should be dispatched here */
+		nge_error(ngep, "nge_nd_ioctl: invalid cmd 0x%x", cmd);
+		return (IOC_INVAL);
+
+	case ND_GET:
+		/*
+		 * If nd_getset() returns B_FALSE, the command was
+		 * not valid (e.g. unknown name), so we just tell the
+		 * top-level ioctl code to send a NAK (with code EINVAL).
+		 *
+		 * Otherwise, nd_getset() will have built the reply to
+		 * be sent (but not actually sent it), so we tell the
+		 * caller to send the prepared reply.
+		 */
+		ok = nd_getset(wq, ngep->nd_data_p, mp);
+		return (ok ? IOC_REPLY : IOC_INVAL);
+
+	case ND_SET:
+		/*
+		 * All adv_* parameters are locked (read-only) while
+		 * the device is in any sort of loopback mode ...
+		 */
+		if (ngep->param_loop_mode != NGE_LOOP_NONE) {
+			iocp->ioc_error = EBUSY;
+			return (IOC_INVAL);
+		}
+
+		ok = nd_getset(wq, ngep->nd_data_p, mp);
+
+		/*
+		 * If nd_getset() returns B_FALSE, the command was
+		 * not valid (e.g. unknown name), so we just tell
+		 * the top-level ioctl code to send a NAK (with code
+		 * EINVAL by default).
+		 *
+		 * Otherwise, nd_getset() will have built the reply to
+		 * be sent - but that doesn't imply success!  In some
+		 * cases, the reply it's built will have a non-zero
+		 * error code in it (e.g. EPERM if not superuser).
+		 * So, we also drop out in that case ...
+		 */
+		if (!ok)
+			return (IOC_INVAL);
+		if (iocp->ioc_error)
+			return (IOC_REPLY);
+
+		/*
+		 * OK, a successful 'set'.  Return IOC_RESTART_REPLY,
+		 * telling the top-level ioctl code to update the PHY
+		 * and restart the chip before sending our prepared reply
+		 */
+		return (IOC_RESTART_REPLY);
+	}
+}
+
+/* Free the Named Dispatch Table by calling nd_free */
+void
+nge_nd_cleanup(nge_t *ngep)
+{
+	NGE_TRACE(("nge_nd_cleanup($%p)", (void *)ngep));
+	/* nd_free() NULLs the pointer, so this is safe to call twice */
+	nd_free(&ngep->nd_data_p);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nge/nge_rx.c Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,474 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file may contain confidential information of Nvidia
+ * and should not be distributed in source form without approval
+ * from Sun Legal.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "nge.h"
+
+#undef NGE_DBG
+#define NGE_DBG NGE_DBG_RECV
+
+#define RXD_END 0x20000000
+#define RXD_ERR 0x40000000
+#define RXD_OWN 0x80000000
+#define RXD_CSUM_MSK 0x1C000000
+#define RXD_BCNT_MSK 0x00003FFF
+
+#define RXD_CK8G_NO_HSUM 0x0
+#define RXD_CK8G_TCP_SUM_ERR 0x04000000
+#define RXD_CK8G_UDP_SUM_ERR 0x08000000
+#define RXD_CK8G_IP_HSUM_ERR 0x0C000000
+#define RXD_CK8G_IP_HSUM 0x10000000
+#define RXD_CK8G_TCP_SUM 0x14000000
+#define RXD_CK8G_UDP_SUM 0x18000000
+#define RXD_CK8G_RESV 0x1C000000
+
+extern ddi_device_acc_attr_t nge_data_accattr;
+
+/*
+ * Callback invoked from STREAMS when a receive data buffer is freed
+ * and becomes available for recycling.
+ */
+
+void
+nge_recv_recycle(caddr_t arg)
+{
+	boolean_t val;
+	boolean_t valid;
+	nge_t *ngep;
+	dma_area_t *bufp;
+	buff_ring_t *brp;
+	nge_sw_statistics_t *sw_stp;
+
+	/* arg is the dma_area_t registered as the desballoc() free_arg */
+	bufp = (dma_area_t *)arg;
+	ngep = (nge_t *)bufp->private;
+	brp = ngep->buff;
+	sw_stp = &ngep->statistics.sw_statistics;
+
+	/*
+	 * Free the buffer directly if the buffer was allocated
+	 * previously or mac was stopped.
+	 * (A signature mismatch means the buffer ring this buffer
+	 * belonged to has since been torn down or reinitialised.)
+	 */
+	if (bufp->signature != brp->buf_sign) {
+		if (bufp->rx_delivered == B_TRUE) {
+			nge_free_dma_mem(bufp);
+			kmem_free(bufp, sizeof (dma_area_t));
+			val = nge_atomic_decrease(&brp->rx_hold, 1);
+			ASSERT(val == B_TRUE);
+		}
+		return;
+	}
+
+	/*
+	 * recycle the data buffer again and fill them in free ring
+	 */
+	bufp->rx_recycle.free_func = nge_recv_recycle;
+	bufp->rx_recycle.free_arg = (caddr_t)bufp;
+
+	bufp->mp = desballoc(DMA_VPTR(*bufp),
+	    ngep->buf_size + NGE_HEADROOM, 0, &bufp->rx_recycle);
+
+	if (bufp->mp == NULL) {
+		/* no mblk: give the buffer up entirely */
+		sw_stp->mp_alloc_err++;
+		sw_stp->recy_free++;
+		nge_free_dma_mem(bufp);
+		kmem_free(bufp, sizeof (dma_area_t));
+		val = nge_atomic_decrease(&brp->rx_hold, 1);
+		ASSERT(val == B_TRUE);
+	} else {
+
+		/*
+		 * Re-check the signature under recycle_lock: the ring
+		 * may have been reinitialised while desballoc() ran.
+		 */
+		mutex_enter(brp->recycle_lock);
+		if (bufp->signature != brp->buf_sign)
+			valid = B_TRUE;
+		else
+			valid = B_FALSE;
+		bufp->rx_delivered = valid;
+		if (bufp->rx_delivered == B_FALSE) {
+			/* still valid: put it back on the recycle list */
+			bufp->next = brp->recycle_list;
+			brp->recycle_list = bufp;
+		}
+		mutex_exit(brp->recycle_lock);
+		if (valid == B_TRUE)
+			/* call nge_rx_recycle again to free it */
+			freemsg(bufp->mp);
+		else {
+			val = nge_atomic_decrease(&brp->rx_hold, 1);
+			ASSERT(val == B_TRUE);
+		}
+	}
+}
+
+/*
+ * Checking the rx's BDs (one or more) to receive
+ * one complete packet.
+ * start_index: the start indexer of BDs for one packet.
+ * end_index: the end indexer of BDs for one packet.
+ */
+static mblk_t *nge_recv_packet(nge_t *ngep, uint32_t start_index, size_t len);
+#pragma	inline(nge_recv_packet)
+
+static mblk_t *
+nge_recv_packet(nge_t *ngep, uint32_t start_index, size_t len)
+{
+	uint8_t *rptr;
+	uint32_t minsize;
+	uint32_t maxsize;
+	mblk_t *mp;
+	buff_ring_t *brp;
+	sw_rx_sbd_t *srbdp;
+	dma_area_t *bufp;
+	nge_sw_statistics_t *sw_stp;
+	void *hw_bd_p;
+
+	brp = ngep->buff;
+	minsize = ETHERMIN;
+	maxsize = ngep->max_sdu;
+	sw_stp = &ngep->statistics.sw_statistics;
+	mp = NULL;
+
+	srbdp = &brp->sw_rbds[start_index];
+	DMA_SYNC(*srbdp->bufp, DDI_DMA_SYNC_FORKERNEL);
+	hw_bd_p = DMA_VPTR(srbdp->desc);
+
+	/*
+	 * First check the free_list, if it is NULL,
+	 * make the recycle_list be free_list.
+	 */
+	if (brp->free_list == NULL) {
+		mutex_enter(brp->recycle_lock);
+		brp->free_list = brp->recycle_list;
+		brp->recycle_list = NULL;
+		mutex_exit(brp->recycle_lock);
+	}
+	bufp = brp->free_list;
+	/* If it's not a qualified packet, delete it */
+	if (len > maxsize || len < minsize) {
+		/* re-arm the descriptor with the same buffer */
+		ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
+		    srbdp->bufp->alength);
+		srbdp->flags = CONTROLER_OWN;
+		return (NULL);
+	}
+
+	/*
+	 * If receive packet size is smaller than RX bcopy threshold,
+	 * or there is no available buffer in free_list or recycle list,
+	 * we use bcopy directly.
+	 */
+	if (len <= ngep->param_rxbcopy_threshold || bufp == NULL)
+		brp->rx_bcopy = B_TRUE;
+	else
+		brp->rx_bcopy = B_FALSE;
+
+	if (brp->rx_bcopy) {
+		/* copy path: allocate a fresh mblk and copy the data in */
+		mp = allocb(len + NGE_HEADROOM, 0);
+		if (mp == NULL) {
+			sw_stp->mp_alloc_err++;
+			ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
+			    srbdp->bufp->alength);
+			srbdp->flags = CONTROLER_OWN;
+			return (NULL);
+		}
+		rptr = DMA_VPTR(*srbdp->bufp);
+		mp->b_rptr = mp->b_rptr + NGE_HEADROOM;
+		bcopy(rptr + NGE_HEADROOM, mp->b_rptr, len);
+		mp->b_wptr = mp->b_rptr + len;
+	} else {
+		/* loan-up path: pass the DMA buffer's mblk upstream */
+		mp = srbdp->bufp->mp;
+		/*
+		 * Make sure the packet *contents* 4-byte aligned
+		 */
+		mp->b_rptr += NGE_HEADROOM;
+		mp->b_wptr = mp->b_rptr + len;
+		mp->b_next = mp->b_cont = NULL;
+		srbdp->bufp->rx_delivered = B_TRUE;
+		srbdp->bufp = NULL;
+		nge_atomic_increase(&brp->rx_hold, 1);
+
+		/* Fill the buffer from free_list */
+		srbdp->bufp = bufp;
+		brp->free_list = bufp->next;
+		bufp->next = NULL;
+	}
+
+	/* replenish the buffer for hardware descriptor */
+	ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
+	    srbdp->bufp->alength);
+	srbdp->flags = CONTROLER_OWN;
+	sw_stp->rbytes += len;
+	sw_stp->recv_count++;
+
+	return (mp);
+}
+
+
+#define RX_HW_ERR 0x01
+#define RX_SUM_NO 0x02
+#define RX_SUM_ERR 0x04
+
+/*
+ * Classify the receive descriptor's error/checksum status bits
+ * and generate a log message for any errors found.
+ * Note:
+ * RXE, parity, symbol and CRC errors have already been
+ * recorded by Nvidia's hardware statistics block
+ * (nge_statistics), so it is unnecessary for the driver
+ * to record them again here.
+ */
+static uint32_t
+nge_rxsta_handle(nge_t *ngep, uint32_t stflag, uint32_t *pflags);
+#pragma	inline(nge_rxsta_handle)
+
+static uint32_t
+nge_rxsta_handle(nge_t *ngep, uint32_t stflag, uint32_t *pflags)
+{
+	uint32_t errors;
+	uint32_t err_flag;
+	nge_sw_statistics_t *sw_stp;
+
+	err_flag = 0;
+	sw_stp = &ngep->statistics.sw_statistics;
+
+	/* descriptor without the END bit cannot be a complete packet */
+	if ((RXD_END & stflag) == 0)
+		return (RX_HW_ERR);
+
+	/* decode the 3-bit hardware checksum result field */
+	errors = stflag & RXD_CSUM_MSK;
+	switch (errors) {
+	default:
+		break;
+
+	case RXD_CK8G_TCP_SUM:
+	case RXD_CK8G_UDP_SUM:
+		/* both IP header and full TCP/UDP checksum verified */
+		*pflags |= HCK_FULLCKSUM;
+		*pflags |= HCK_IPV4_HDRCKSUM;
+		*pflags |= HCK_FULLCKSUM_OK;
+		break;
+
+	case RXD_CK8G_TCP_SUM_ERR:
+	case RXD_CK8G_UDP_SUM_ERR:
+		/* payload checksum bad; IP header checksum still good */
+		sw_stp->tcp_hwsum_err++;
+		*pflags |= HCK_IPV4_HDRCKSUM;
+		break;
+
+	case RXD_CK8G_IP_HSUM:
+		*pflags |= HCK_IPV4_HDRCKSUM;
+		break;
+
+	case RXD_CK8G_NO_HSUM:
+		err_flag |= RX_SUM_NO;
+		break;
+
+	case RXD_CK8G_IP_HSUM_ERR:
+		sw_stp->ip_hwsum_err++;
+		err_flag |= RX_SUM_ERR;
+		break;
+	}
+
+	if ((stflag & RXD_ERR) != 0)	{
+
+		err_flag |= RX_HW_ERR;
+		NGE_DEBUG(("Receive desc error, status: 0x%x", stflag));
+	}
+
+	return (err_flag);
+}
+
+static mblk_t *
+nge_recv_ring(nge_t *ngep)
+{
+	uint32_t stflag;
+	uint32_t flag_err;
+	uint32_t sum_flags;
+	uint32_t count;
+	size_t len;
+	uint64_t end_index;
+	uint64_t sync_start;
+	mblk_t *mp;
+	mblk_t **tail;
+	mblk_t *head;
+	recv_ring_t *rrp;
+	buff_ring_t *brp;
+	sw_rx_sbd_t *srbdp;
+	void * hw_bd_p;
+	nge_mode_cntl mode_cntl;
+
+	mp = NULL;
+	head = NULL;
+	count = 0;
+	tail = &head;
+	rrp = ngep->recv;
+	brp = ngep->buff;
+
+	end_index = sync_start = rrp->prod_index;
+	/* Sync the descriptor for kernel (two calls if the range wraps) */
+	if (sync_start + ngep->param_recv_max_packet <= ngep->rx_desc) {
+		(void) ddi_dma_sync(rrp->desc.dma_hdl,
+		    sync_start * ngep->desc_attr.rxd_size,
+		    ngep->param_recv_max_packet * ngep->desc_attr.rxd_size,
+		    DDI_DMA_SYNC_FORKERNEL);
+	} else {
+		/* length 0 syncs from the offset to the end of the object */
+		(void) ddi_dma_sync(rrp->desc.dma_hdl,
+		    sync_start * ngep->desc_attr.rxd_size,
+		    0,
+		    DDI_DMA_SYNC_FORKERNEL);
+		(void) ddi_dma_sync(rrp->desc.dma_hdl,
+		    0,
+		    (ngep->param_recv_max_packet + sync_start - ngep->rx_desc) *
+		    ngep->desc_attr.rxd_size,
+		    DDI_DMA_SYNC_FORKERNEL);
+	}
+
+	/*
+	 * Looking through the rx's ring to find the good packets
+	 * and try to receive more and more packets in rx's ring
+	 */
+	for (;;) {
+		sum_flags = 0;
+		flag_err = 0;
+		end_index = rrp->prod_index;
+		srbdp = &brp->sw_rbds[end_index];
+		hw_bd_p = DMA_VPTR(srbdp->desc);
+		stflag = ngep->desc_attr.rxd_check(hw_bd_p, &len);
+		/*
+		 * If there is no packet in receiving ring
+		 * break the loop
+		 */
+		if ((stflag & RXD_OWN) != 0 || HOST_OWN == srbdp->flags)
+			break;
+
+		ngep->recv_count++;
+		flag_err = nge_rxsta_handle(ngep, stflag, &sum_flags);
+		if ((flag_err & RX_HW_ERR) == 0) {
+			srbdp->flags = NGE_END_PACKET;
+			mp = nge_recv_packet(ngep, end_index, len);
+		} else {
+			/* Hardware error, re-use the buffer */
+			ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
+			    srbdp->bufp->alength);
+			srbdp->flags = CONTROLER_OWN;
+		}
+		count++;
+		if (mp != NULL) {
+			/* pass hardware checksum results up with the packet */
+			if (!(flag_err & (RX_SUM_NO | RX_SUM_ERR))) {
+				(void) hcksum_assoc(mp, NULL, NULL,
+				    0, 0, 0, 0, sum_flags, 0);
+			}
+			*tail = mp;
+			tail = &mp->b_next;
+			mp = NULL;
+		}
+		rrp->prod_index = NEXT(end_index, rrp->desc.nslots);
+		/*
+		 * NOTE(review): ">" allows param_recv_max_packet + 1
+		 * packets per call, one more than was synced above —
+		 * confirm whether ">=" was intended.
+		 */
+		if (count > ngep->param_recv_max_packet)
+			break;
+	}
+
+	/* Sync the descriptors for device (again split on ring wrap) */
+	if (sync_start + count <= ngep->rx_desc) {
+		(void) ddi_dma_sync(rrp->desc.dma_hdl,
+		    sync_start * ngep->desc_attr.rxd_size,
+		    count * ngep->desc_attr.rxd_size,
+		    DDI_DMA_SYNC_FORDEV);
+	} else {
+		(void) ddi_dma_sync(rrp->desc.dma_hdl,
+		    sync_start * ngep->desc_attr.rxd_size,
+		    0,
+		    DDI_DMA_SYNC_FORDEV);
+		(void) ddi_dma_sync(rrp->desc.dma_hdl,
+		    0,
+		    (count + sync_start - ngep->rx_desc) *
+		    ngep->desc_attr.rxd_size,
+		    DDI_DMA_SYNC_FORDEV);
+	}
+	/* kick the chip to resume RX DMA */
+	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
+	mode_cntl.mode_bits.rxdm = NGE_SET;
+	mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
+	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
+
+	return (head);
+}
+
+void
+nge_receive(nge_t *ngep)
+{
+	mblk_t *mp;
+	recv_ring_t *rrp;
+	rrp = ngep->recv;
+
+	mp = nge_recv_ring(ngep);
+	/*
+	 * genlock is dropped across the upcall so the stack can
+	 * re-enter the driver without deadlocking; caller holds it
+	 * on entry and gets it back on return.
+	 */
+	mutex_exit(ngep->genlock);
+	if (mp != NULL)
+		mac_rx(ngep->mh, rrp->handle, mp);
+	mutex_enter(ngep->genlock);
+}
+
+void
+nge_hot_rxd_fill(void *hwd, const ddi_dma_cookie_t *cookie, size_t len)
+{
+	uint64_t dmac_addr;
+	hot_rx_bd * hw_bd_p;
+
+	hw_bd_p = (hot_rx_bd *)hwd;
+	/* skip the headroom so the hardware DMAs past it */
+	dmac_addr = cookie->dmac_laddress + NGE_HEADROOM;
+
+	hw_bd_p->cntl_status.cntl_val = 0;
+
+	/* "hot" descriptors carry a full 64-bit buffer address */
+	hw_bd_p->host_buf_addr_hi = dmac_addr >> 32;
+	hw_bd_p->host_buf_addr_lo = dmac_addr;
+	hw_bd_p->cntl_status.control_bits.bcnt = len - 1;
+
+	/* address/length must be visible before ownership transfers */
+	membar_producer();
+	hw_bd_p->cntl_status.control_bits.own = NGE_SET;
+}
+
+void
+nge_sum_rxd_fill(void *hwd, const ddi_dma_cookie_t *cookie, size_t len)
+{
+	uint64_t dmac_addr;
+	sum_rx_bd * hw_bd_p;
+
+	hw_bd_p = hwd;
+	/* "sum" descriptors use the 32-bit cookie address */
+	dmac_addr = cookie->dmac_address + NGE_HEADROOM;
+
+	hw_bd_p->cntl_status.cntl_val = 0;
+
+	hw_bd_p->host_buf_addr = dmac_addr;
+	hw_bd_p->cntl_status.control_bits.bcnt = len - 1;
+
+	/* address/length must be visible before ownership transfers */
+	membar_producer();
+	hw_bd_p->cntl_status.control_bits.own = NGE_SET;
+}
+
+uint32_t
+nge_hot_rxd_check(const void *hwd, size_t *len)
+{
+	uint32_t err_flag;
+	const hot_rx_bd * hrbdp;
+
+	hrbdp = hwd;
+
+	/* status = everything except the byte-count field */
+	err_flag = hrbdp->cntl_status.cntl_val & ~RXD_BCNT_MSK;
+	*len = hrbdp->cntl_status.status_bits_legacy.bcnt;
+
+	return (err_flag);
+}
+
+uint32_t
+nge_sum_rxd_check(const void *hwd, size_t *len)
+{
+	uint32_t err_flag;
+	const sum_rx_bd * hrbdp;
+
+	hrbdp = hwd;
+
+	/* status = everything except the byte-count field */
+	err_flag = hrbdp->cntl_status.cntl_val & ~RXD_BCNT_MSK;
+	*len = hrbdp->cntl_status.status_bits.bcnt;
+
+	return (err_flag);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nge/nge_tx.c Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,773 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file may contain confidential information of Nvidia
+ * and should not be distributed in source form without approval
+ * from Sun Legal.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "nge.h"
+
+#define TXD_OWN 0x80000000
+#define TXD_ERR 0x40000000
+#define TXD_END 0x20000000
+#define TXD_BCNT_MSK 0x00003FFF
+
+
+#undef NGE_DBG
+#define NGE_DBG NGE_DBG_SEND
+
+/* Reset a software TX descriptor to its idle (host-owned) state */
+#define	NGE_TXSWD_RECYCLE(sd)	{\
+					(sd)->mp = NULL; \
+					(sd)->frags = 0; \
+					(sd)->mp_hndl.head = NULL; \
+					(sd)->mp_hndl.tail = NULL; \
+					(sd)->flags = HOST_OWN; \
+				}
+
+
+static size_t nge_tx_dmah_pop(nge_dmah_list_t *, nge_dmah_list_t *, size_t);
+static void nge_tx_dmah_push(nge_dmah_list_t *, nge_dmah_list_t *);
+
+
+void nge_tx_recycle_all(nge_t *ngep);
+#pragma no_inline(nge_tx_recycle_all)
+
+void
+nge_tx_recycle_all(nge_t *ngep)
+{
+	send_ring_t *srp;
+	sw_tx_sbd_t *ssbdp;
+	nge_dmah_node_t *dmah;
+	uint32_t slot;
+	uint32_t nslots;
+
+	srp = ngep->send;
+	nslots = srp->desc.nslots;
+
+	/* walk every slot, regardless of ownership: used at reset/stop */
+	for (slot = 0; slot < nslots; ++slot) {
+
+		ssbdp = srp->sw_sbds + slot;
+
+		DMA_ZERO(ssbdp->desc);
+
+		if (ssbdp->mp != NULL) {
+
+			/* unbind every DMA handle mapped for this packet */
+			for (dmah = ssbdp->mp_hndl.head; dmah != NULL;
+			    dmah = dmah->next)
+				(void) ddi_dma_unbind_handle(dmah->hndl);
+
+			freemsg(ssbdp->mp);
+		}
+
+		NGE_TXSWD_RECYCLE(ssbdp);
+	}
+}
+
+/*
+ * Detach the first 'num' nodes from 'src' onto 'dst'.
+ * Returns 0 on success; nonzero means 'src' had fewer than
+ * 'num' nodes and nothing was moved.
+ * NOTE(review): assumes num >= 1 — with num == 0 the loop walks
+ * the whole list and can dereference a NULL node; confirm callers
+ * never pass 0.
+ */
+static size_t
+nge_tx_dmah_pop(nge_dmah_list_t *src, nge_dmah_list_t *dst, size_t num)
+{
+	nge_dmah_node_t	*node;
+
+	for (node = src->head; node != NULL && --num != 0; node = node->next)
+		;
+
+	if (num == 0)	{
+
+		dst->head = src->head;
+		dst->tail = node;
+
+		if ((src->head = node->next) == NULL)
+			src->tail = NULL;
+
+		node->next = NULL;
+	}
+
+	return (num);
+}
+
+/* Append the whole list 'src' to the tail of list 'dst'. */
+static void
+nge_tx_dmah_push(nge_dmah_list_t *src, nge_dmah_list_t *dst)
+{
+	if (dst->tail != NULL)
+		dst->tail->next = src->head;
+	else
+		dst->head = src->head;
+
+	dst->tail = src->tail;
+}
+
+/*
+ * DMA-sync 'num' TX descriptors starting at slot 'start',
+ * splitting into two ddi_dma_sync() calls when the range
+ * wraps around the end of the descriptor ring.
+ */
+static void
+nge_tx_desc_sync(nge_t *ngep, uint64_t start, uint64_t num, uint_t type)
+{
+	send_ring_t *srp = ngep->send;
+	const size_t txd_size = ngep->desc_attr.txd_size;
+	const uint64_t end = srp->desc.nslots * txd_size;
+
+	/* convert slot numbers into byte offsets/lengths */
+	start = start * txd_size;
+	num = num * txd_size;
+
+	if (start + num <= end)
+		(void) ddi_dma_sync(srp->desc.dma_hdl, start, num, type);
+	else	{
+
+		/* length 0 means "from offset to end of object" */
+		(void) ddi_dma_sync(srp->desc.dma_hdl, start, 0, type);
+		(void) ddi_dma_sync(srp->desc.dma_hdl, 0, start + num - end,
+		    type);
+	}
+}
+
+/*
+ * Reclaim the resource after tx's completion
+ */
+void
+nge_tx_recycle(nge_t *ngep, boolean_t is_intr)
+{
+	int resched;
+	uint32_t stflg;
+	size_t len;
+	uint64_t free;
+	uint64_t slot;
+	uint64_t used;
+	uint64_t next;
+	uint64_t nslots;
+	mblk_t *mp;
+	sw_tx_sbd_t *ssbdp;
+	void *hw_sbd_p;
+	send_ring_t *srp;
+	nge_dmah_node_t *dme;
+	nge_dmah_list_t dmah;
+
+	srp = ngep->send;
+
+	/* in interrupt context, don't block if someone else is reclaiming */
+	if (is_intr) {
+		if (mutex_tryenter(srp->tc_lock) == 0)
+			return;
+	} else
+		mutex_enter(srp->tc_lock);
+	mutex_enter(srp->tx_lock);
+
+	/* snapshot the producer-side state, then drop tx_lock */
+	next = srp->tx_next;
+	used = srp->tx_flow;
+	free = srp->tx_free;
+
+	mutex_exit(srp->tx_lock);
+
+	slot = srp->tc_next;
+	nslots = srp->desc.nslots;
+
+	/* slots submitted to hardware and not yet reclaimed */
+	used = nslots - free - used;
+
+	ASSERT(slot == NEXT_INDEX(next, free, nslots));
+
+	/* bound the work done per call */
+	if (used > srp->tx_hwmark)
+		used = srp->tx_hwmark;
+
+	nge_tx_desc_sync(ngep, slot, used, DDI_DMA_SYNC_FORKERNEL);
+
+	/*
+	 * Look through the send ring by bd's status part
+	 * to find all the bds which has been transmitted successfully
+	 * then reclaim all resources associated with these bds
+	 */
+
+	mp = NULL;
+	dmah.head = NULL;
+	dmah.tail = NULL;
+
+	for (free = 0; used-- != 0; slot = NEXT(slot, nslots), ++free)	{
+
+		ssbdp = &srp->sw_sbds[slot];
+		hw_sbd_p = DMA_VPTR(ssbdp->desc);
+
+		stflg = ngep->desc_attr.txd_check(hw_sbd_p, &len);
+
+		/* stop at the first descriptor the hardware still owns */
+		if (ssbdp->flags == HOST_OWN || (TXD_OWN & stflg) != 0)
+			break;
+
+		DMA_ZERO(ssbdp->desc);
+
+		if (ssbdp->mp != NULL)	{
+			/* chain mblks/handles; defer freeing until unlocked */
+			ssbdp->mp->b_next = mp;
+			mp = ssbdp->mp;
+
+			if (ssbdp->mp_hndl.head != NULL)
+				nge_tx_dmah_push(&ssbdp->mp_hndl, &dmah);
+		}
+
+		NGE_TXSWD_RECYCLE(ssbdp);
+	}
+
+	/*
+	 * We're about to release one or more places :-)
+	 * These ASSERTions check that our invariants still hold:
+	 *	there must always be at least one free place
+	 *	at this point, there must be at least one place NOT free
+	 *	we're not about to free more places than were claimed!
+	 */
+
+	mutex_enter(srp->tx_lock);
+
+	srp->tx_free += free;
+	/* watchdog stays armed only while descriptors remain outstanding */
+	ngep->watchdog = (srp->desc.nslots - srp->tx_free != 0);
+
+	srp->tc_next = slot;
+
+	ASSERT(srp->tx_free <= nslots);
+	ASSERT(srp->tc_next == NEXT_INDEX(srp->tx_next, srp->tx_free, nslots));
+
+	resched = (ngep->resched_needed != 0 && srp->tx_hwmark <= srp->tx_free);
+
+	mutex_exit(srp->tx_lock);
+	mutex_exit(srp->tc_lock);
+
+	/* unbind/free mblks */
+
+	for (dme = dmah.head; dme != NULL; dme = dme->next)
+		(void) ddi_dma_unbind_handle(dme->hndl);
+
+	mutex_enter(&srp->dmah_lock);
+	nge_tx_dmah_push(&dmah, &srp->dmah_free);
+	mutex_exit(&srp->dmah_lock);
+
+	freemsgchain(mp);
+
+	/*
+	 * up to this place, we may have reclaimed some resources
+	 * if there is a requirement to report to gld, report this.
+	 */
+
+	if (resched)
+		(void) ddi_intr_trigger_softint(ngep->resched_hdl, NULL);
+}
+
+/*
+ * Reserve 'num' contiguous TX ring slots.
+ * Returns the starting slot index, or (uint64_t)-1 if not enough
+ * slots are free even after attempting a reclaim.
+ */
+static uint64_t
+nge_tx_alloc(nge_t *ngep, uint64_t num)
+{
+	uint64_t start;
+	send_ring_t *srp;
+
+	start = (uint64_t)-1;
+	srp = ngep->send;
+
+	mutex_enter(srp->tx_lock);
+
+	/* running low: try a synchronous reclaim first */
+	if (srp->tx_free < srp->tx_lwmark)	{
+
+		mutex_exit(srp->tx_lock);
+		nge_tx_recycle(ngep, B_FALSE);
+		mutex_enter(srp->tx_lock);
+	}
+
+	if (srp->tx_free >= num)	{
+
+		start = srp->tx_next;
+
+		srp->tx_next = NEXT_INDEX(start, num, srp->desc.nslots);
+		srp->tx_free -= num;
+		srp->tx_flow += num;
+	}
+
+	mutex_exit(srp->tx_lock);
+	return (start);
+}
+
+/* Release 'slotnum' claimed slots; kick the hardware when last out. */
+static void
+nge_tx_start(nge_t *ngep, uint64_t slotnum)
+{
+	nge_mode_cntl mode_cntl;
+	send_ring_t *srp;
+
+	srp = ngep->send;
+
+	/*
+	 * Because there can be multiple concurrent threads in
+	 * transit through this code, we only want to notify the
+	 * hardware once the last one is departing ...
+	 */
+
+	mutex_enter(srp->tx_lock);
+
+	srp->tx_flow -= slotnum;
+	if (srp->tx_flow == 0) {
+
+		/*
+		 * Bump the watchdog counter, thus guaranteeing that it's
+		 * nonzero (watchdog activated). Note that non-synchonised
+		 * access here means we may race with the reclaim() code
+		 * above, but the outcome will be harmless. At worst, the
+		 * counter may not get reset on a partial reclaim; but the
+		 * large trigger threshold makes false positives unlikely
+		 */
+		ngep->watchdog ++;
+
+		/* tell the chip to start/resume TX DMA */
+		mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
+		mode_cntl.mode_bits.txdm = NGE_SET;
+		mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
+		nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
+	}
+	mutex_exit(srp->tx_lock);
+}
+
+static enum send_status
+nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp);
+#pragma inline(nge_send_copy)
+
+static enum send_status
+nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp)
+{
+	size_t totlen;
+	size_t mblen;
+	uint32_t flags;
+	uint64_t bds;
+	uint64_t start_index;
+	char *txb;
+	mblk_t *bp;
+	void *hw_sbd_p;
+	sw_tx_sbd_t *ssbdp;
+
+	/* capture any checksum-offload flags to pass to the descriptor */
+	hcksum_retrieve(mp, NULL, NULL, NULL, NULL,
+	    NULL, NULL, &flags);
+	/* the copy path always consumes exactly one descriptor */
+	bds = 0x1;
+
+	if ((uint64_t)-1 == (start_index = nge_tx_alloc(ngep, bds)))
+		return (SEND_COPY_FAIL);
+
+	ASSERT(start_index < srp->desc.nslots);
+
+	/*
+	 * up to this point, there's nothing that can fail,
+	 * so we can go straight to claiming our
+	 * already-reserved places on the train.
+	 *
+	 * This is the point of no return!
+	 */
+
+	bp = mp;
+	totlen = 0;
+	ssbdp = &srp->sw_sbds[start_index];
+	ASSERT(ssbdp->flags == HOST_OWN);
+
+	/* coalesce the mblk chain into the slot's pre-bound buffer */
+	txb = DMA_VPTR(ssbdp->pbuf);
+	totlen = 0;
+	for (; bp != NULL; bp = bp->b_cont) {
+		if ((mblen = MBLKL(bp)) == 0)
+			continue;
+		/*
+		 * NOTE(review): if the chain exceeds max_sdu the excess
+		 * is not copied, but totlen still accumulates it, so the
+		 * descriptor length below would exceed the bytes actually
+		 * copied — confirm callers guarantee len <= max_sdu.
+		 */
+		if ((totlen += mblen) <= ngep->max_sdu) {
+			bcopy(bp->b_rptr, txb, mblen);
+			txb += mblen;
+		}
+	}
+
+	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);
+
+	/* Fill & sync hw desc */
+
+	hw_sbd_p = DMA_VPTR(ssbdp->desc);
+
+	ngep->desc_attr.txd_fill(hw_sbd_p, &ssbdp->pbuf.cookie, totlen,
+	    flags, B_TRUE);
+	nge_tx_desc_sync(ngep, start_index, bds, DDI_DMA_SYNC_FORDEV);
+
+	ssbdp->flags = CONTROLER_OWN;
+
+	nge_tx_start(ngep, bds);
+
+	/*
+	 * The return status indicates that the message can be freed
+	 * right away, as we've already copied the contents ...
+	 */
+
+	freemsg(mp);
+	return (SEND_COPY_SUCESS);
+}
+
+/*
+ * nge_send_mapped() -- transmit one message via the zero-copy path:
+ * DMA-bind every non-empty fragment in place, then describe each DMA
+ * cookie with its own tx descriptor.  The message is retained (not
+ * freed) until transmit completion reclaims it.
+ *
+ * static enum send_status
+ * nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno);
+ * #pragma inline(nge_send_mapped)
+ */
+
+static enum send_status
+nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno)
+{
+	int err;
+	boolean_t end;
+	uint32_t i;
+	uint32_t j;
+	uint32_t ncookies;
+	uint32_t slot;
+	uint32_t nslots;
+	uint32_t mblen;
+	uint32_t flags;
+	uint64_t start_index;
+	uint64_t end_index;
+	mblk_t *bp;
+	void *hw_sbd_p;
+	send_ring_t *srp;
+	nge_dmah_node_t *dmah;
+	nge_dmah_node_t *dmer;
+	nge_dmah_list_t dmah_list;
+	ddi_dma_cookie_t cookie[NGE_MAX_COOKIES * NGE_MAP_FRAGS];
+
+	srp = ngep->send;
+	nslots = srp->desc.nslots;
+
+	/* reserve one free DMA handle per fragment up front */
+	mutex_enter(&srp->dmah_lock);
+	err = nge_tx_dmah_pop(&srp->dmah_free, &dmah_list, fragno);
+	mutex_exit(&srp->dmah_lock);
+
+	if (err != 0) {
+
+		return (SEND_MAP_FAIL);
+	}
+
+	/*
+	 * Pre-scan the message chain, noting the total number of bytes,
+	 * the number of fragments by pre-doing dma addr bind
+	 * if the fragment is larger than NGE_COPY_SIZE.
+	 * This way has the following advantages:
+	 * 1. Acquire the detailed information of resource
+	 *	need to send the message
+	 *
+	 * 2. If can not pre-apply enough resource, fails at once
+	 *	and the driver will chose copy way to send out the
+	 *	message
+	 */
+
+	slot = 0;
+	dmah = dmah_list.head;
+
+	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &flags);
+
+	for (bp = mp; bp != NULL; bp = bp->b_cont) {
+
+		mblen = MBLKL(bp);
+		if (mblen == 0)
+			continue;
+
+		err = ddi_dma_addr_bind_handle(dmah->hndl,
+		    NULL, (caddr_t)bp->b_rptr, mblen,
+		    DDI_DMA_STREAMING | DDI_DMA_WRITE,
+		    DDI_DMA_DONTWAIT, NULL, cookie + slot, &ncookies);
+
+		/*
+		 * If it can not map successfully, it is unnecessary to
+		 * send the message by the mapped way.  Fall back to
+		 * sending the message by the copy way.
+		 *
+		 * By referring to intel's suggestion, it is better
+		 * the number of cookies should be less than 4.
+		 */
+		if (err != DDI_DMA_MAPPED || ncookies > NGE_MAX_COOKIES) {
+			NGE_DEBUG(("err(%x) map tx bulk fails"
+			    " cookie(%x), ncookies(%x)",
+			    err, cookie[slot].dmac_laddress, ncookies));
+			goto map_fail;
+		}
+
+		/*
+		 * Check how many bds a cookie will consume; the loop walks
+		 * the remaining cookies of this binding into cookie[].
+		 */
+		for (end_index = slot + ncookies;
+		    ++slot != end_index;
+		    ddi_dma_nextcookie(dmah->hndl, cookie + slot))
+			;
+
+		dmah = dmah->next;
+	}
+
+	/*
+	 * Now allocate tx descriptors and fill them
+	 * IMPORTANT:
+	 * Up to the point where it claims a place, it is impossible
+	 * to fail.
+	 *
+	 * In this version, there's no setup to be done here, and there's
+	 * nothing that can fail, so we can go straight to claiming our
+	 * already-reserved places on the train.
+	 *
+	 * This is the point of no return!
+	 */
+
+
+	if ((uint64_t)-1 == (start_index = nge_tx_alloc(ngep, slot)))
+		goto map_fail;
+
+	ASSERT(start_index < nslots);
+
+	/* fill&sync hw desc, going in reverse order */
+
+	end = B_TRUE;
+	end_index = NEXT_INDEX(start_index, slot - 1, nslots);
+
+	for (i = slot - 1, j = end_index; start_index - j != 0;
+	    j = PREV(j, nslots), --i) {
+
+		hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
+		ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i,
+		    cookie[i].dmac_size, 0, end);
+
+		end = B_FALSE;
+	}
+
+	/* first descriptor (filled last) carries the checksum flags */
+	hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
+	ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i, cookie[i].dmac_size,
+	    flags, end);
+
+	nge_tx_desc_sync(ngep, start_index, slot, DDI_DMA_SYNC_FORDEV);
+
+	/* fill sw desc */
+
+	for (j = start_index; end_index - j != 0; j = NEXT(j, nslots)) {
+
+		srp->sw_sbds[j].flags = CONTROLER_OWN;
+	}
+
+	/* the last slot remembers the mblk & its handles for later reclaim */
+	srp->sw_sbds[j].mp = mp;
+	srp->sw_sbds[j].mp_hndl = dmah_list;
+	srp->sw_sbds[j].frags = fragno;
+	srp->sw_sbds[j].flags = CONTROLER_OWN;
+
+	nge_tx_start(ngep, slot);
+
+	/*
+	 * The return status indicates that the message can not be freed
+	 * right away, until we can make sure the message has been sent
+	 * out successfully.
+	 */
+	return (SEND_MAP_SUCCESS);
+
+map_fail:
+	/* unbind only the handles bound so far (dmah marks the boundary) */
+	for (dmer = dmah_list.head; dmah - dmer != 0; dmer = dmer->next)
+		(void) ddi_dma_unbind_handle(dmer->hndl);
+
+	mutex_enter(&srp->dmah_lock);
+	nge_tx_dmah_push(&dmah_list, &srp->dmah_free);
+	mutex_exit(&srp->dmah_lock);
+
+	return (SEND_MAP_FAIL);
+}
+
+/*
+ * nge_send() -- send a single message, choosing between the mapped
+ * (zero-copy) and copy transmit paths.
+ *
+ * Returns B_TRUE when the message was consumed (sent or dropped),
+ * B_FALSE when transmission must be retried later (resched_needed
+ * is set so the soft interrupt can call mac_tx_update()).
+ */
+static boolean_t
+nge_send(nge_t *ngep, mblk_t *mp)
+{
+	mblk_t *bp;
+	send_ring_t *srp;
+	enum send_status status;
+	uint32_t mblen = 0;
+	uint32_t frags = 0;
+	nge_statistics_t *nstp = &ngep->statistics;
+	nge_sw_statistics_t *sw_stp = &nstp->sw_statistics;
+
+	ASSERT(mp != NULL);
+	ASSERT(ngep->nge_mac_state == NGE_MAC_STARTED);
+
+	srp = ngep->send;
+	/*
+	 * Count the non-empty fragments and the total length of the
+	 * message.  The mapped way is chosen only when the message is
+	 * longer than param_txbcopy_threshold AND enough descriptors
+	 * are free (frags * NGE_MAX_COOKIES); otherwise the copy way
+	 * is used.  Oversize or empty messages are silently dropped.
+	 */
+	for (frags = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
+		if (MBLKL(bp) == 0)
+			continue;
+		frags++;
+		mblen += MBLKL(bp);
+	}
+	if (mblen > (ngep->max_sdu) || mblen == 0) {
+		freemsg(mp);
+		return (B_TRUE);
+	}
+
+	if ((mblen > ngep->param_txbcopy_threshold) &&
+	    (srp->tx_free > frags * NGE_MAX_COOKIES)) {
+		status = nge_send_mapped(ngep, mp, frags);
+		if (status == SEND_MAP_FAIL)
+			status = nge_send_copy(ngep, mp, srp);
+	} else {
+		status = nge_send_copy(ngep, mp, srp);
+	}
+	if (status == SEND_COPY_FAIL) {
+		/* reclaim completed descriptors, then retry once */
+		nge_tx_recycle(ngep, B_FALSE);
+		status = nge_send_copy(ngep, mp, srp);
+		if (status == SEND_COPY_FAIL) {
+			ngep->resched_needed = 1;
+			NGE_DEBUG(("nge_send: send fail!"));
+			return (B_FALSE);
+		}
+	}
+	/* Update the software statistics */
+	sw_stp->obytes += mblen + ETHERFCSL;
+	sw_stp->xmit_count ++;
+
+	return (B_TRUE);
+}
+
+/*
+ * nge_m_tx : Send a chain of packets.
+ *
+ * MAC-layer transmit entry point.  Packets are sent one at a time
+ * until nge_send() reports failure; the unsent remainder of the
+ * chain is handed back to the caller for a later retry.  If the
+ * chip is not running the entire chain is freed and NULL returned.
+ */
+mblk_t *
+nge_m_tx(void *arg, mblk_t *mp)
+{
+	nge_t *ngep = arg;
+	mblk_t *next;
+
+	/* reader lock: allows concurrent senders, excludes reconfiguration */
+	rw_enter(ngep->rwlock, RW_READER);
+	ASSERT(mp != NULL);
+	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
+		freemsgchain(mp);
+		mp = NULL;
+	}
+	while (mp != NULL) {
+		next = mp->b_next;
+		mp->b_next = NULL;
+
+		if (!nge_send(ngep, mp)) {
+			/* re-link the failed packet and stop */
+			mp->b_next = next;
+			break;
+		}
+
+		mp = next;
+	}
+	rw_exit(ngep->rwlock);
+
+	return (mp);
+}
+
+/* ARGSUSED */
+/*
+ * Soft-interrupt handler: when a previous nge_send() failed and set
+ * resched_needed, notify the MAC layer that it may resume transmits.
+ */
+uint_t
+nge_reschedule(caddr_t args1, caddr_t args2)
+{
+	nge_t *ngep;
+	uint_t rslt;
+
+	ngep = (nge_t *)args1;
+	rslt = DDI_INTR_UNCLAIMED;
+
+	/*
+	 * when softintr is triggered, check whether this
+	 * is caused by our expected interrupt
+	 */
+	if (ngep->nge_mac_state == NGE_MAC_STARTED &&
+	    ngep->resched_needed == 1) {
+		ngep->resched_needed = 0;
+		++ngep->statistics.sw_statistics.tx_resched;
+		mac_tx_update(ngep->mh);
+		rslt = DDI_INTR_CLAIMED;
+	}
+	return (rslt);
+}
+
+/*
+ * Extract completion status from a "hot"-style tx descriptor:
+ * returns the control/status word with the byte-count field masked
+ * off (non-zero bits indicate errors/flags); *len gets the byte count.
+ */
+uint32_t
+nge_hot_txd_check(const void *hwd, size_t *len)
+{
+	uint32_t err_flag;
+	const hot_tx_bd * htbdp;
+
+	htbdp = hwd;
+	err_flag = htbdp->control_status.cntl_val & ~TXD_BCNT_MSK;
+
+	*len = htbdp->control_status.status_bits.bcnt;
+	return (err_flag);
+}
+
+/*
+ * Extract completion status from a "sum"-style tx descriptor:
+ * returns the control/status word with the byte-count field masked
+ * off; *len gets the byte count.  Mirrors nge_hot_txd_check().
+ */
+uint32_t
+nge_sum_txd_check(const void *hwd, size_t *len)
+{
+	uint32_t err_flag;
+	const sum_tx_bd * htbdp;
+
+	htbdp = hwd;
+	err_flag = htbdp->control_status.cntl_val & ~TXD_BCNT_MSK;
+
+	*len = htbdp->control_status.status_bits.bcnt;
+	return (err_flag);
+}
+
+
+/*
+ * Filling the contents of Tx's data descriptor
+ * before transmitting.
+ *
+ * "Hot" descriptor variant: programs a 64-bit buffer address
+ * (hi/lo halves), the length, checksum-offload flags and the
+ * end-of-packet marker, then transfers ownership to the chip.
+ */
+
+void
+nge_hot_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
+    size_t length, uint32_t sum_flag, boolean_t end)
+{
+	hot_tx_bd * hw_sbd_p = hwdesc;
+
+	hw_sbd_p->host_buf_addr_hi = cookie->dmac_laddress >> 32;
+	hw_sbd_p->host_buf_addr_lo = cookie->dmac_laddress;
+
+	/*
+	 * Setting the length of the packet
+	 * Note: the length filled in the part should be
+	 * the original length subtract 1;
+	 */
+
+	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;
+
+	/* setting ip checksum */
+	if (sum_flag & HCK_IPV4_HDRCKSUM)
+		hw_sbd_p->control_status.control_sum_bits.ip_hsum
+		    = NGE_SET;
+	/* setting tcp checksum */
+	if (sum_flag & HCK_FULLCKSUM)
+		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
+		    = NGE_SET;
+	/*
+	 * indicating the end of BDs
+	 */
+	if (end)
+		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;
+
+	/* all fields must be visible before the ownership bit flips */
+	membar_producer();
+
+	/* pass desc to HW */
+	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
+}
+
+/*
+ * "Sum" descriptor variant of the tx-descriptor fill routine:
+ * identical to nge_hot_txd_fill() except that it programs a single
+ * 32-bit buffer address (dmac_address).
+ */
+void
+nge_sum_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
+    size_t length, uint32_t sum_flag, boolean_t end)
+{
+	sum_tx_bd * hw_sbd_p = hwdesc;
+
+	hw_sbd_p->host_buf_addr = cookie->dmac_address;
+
+	/*
+	 * Setting the length of the packet
+	 * Note: the length filled in the part should be
+	 * the original length subtract 1;
+	 */
+
+	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;
+
+	/* setting ip checksum */
+	if (sum_flag & HCK_IPV4_HDRCKSUM)
+		hw_sbd_p->control_status.control_sum_bits.ip_hsum
+		    = NGE_SET;
+	/* setting tcp checksum */
+	if (sum_flag & HCK_FULLCKSUM)
+		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
+		    = NGE_SET;
+	/*
+	 * indicating the end of BDs
+	 */
+	if (end)
+		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;
+
+	/* all fields must be visible before the ownership bit flips */
+	membar_producer();
+
+	/* pass desc to HW */
+	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/common/io/nge/nge_xmii.c Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,661 @@
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * This file may contain confidential information of Nvidia
+ * and should not be distributed in source form without approval
+ * from Sun Legal.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include "nge.h"
+
+#undef NGE_DBG
+#define NGE_DBG NGE_DBG_MII /* debug flag for this code */
+
+/*
+ * The arrays below can be indexed by the MODE bits from the mac2phy
+ * register to determine the current speed/duplex settings.
+ */
+static const int16_t nge_copper_link_speed[] = {
+	0,				/* MII_AUX_STATUS_MODE_NONE	*/
+	10,				/* MII_AUX_STATUS_MODE_10	*/
+	100,				/* MII_AUX_STATUS_MODE_100	*/
+	1000,				/* MII_AUX_STATUS_MODE_1000	*/
+};
+
+static const int8_t nge_copper_link_duplex[] = {
+	LINK_DUPLEX_UNKNOWN,		/* MII_DUPLEX_NONE	*/
+	LINK_DUPLEX_HALF,		/* MII_DUPLEX_HALF	*/
+	LINK_DUPLEX_FULL,		/* MII_DUPLEX_FULL	*/
+};
+
+
+/*
+ * nge_mii_access() -- perform one MDIO read or write transaction
+ * against the PHY at ngep->phy_xmii_addr.
+ *
+ * Returns the register value for NGE_MDIO_READ, or (uint16_t)~0 if
+ * the transaction times out or the MDIO error bit (mrei) is set.
+ * NOTE(review): on the NGE_MDIO_WRITE path mdio_data is returned
+ * without ever being assigned; callers of writes discard the result.
+ */
+static uint16_t nge_mii_access(nge_t *ngep, nge_regno_t regno,
+    uint16_t data, uint32_t cmd);
+#pragma inline(nge_mii_access)
+
+static uint16_t
+nge_mii_access(nge_t *ngep, nge_regno_t regno, uint16_t data, uint32_t cmd)
+{
+	uint16_t tries;
+	uint16_t mdio_data;
+	nge_mdio_adr mdio_adr;
+	nge_mintr_src intr_src;
+
+	NGE_TRACE(("nge_mii_access($%p, 0x%lx, 0x%x, 0x%x)",
+	    (void *)ngep, regno, data, cmd));
+
+	/*
+	 * Clear the previous interrupt event
+	 */
+	intr_src.src_val = nge_reg_get8(ngep, NGE_MINTR_SRC);
+	nge_reg_put8(ngep, NGE_MINTR_SRC, intr_src.src_val);
+
+	/*
+	 * Check whether the current operation has been finished
+	 * (poll up to 30 x 10us for the command-complete bit)
+	 */
+	mdio_adr.adr_val = nge_reg_get16(ngep, NGE_MDIO_ADR);
+	for (tries = 0; tries < 30; tries ++) {
+		if (mdio_adr.adr_bits.mdio_clc == NGE_CLEAR)
+			break;
+		drv_usecwait(10);
+		mdio_adr.adr_val = nge_reg_get16(ngep, NGE_MDIO_ADR);
+	}
+
+	/*
+	 * The current operation can not be finished successfully
+	 * The driver should halt the current operation
+	 */
+	if (tries == 30) {
+		mdio_adr.adr_bits.mdio_clc = NGE_SET;
+		nge_reg_put16(ngep, NGE_MDIO_ADR, mdio_adr.adr_val);
+		drv_usecwait(100);
+	}
+
+	/*
+	 * Assemble the operation cmd
+	 */
+	mdio_adr.adr_bits.phy_reg = regno;
+	mdio_adr.adr_bits.phy_adr = ngep->phy_xmii_addr;
+	mdio_adr.adr_bits.mdio_rw = (cmd == NGE_MDIO_WRITE) ? 1 : 0;
+
+
+	if (cmd == NGE_MDIO_WRITE)
+		nge_reg_put16(ngep, NGE_MDIO_DATA, data);
+
+	nge_reg_put16(ngep, NGE_MDIO_ADR, mdio_adr.adr_val);
+
+	/*
+	 * To check whether the read/write operation is finished
+	 * (poll up to 300 x 10us = 3ms)
+	 */
+	for (tries = 0; tries < 300; tries ++) {
+		drv_usecwait(10);
+		mdio_adr.adr_val = nge_reg_get16(ngep, NGE_MDIO_ADR);
+		if (mdio_adr.adr_bits.mdio_clc == NGE_CLEAR)
+			break;
+	}
+	if (tries == 300)
+		return ((uint16_t)~0);
+
+	/*
+	 * Read the data from MDIO data register
+	 */
+	if (cmd == NGE_MDIO_READ)
+		mdio_data = nge_reg_get16(ngep, NGE_MDIO_DATA);
+
+	/*
+	 * To check whether the read/write operation is valid
+	 */
+	intr_src.src_val = nge_reg_get8(ngep, NGE_MINTR_SRC);
+	nge_reg_put8(ngep, NGE_MINTR_SRC, intr_src.src_val);
+	if (intr_src.src_bits.mrei == NGE_SET)
+		return ((uint16_t)~0);
+
+	return (mdio_data);
+}
+
+/* Read one 16-bit MII register via nge_mii_access(). */
+uint16_t nge_mii_get16(nge_t *ngep, nge_regno_t regno);
+#pragma inline(nge_mii_get16)
+
+uint16_t
+nge_mii_get16(nge_t *ngep, nge_regno_t regno)
+{
+
+	return (nge_mii_access(ngep, regno, 0, NGE_MDIO_READ));
+}
+
+/* Write one 16-bit MII register via nge_mii_access(); status discarded. */
+void nge_mii_put16(nge_t *ngep, nge_regno_t regno, uint16_t data);
+#pragma inline(nge_mii_put16)
+
+void
+nge_mii_put16(nge_t *ngep, nge_regno_t regno, uint16_t data)
+{
+
+	(void) nge_mii_access(ngep, regno, data, NGE_MDIO_WRITE);
+}
+
+/*
+ * Basic low-level function to probe for a PHY
+ *
+ * Returns TRUE if the PHY responds with valid data, FALSE otherwise
+ * Side effect: on success, ngep->phy_xmii_addr holds the responding
+ * address and ngep->phy_id the composed 32-bit PHY identifier.
+ */
+static boolean_t
+nge_phy_probe(nge_t *ngep)
+{
+	int i;
+	uint16_t phy_status;
+	uint16_t phyidh;
+	uint16_t phyidl;
+
+	NGE_TRACE(("nge_phy_probe($%p)", (void *)ngep));
+
+	/*
+	 * Scan the phys to find the right address
+	 * of the phy
+	 *
+	 * Probe maximum for 32 phy addresses
+	 */
+	for (i = 0; i < NGE_PHY_NUMBER; i++) {
+		ngep->phy_xmii_addr = i;
+		/*
+		 * Read the MII_STATUS register twice, in
+		 * order to clear any sticky bits (but they should
+		 * have been cleared by the RESET, I think).
+		 */
+		phy_status = nge_mii_get16(ngep, MII_STATUS);
+		phy_status = nge_mii_get16(ngep, MII_STATUS);
+		/* 0xffff means no device / failed MDIO transaction */
+		if (phy_status != 0xffff) {
+			phyidh = nge_mii_get16(ngep, MII_PHYIDH);
+			phyidl = nge_mii_get16(ngep, MII_PHYIDL);
+			ngep->phy_id =
+			    (((uint32_t)phyidh << 16) |(phyidl & MII_IDL_MASK));
+			NGE_DEBUG(("nge_phy_probe: status 0x%x, phy id 0x%x",
+			    phy_status, ngep->phy_id));
+
+			return (B_TRUE);
+		}
+	}
+
+	return (B_FALSE);
+}
+
+
+/*
+ * Basic low-level function to powerup the phy and remove the isolation
+ *
+ * Returns B_TRUE once the PWRDN bit reads back clear (polled up to
+ * 9 times, 5us apart); B_FALSE if the PHY stays powered down.
+ */
+
+static boolean_t
+nge_phy_recover(nge_t *ngep)
+{
+	uint16_t control;
+	uint16_t count;
+
+	NGE_TRACE(("nge_phy_recover($%p)", (void *)ngep));
+	control = nge_mii_get16(ngep, MII_CONTROL);
+	control &= ~(MII_CONTROL_PWRDN | MII_CONTROL_ISOLATE);
+	nge_mii_put16(ngep, MII_CONTROL, control);
+	for (count = 0; ++count < 10; ) {
+		drv_usecwait(5);
+		control = nge_mii_get16(ngep, MII_CONTROL);
+		if (BIC(control, MII_CONTROL_PWRDN))
+			return (B_TRUE);
+	}
+
+	return (B_FALSE);
+}
+/*
+ * Basic low-level function to reset the PHY.
+ * Doesn't incorporate any special-case workarounds.
+ *
+ * Returns TRUE on success, FALSE if the RESET bit doesn't clear
+ */
+boolean_t
+nge_phy_reset(nge_t *ngep)
+{
+	uint16_t control;
+	uint_t count;
+
+	NGE_TRACE(("nge_phy_reset($%p)", (void *)ngep));
+
+	ASSERT(mutex_owned(ngep->genlock));
+
+	/*
+	 * Set the PHY RESET bit, then wait for it to self-clear.
+	 * NOTE(review): the original comment said "up to 5 ms", but the
+	 * loop below only waits 30us + 9 x 5us (~75us total) -- confirm
+	 * this budget against the PHY's reset timing.
+	 */
+	control = nge_mii_get16(ngep, MII_CONTROL);
+	control |= MII_CONTROL_RESET;
+	nge_mii_put16(ngep, MII_CONTROL, control);
+	drv_usecwait(30);
+	for (count = 0; ++count < 10; ) {
+		drv_usecwait(5);
+		control = nge_mii_get16(ngep, MII_CONTROL);
+		if (BIC(control, MII_CONTROL_RESET))
+			return (B_TRUE);
+	}
+	NGE_DEBUG(("nge_phy_reset: FAILED, control now 0x%x", control));
+
+	return (B_FALSE);
+}
+
+/*
+ * Power up and reset the PHY, then apply Cicada-specific fixups:
+ * in RGMII mode select RGMII operation with a 2.5V supply and raise
+ * the pin priority; otherwise disable echo mode and the 125MHz clock.
+ */
+static void
+nge_phy_restart(nge_t *ngep)
+{
+	uint16_t mii_reg;
+
+	(void) nge_phy_recover(ngep);
+	(void) nge_phy_reset(ngep);
+	if (PHY_MANUFACTURER(ngep->phy_id) == MII_ID_CICADA) {
+		if (ngep->phy_mode == RGMII_IN) {
+			mii_reg = nge_mii_get16(ngep,
+			    MII_CICADA_EXT_CONTROL);
+			mii_reg &= ~(MII_CICADA_MODE_SELECT_BITS
+			    | MII_CICADA_POWER_SUPPLY_BITS);
+			mii_reg |= (MII_CICADA_MODE_SELECT_RGMII
+			    | MII_CICADA_POWER_SUPPLY_2_5V);
+			nge_mii_put16(ngep, MII_CICADA_EXT_CONTROL, mii_reg);
+
+			mii_reg = nge_mii_get16(ngep,
+			    MII_CICADA_AUXCTRL_STATUS);
+			mii_reg |= MII_CICADA_PIN_PRORITY_SETTING;
+			nge_mii_put16(ngep, MII_CICADA_AUXCTRL_STATUS,
+			    mii_reg);
+		} else {
+			mii_reg = nge_mii_get16(ngep,
+			    MII_CICADA_10BASET_CONTROL);
+			mii_reg |= MII_CICADA_DISABLE_ECHO_MODE;
+			nge_mii_put16(ngep,
+			    MII_CICADA_10BASET_CONTROL, mii_reg);
+
+			mii_reg = nge_mii_get16(ngep,
+			    MII_CICADA_BYPASS_CONTROL);
+			mii_reg &= (~CICADA_125MHZ_CLOCK_ENABLE);
+			nge_mii_put16(ngep, MII_CICADA_BYPASS_CONTROL, mii_reg);
+		}
+	}
+}
+
+/*
+ * Synchronise the (copper) PHY's speed/duplex/autonegotiation capabilities
+ * and advertisements with the required settings as specified by the various
+ * param_* variables that can be poked via the NDD interface.
+ *
+ * We always reset the PHY and reprogram *all* the relevant registers,
+ * not just those changed. This should cause the link to go down, and then
+ * back up again once the link is stable and autonegotiation (if enabled)
+ * is complete. We should get a link state change interrupt somewhere along
+ * the way ...
+ *
+ * NOTE: <genlock> must already be held by the caller
+ */
+static void
+nge_update_copper(nge_t *ngep)
+{
+	uint16_t control;
+	uint16_t gigctrl;
+	uint16_t anar;
+	boolean_t adv_autoneg;
+	boolean_t adv_pause;
+	boolean_t adv_asym_pause;
+	boolean_t adv_1000fdx;
+	boolean_t adv_100fdx;
+	boolean_t adv_100hdx;
+	boolean_t adv_10fdx;
+	boolean_t adv_10hdx;
+
+	NGE_TRACE(("nge_update_copper($%p)", (void *)ngep));
+
+	ASSERT(mutex_owned(ngep->genlock));
+
+	NGE_DEBUG(("nge_update_copper: autoneg %d "
+	    "pause %d asym_pause %d "
+	    "1000fdx %d "
+	    "100fdx %d 100hdx %d "
+	    "10fdx %d 10hdx %d ",
+	    ngep->param_adv_autoneg,
+	    ngep->param_adv_pause, ngep->param_adv_asym_pause,
+	    ngep->param_adv_1000fdx,
+	    ngep->param_adv_100fdx, ngep->param_adv_100hdx,
+	    ngep->param_adv_10fdx, ngep->param_adv_10hdx));
+
+	control = anar = gigctrl = 0;
+
+	/*
+	 * PHY settings are normally based on the param_* variables,
+	 * but if any loopback mode is in effect, that takes precedence.
+	 *
+	 * NGE supports MAC-internal loopback, PHY-internal loopback,
+	 * and External loopback at a variety of speeds (with a special
+	 * cable). In all cases, autoneg is turned OFF, full-duplex
+	 * is turned ON, and the speed/mastership is forced.
+	 */
+	switch (ngep->param_loop_mode) {
+	case NGE_LOOP_NONE:
+	default:
+		adv_pause = ngep->param_adv_pause;
+		adv_autoneg = ngep->param_adv_autoneg;
+		adv_asym_pause = ngep->param_adv_asym_pause;
+		if (ngep->phy_mode == MII_IN) {
+			/*
+			 * MII (10/100-only) interface: gigabit cannot be
+			 * advertised.  The unconditional assignment below
+			 * re-reads the just-cleared param, so adv_1000fdx
+			 * ends up B_FALSE either way.
+			 */
+			adv_1000fdx = ngep->param_adv_1000fdx = B_FALSE;
+		}
+		adv_1000fdx = ngep->param_adv_1000fdx;
+		adv_100fdx = ngep->param_adv_100fdx;
+		adv_100hdx = ngep->param_adv_100hdx;
+		adv_10fdx = ngep->param_adv_10fdx;
+		adv_10hdx = ngep->param_adv_10hdx;
+
+		break;
+
+	case NGE_LOOP_EXTERNAL_100:
+	case NGE_LOOP_EXTERNAL_10:
+	case NGE_LOOP_INTERNAL_PHY:
+		adv_autoneg = adv_pause = adv_asym_pause = B_FALSE;
+		adv_1000fdx = adv_100fdx = adv_10fdx = B_FALSE;
+		adv_100hdx = adv_10hdx = B_FALSE;
+		ngep->param_link_duplex = LINK_DUPLEX_FULL;
+
+		switch (ngep->param_loop_mode) {
+		case NGE_LOOP_EXTERNAL_100:
+			ngep->param_link_speed = 100;
+			adv_100fdx = B_TRUE;
+			break;
+
+		case NGE_LOOP_EXTERNAL_10:
+			ngep->param_link_speed = 10;
+			adv_10fdx = B_TRUE;
+			break;
+
+		case NGE_LOOP_INTERNAL_PHY:
+			ngep->param_link_speed = 1000;
+			adv_1000fdx = B_TRUE;
+			break;
+
+		}
+	}
+	NGE_DEBUG(("nge_update_copper: autoneg %d "
+	    "pause %d asym_pause %d "
+	    "1000fdx %d "
+	    "100fdx %d 100hdx %d "
+	    "10fdx %d 10hdx %d ",
+	    adv_autoneg,
+	    adv_pause, adv_asym_pause,
+	    adv_1000fdx,
+	    adv_100fdx, adv_100hdx,
+	    adv_10fdx, adv_10hdx));
+
+	/*
+	 * We should have at least one technology capability set;
+	 * if not, we select a default of 10Mb/s half-duplex
+	 */
+	if (!adv_1000fdx && !adv_100fdx && !adv_10fdx &&
+	    !adv_100hdx && !adv_10hdx)
+		adv_10hdx = B_TRUE;
+
+	/*
+	 * Now transform the adv_* variables into the proper settings
+	 * of the PHY registers ...
+	 *
+	 * If autonegotiation is (now) enabled, we want to trigger
+	 * a new autonegotiation cycle once the PHY has been
+	 * programmed with the capabilities to be advertised.
+	 */
+	if (adv_autoneg)
+		control |= MII_CONTROL_ANE|MII_CONTROL_RSAN;
+
+	if (adv_1000fdx)
+		control |= MII_CONTROL_1000MB|MII_CONTROL_FDUPLEX;
+	else if (adv_100fdx)
+		control |= MII_CONTROL_100MB|MII_CONTROL_FDUPLEX;
+	else if (adv_100hdx)
+		control |= MII_CONTROL_100MB;
+	else if (adv_10fdx)
+		control |= MII_CONTROL_FDUPLEX;
+	else if (adv_10hdx)
+		control |= 0;
+	else
+		{ _NOTE(EMPTY); }	/* Can't get here anyway ...	*/
+
+	if (adv_1000fdx)
+		gigctrl |= MII_1000BT_CTL_ADV_FDX;
+	if (adv_100fdx)
+		anar |= MII_ABILITY_100BASE_TX_FD;
+	if (adv_100hdx)
+		anar |= MII_ABILITY_100BASE_TX;
+	if (adv_10fdx)
+		anar |= MII_ABILITY_10BASE_T_FD;
+	if (adv_10hdx)
+		anar |= MII_ABILITY_10BASE_T;
+
+	if (adv_pause)
+		anar |= MII_ABILITY_PAUSE;
+	if (adv_asym_pause)
+		anar |= MII_ABILITY_ASYM_PAUSE;
+
+	/*
+	 * Munge in any other fixed bits we require ...
+	 */
+	anar |= MII_AN_SELECTOR_8023;
+
+	/*
+	 * Restart the PHY and write the new values.
+	 */
+	nge_mii_put16(ngep, MII_AN_ADVERT, anar);
+	nge_mii_put16(ngep, MII_CONTROL, control);
+	nge_mii_put16(ngep, MII_1000BASE_T_CONTROL, gigctrl);
+	nge_phy_restart(ngep);
+
+	/*
+	 * Loopback bit in control register is not reset sticky;
+	 * write it after PHY restart.
+	 */
+	if (ngep->param_loop_mode == NGE_LOOP_INTERNAL_PHY) {
+		control = nge_mii_get16(ngep, MII_CONTROL);
+		control |= MII_CONTROL_LOOPBACK;
+		nge_mii_put16(ngep, MII_CONTROL, control);
+	}
+}
+
+/*
+ * nge_check_copper() -- read the PHY/MAC status registers (looping
+ * until the PHY status stabilises), resolve the negotiated speed and
+ * duplex according to the interface mode (RGMII or MII), and update
+ * the param_link_* / param_lp_* state accordingly.
+ *
+ * NOTE(review): always returns B_FALSE -- confirm the caller treats
+ * the return value as "no further action needed" rather than "link
+ * state changed".
+ */
+static boolean_t
+nge_check_copper(nge_t *ngep)
+{
+	uint16_t mii_status;
+	uint16_t mii_exstatus;
+	uint16_t mii_excontrol;
+	uint16_t anar;
+	uint16_t lpan;
+	uint_t speed;
+	uint_t duplex;
+	boolean_t linkup;
+	nge_mii_cs mii_cs;
+	nge_mintr_src mintr_src;
+
+	speed = UNKOWN_SPEED;
+	duplex = UNKOWN_DUPLEX;
+	/*
+	 * Read the status from the PHY (which is self-clearing
+	 * on read!); also read & clear the main (Ethernet) MAC status
+	 * (the relevant bits of this are write-one-to-clear).
+	 */
+	mii_status = nge_mii_get16(ngep, MII_STATUS);
+	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
+	mintr_src.src_val = nge_reg_get32(ngep, NGE_MINTR_SRC);
+	nge_reg_put32(ngep, NGE_MINTR_SRC, mintr_src.src_val);
+
+	NGE_DEBUG(("nge_check_copper: link %d/%s, MII status 0x%x "
+	    "(was 0x%x)", ngep->link_state,
+	    UPORDOWN(ngep->param_link_up), mii_status,
+	    ngep->phy_gen_status));
+
+	do {
+		/*
+		 * If the PHY status changed, record the time
+		 */
+		switch (ngep->phy_mode) {
+		default:
+		case RGMII_IN:
+
+			/*
+			 * Judge the giga speed by reading control
+			 * and status register
+			 */
+			mii_excontrol = nge_mii_get16(ngep,
+			    MII_1000BASE_T_CONTROL);
+			mii_exstatus = nge_mii_get16(ngep,
+			    MII_1000BASE_T_STATUS);
+			if ((mii_excontrol & MII_1000BT_CTL_ADV_FDX) &&
+			    (mii_exstatus & MII_1000BT_STAT_LP_FDX_CAP)) {
+				speed  = NGE_1000M;
+				duplex = NGE_FD;
+			} else {
+				/* fall back to 10/100 resolution */
+				anar = nge_mii_get16(ngep, MII_AN_ADVERT);
+				lpan = nge_mii_get16(ngep, MII_AN_LPABLE);
+				if (lpan != 0)
+					anar = (anar & lpan);
+				if (anar & MII_100BASET_FD) {
+					speed = NGE_100M;
+					duplex = NGE_FD;
+				} else if (anar & MII_100BASET_HD) {
+					speed = NGE_100M;
+					duplex = NGE_HD;
+				} else if (anar & MII_10BASET_FD) {
+					speed = NGE_10M;
+					duplex = NGE_FD;
+				} else if (anar & MII_10BASET_HD) {
+					speed = NGE_10M;
+					duplex = NGE_HD;
+				}
+			}
+			break;
+		case MII_IN:
+			/* 10/100 only: intersect our and partner abilities */
+			anar = nge_mii_get16(ngep, MII_AN_ADVERT);
+			lpan = nge_mii_get16(ngep, MII_AN_LPABLE);
+			if (lpan != 0)
+				anar = (anar & lpan);
+
+			if (anar & MII_100BASET_FD) {
+				speed = NGE_100M;
+				duplex = NGE_FD;
+			} else if (anar & MII_100BASET_HD) {
+				speed = NGE_100M;
+				duplex = NGE_HD;
+			} else if (anar & MII_10BASET_FD) {
+				speed = NGE_10M;
+				duplex = NGE_FD;
+			} else if (anar & MII_10BASET_HD) {
+				speed = NGE_10M;
+				duplex = NGE_HD;
+			}
+			break;
+		}
+
+
+		/*
+		 * We will only consider the link UP if all the readings
+		 * are consistent and give meaningful results ...
+		 * NOTE(review): the MAC register value (mii_cs) is tested
+		 * with the PHY-register mask MII_STATUS_LINKUP -- confirm
+		 * the bit positions coincide in both registers.
+		 */
+		linkup = nge_copper_link_speed[speed] > 0;
+		linkup &= nge_copper_link_duplex[duplex] != LINK_DUPLEX_UNKNOWN;
+		linkup &= BIS(mii_status, MII_STATUS_LINKUP);
+		linkup &= BIS(mii_cs.cs_val, MII_STATUS_LINKUP);
+
+		/*
+		 * Record current register values, then reread status
+		 * register & loop until it stabilises ...
+		 */
+		ngep->phy_gen_status = mii_status;
+		mii_status = nge_mii_get16(ngep, MII_STATUS);
+	} while (mii_status != ngep->phy_gen_status);
+
+	/* Get the Link Partner Ability */
+	mii_exstatus = nge_mii_get16(ngep, MII_1000BASE_T_STATUS);
+	lpan = nge_mii_get16(ngep, MII_AN_LPABLE);
+	if (mii_exstatus & MII_1000BT_STAT_LP_FDX_CAP) {
+		ngep->param_lp_autoneg = B_TRUE;
+		ngep->param_link_autoneg = B_TRUE;
+		ngep->param_lp_1000fdx = B_TRUE;
+	}
+	if (mii_exstatus & MII_1000BT_STAT_LP_HDX_CAP) {
+		ngep->param_lp_autoneg = B_TRUE;
+		ngep->param_link_autoneg = B_TRUE;
+		ngep->param_lp_1000hdx = B_TRUE;
+	}
+	if (lpan & MII_100BASET_FD)
+		ngep->param_lp_100fdx = B_TRUE;
+	if (lpan & MII_100BASET_HD)
+		ngep->param_lp_100hdx = B_TRUE;
+	if (lpan & MII_10BASET_FD)
+		ngep->param_lp_10fdx = B_TRUE;
+	if (lpan & MII_10BASET_HD)
+		ngep->param_lp_10hdx = B_TRUE;
+	if (lpan & MII_LP_ASYM_PAUSE)
+		ngep->param_lp_asym_pause = B_TRUE;
+	if (lpan & MII_LP_PAUSE)
+		ngep->param_lp_pause = B_TRUE;
+	ngep->param_link_tx_pause = B_FALSE;
+
+	if (ngep->param_adv_autoneg)
+		ngep->param_link_rx_pause = B_FALSE;
+	else
+		ngep->param_link_rx_pause = ngep->param_adv_pause;
+	if (linkup) {
+		ngep->param_link_up = linkup;
+		ngep->param_link_speed = nge_copper_link_speed[speed];
+		ngep->param_link_duplex = nge_copper_link_duplex[duplex];
+	} else {
+		ngep->param_link_up = B_FALSE;
+		ngep->param_link_speed = 0;
+		ngep->param_link_duplex = LINK_DUPLEX_UNKNOWN;
+	}
+	NGE_DEBUG(("nge_check_copper: link now %s speed %d duplex %d",
+	    UPORDOWN(ngep->param_link_up),
+	    ngep->param_link_speed,
+	    ngep->param_link_duplex));
+
+	return (B_FALSE);
+}
+
+/*
+ * Because the network chipset embedded in the CK8-04 bridge is only a MAC,
+ * different vendors can attach different media (serdes or copper).
+ * To make it easier to extend the driver to more CK8-04 platforms
+ * (for example, one with serdes support), the PHY operations are
+ * wrapped in an ops vector.  For now, only copper PHY operations
+ * are supplied.
+ */
+static const phys_ops_t copper_ops = {
+	nge_phy_restart,	/* phys_restart	*/
+	nge_update_copper,	/* phys_update	*/
+	nge_check_copper	/* phys_check	*/
+};
+
+/*
+ * Here we have to determine which media we're using (copper or serdes).
+ * Once that's done, we can initialise the physical layer appropriately.
+ *
+ * Reads the interface type from the MAC2PHY register (forcing RGMII
+ * if the value is neither RGMII nor MII), probes for the PHY address,
+ * then installs the copper ops vector and restarts the PHY.
+ */
+void
+nge_phys_init(nge_t *ngep)
+{
+	nge_mac2phy m2p;
+	NGE_TRACE(("nge_phys_init($%p)", (void *)ngep));
+
+	/* Get the phy type from MAC2PHY register */
+	m2p.m2p_val = nge_reg_get32(ngep, NGE_MAC2PHY);
+	ngep->phy_mode = m2p.m2p_bits.in_type;
+	if ((ngep->phy_mode != RGMII_IN) && (ngep->phy_mode != MII_IN)) {
+		/* unrecognised interface type: default to RGMII */
+		ngep->phy_mode = RGMII_IN;
+		m2p.m2p_bits.in_type = RGMII_IN;
+		nge_reg_put32(ngep, NGE_MAC2PHY, m2p.m2p_val);
+	}
+
+	/*
+	 * Probe for the type of the PHY.
+	 * NOTE(review): the probe result is ignored; phy_id may be
+	 * stale if no PHY answered.
+	 */
+	ngep->phy_xmii_addr = 1;
+	(void) nge_phy_probe(ngep);
+	ngep->chipinfo.flags |= CHIP_FLAG_COPPER;
+	ngep->physops = &copper_ops;
+	(*(ngep->physops->phys_restart))(ngep);
+}
--- a/usr/src/uts/intel/Makefile Sat Dec 01 08:56:06 2007 -0800
+++ b/usr/src/uts/intel/Makefile Sun Dec 02 07:26:48 2007 -0800
@@ -44,7 +44,6 @@
$(CLOSED_BUILD)LINT_CLOSED_XMOD3 = $(LINT_CLOSED_XMOD4:lsimega=)
$(CLOSED_BUILD)LINT_CLOSED_XMOD2 = $(LINT_CLOSED_XMOD3:spwr=)
$(CLOSED_BUILD)LINT_CLOSED_XMOD1 = $(LINT_CLOSED_XMOD2:adpu320=)
-$(CLOSED_BUILD)LINT_XMODLIBS = $(LINT_CLOSED_XMOD1:nge=)
$(CLOSED_BUILD)LINT_LIBS += $(LINT_XMODLIBS:%=$(LINT_LIB_DIR)/llib-l%.ln)
#
--- a/usr/src/uts/intel/Makefile.intel.shared Sat Dec 01 08:56:06 2007 -0800
+++ b/usr/src/uts/intel/Makefile.intel.shared Sun Dec 02 07:26:48 2007 -0800
@@ -356,6 +356,7 @@
DRV_KMODS += dmfe
DRV_KMODS += e1000g
DRV_KMODS += mxfe
+DRV_KMODS += nge
DRV_KMODS += rge
DRV_KMODS += amd8111s
$(CLOSED_BUILD)CLOSED_DRV_KMODS += ixgb
@@ -623,7 +624,6 @@
bnx \
daplt \
lsimega \
- nge \
sdpib \
spwr \
tavor
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/uts/intel/nge/Makefile Sun Dec 02 07:26:48 2007 -0800
@@ -0,0 +1,87 @@
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+# This file may contain confidential information of Nvidia
+# and should not be distributed in source form without approval
+# from Sun Legal.
+#
+#ident	"%Z%%M%	%I%	%E% SMI"
+#
+#	This makefile drives the production of the nvidia toe/ge
+#	driver.
+#
+
+#
+#	Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE	= ../..
+
+#
+#	Define the module and object file sets.
+#	NGE_OBJS is defined in $(UTSBASE)/common/Makefile.files;
+#	CONF_SRCDIR locates the nge.conf delivered alongside the module.
+#
+MODULE		= nge
+OBJECTS		= $(NGE_OBJS:%=$(OBJS_DIR)/%)
+LINTS		= $(NGE_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE	= $(ROOT_DRV_DIR)/$(MODULE)
+CONF_SRCDIR	= $(UTSBASE)/common/io/nge
+
+#
+#	Include common rules.
+#
+include $(UTSBASE)/intel/Makefile.intel
+
+#
+#	Define targets
+#
+ALL_TARGET	= $(BINARY) $(SRC_CONFILE)
+LINT_TARGET	= $(MODULE).lint
+INSTALL_TARGET	= $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+#	Override defaults
+#
+INC_PATH	+= -I$(CONF_SRCDIR)
+
+# CFLAGS	+= $(CINLINEFLAGS)
+
+#
+#	Driver depends on GLD & IP
+#
+LDFLAGS		+= -dy -N misc/mac -N drv/ip
+
+#
+#	For now, disable these lint checks; maintainers should endeavor
+#	to investigate and remove these for maximum lint coverage.
+#	Please do not carry these forward to new Makefiles.
+#
+LINTTAGS	+= -erroff=E_BAD_PTR_CAST_ALIGN
+LINTTAGS	+= -erroff=E_PTRDIFF_OVERFLOW
+LINTTAGS	+= -erroff=E_ASSIGN_NARROW_CONV
+LINTTAGS	+= -erroff=E_STATIC_UNUSED
+
+#
+#	Default build targets.
+#
+.KEEP_STATE:
+
+def:		$(DEF_DEPS)
+
+all:		$(ALL_DEPS)
+
+clean:		$(CLEAN_DEPS)
+
+clobber:	$(CLOBBER_DEPS)
+
+lint:		$(LINT_DEPS)
+
+modlintlib:	$(MODLINTLIB_DEPS)
+
+clean.lint:	$(CLEAN_LINT_DEPS)
+
+install:	$(INSTALL_DEPS)
+
+#
+#	Include common targets.
+#
+include $(UTSBASE)/intel/Makefile.targ
--- a/usr/src/xmod/xmod_files Sat Dec 01 08:56:06 2007 -0800
+++ b/usr/src/xmod/xmod_files Sun Dec 02 07:26:48 2007 -0800
@@ -19,8 +19,6 @@
../closed/uts/sparc/wsdrv
../closed/uts/sun/io/wsdrv.c
../closed/uts/sun/io/wsdrv.conf
-../closed/uts/common/io/nge
-../closed/uts/intel/nge
../closed/uts/common/io/bnx
../closed/uts/intel/bnx
../closed/uts/intel/io/adpu320