Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--  drivers/net/forcedeth.c  1813
1 file changed, 1475 insertions(+), 338 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 705e1229d89d..037d870712ff 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -107,6 +107,8 @@
* 0.52: 20 Jan 2006: Add MSI/MSIX support.
* 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
* 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
+ * 0.55: 22 Mar 2006: Add flow control (pause frame).
+ * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -118,7 +120,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.54"
+#define FORCEDETH_VERSION "0.56"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@@ -163,6 +165,9 @@
#define DEV_HAS_MSI 0x0040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
+#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
+#define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */
+#define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */
enum {
NvRegIrqStatus = 0x000,
@@ -203,6 +208,7 @@ enum {
NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
NvRegMisc1 = 0x080,
+#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
@@ -214,9 +220,11 @@ enum {
#define NVREG_XMITSTAT_BUSY 0x01
NvRegPacketFilterFlags = 0x8c,
-#define NVREG_PFF_ALWAYS 0x7F0008
+#define NVREG_PFF_PAUSE_RX 0x08
+#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
+#define NVREG_PFF_LOOPBACK 0x10
NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
@@ -277,6 +285,9 @@ enum {
#define NVREG_TXRXCTL_VLANINS 0x00080
NvRegTxRingPhysAddrHigh = 0x148,
NvRegRxRingPhysAddrHigh = 0x14C,
+ NvRegTxPauseFrame = 0x170,
+#define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080
+#define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030
NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -326,6 +337,33 @@ enum {
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
+ NvRegTxCnt = 0x280,
+ NvRegTxZeroReXmt = 0x284,
+ NvRegTxOneReXmt = 0x288,
+ NvRegTxManyReXmt = 0x28c,
+ NvRegTxLateCol = 0x290,
+ NvRegTxUnderflow = 0x294,
+ NvRegTxLossCarrier = 0x298,
+ NvRegTxExcessDef = 0x29c,
+ NvRegTxRetryErr = 0x2a0,
+ NvRegRxFrameErr = 0x2a4,
+ NvRegRxExtraByte = 0x2a8,
+ NvRegRxLateCol = 0x2ac,
+ NvRegRxRunt = 0x2b0,
+ NvRegRxFrameTooLong = 0x2b4,
+ NvRegRxOverflow = 0x2b8,
+ NvRegRxFCSErr = 0x2bc,
+ NvRegRxFrameAlignErr = 0x2c0,
+ NvRegRxLenErr = 0x2c4,
+ NvRegRxUnicast = 0x2c8,
+ NvRegRxMulticast = 0x2cc,
+ NvRegRxBroadcast = 0x2d0,
+ NvRegTxDef = 0x2d4,
+ NvRegTxFrame = 0x2d8,
+ NvRegRxCnt = 0x2dc,
+ NvRegTxPause = 0x2e0,
+ NvRegRxPause = 0x2e4,
+ NvRegRxDropFrame = 0x2e8,
NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
NvRegMSIXMap0 = 0x3e0,
@@ -449,16 +487,18 @@ typedef union _ring_type {
/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)
-#define RX_RING 128
-#define TX_RING 256
-/*
- * If your nic mysteriously hangs then try to reduce the limits
- * to 1/0: It might be required to set NV_TX_LASTPACKET in the
- * last valid ring entry. But this would be impossible to
- * implement - probably a disassembly error.
+#define RX_RING_DEFAULT 128
+#define TX_RING_DEFAULT 256
+#define RX_RING_MIN 128
+#define TX_RING_MIN 64
+#define RING_MAX_DESC_VER_1 1024
+#define RING_MAX_DESC_VER_2_3 16384
+/*
+ * Difference between the get and put pointers for the tx ring.
+ * This is used to throttle the amount of data outstanding in the
+ * tx ring.
*/
-#define TX_LIMIT_STOP 255
-#define TX_LIMIT_START 254
+#define TX_LIMIT_DIFFERENCE 1
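/*
 * A minimal sketch (illustration only, not part of this patch) of how the
 * new limits gate the tx queue.  nv_tx_ring_full() is a hypothetical
 * helper name; the driver open-codes this check in nv_start_xmit() below.
 * The derivation matches nv_set_ringparam():
 *
 *	tx_limit_stop  = tx_ring_size - TX_LIMIT_DIFFERENCE;
 *	tx_limit_start = tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
 */
static inline int nv_tx_ring_full(unsigned int next_tx, unsigned int nic_tx,
				  unsigned int entries, int tx_limit_stop)
{
	/* next_tx - nic_tx is the number of descriptors still owned by
	 * the nic; unsigned subtraction handles index wraparound. */
	return (next_tx - nic_tx + entries - 1) > tx_limit_stop;
}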
/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS (64)
@@ -472,8 +512,9 @@ typedef union _ring_type {
#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
+#define STATS_INTERVAL (10*HZ)
-/*
+/*
* desc_ver values:
* The nic supports three different descriptor types:
* - DESC_VER_1: Original
@@ -506,13 +547,13 @@ typedef union _ring_type {
#define PHY_1000 0x2
#define PHY_HALF 0x100
-/* FIXME: MII defines that should be added to <linux/mii.h> */
-#define MII_1000BT_CR 0x09
-#define MII_1000BT_SR 0x0a
-#define ADVERTISE_1000FULL 0x0200
-#define ADVERTISE_1000HALF 0x0100
-#define LPA_1000FULL 0x0800
-#define LPA_1000HALF 0x0400
+#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
+#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
+#define NV_PAUSEFRAME_RX_ENABLE 0x0004
+#define NV_PAUSEFRAME_TX_ENABLE 0x0008
+#define NV_PAUSEFRAME_RX_REQ 0x0010
+#define NV_PAUSEFRAME_TX_REQ 0x0020
+#define NV_PAUSEFRAME_AUTONEG 0x0040
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
@@ -527,15 +568,110 @@ typedef union _ring_type {
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2
+/* statistics */
+struct nv_ethtool_str {
+ char name[ETH_GSTRING_LEN];
+};
+
+static const struct nv_ethtool_str nv_estats_str[] = {
+ { "tx_bytes" },
+ { "tx_zero_rexmt" },
+ { "tx_one_rexmt" },
+ { "tx_many_rexmt" },
+ { "tx_late_collision" },
+ { "tx_fifo_errors" },
+ { "tx_carrier_errors" },
+ { "tx_excess_deferral" },
+ { "tx_retry_error" },
+ { "tx_deferral" },
+ { "tx_packets" },
+ { "tx_pause" },
+ { "rx_frame_error" },
+ { "rx_extra_byte" },
+ { "rx_late_collision" },
+ { "rx_runt" },
+ { "rx_frame_too_long" },
+ { "rx_over_errors" },
+ { "rx_crc_errors" },
+ { "rx_frame_align_error" },
+ { "rx_length_error" },
+ { "rx_unicast" },
+ { "rx_multicast" },
+ { "rx_broadcast" },
+ { "rx_bytes" },
+ { "rx_pause" },
+ { "rx_drop_frame" },
+ { "rx_packets" },
+ { "rx_errors_total" }
+};
+
+struct nv_ethtool_stats {
+ u64 tx_bytes;
+ u64 tx_zero_rexmt;
+ u64 tx_one_rexmt;
+ u64 tx_many_rexmt;
+ u64 tx_late_collision;
+ u64 tx_fifo_errors;
+ u64 tx_carrier_errors;
+ u64 tx_excess_deferral;
+ u64 tx_retry_error;
+ u64 tx_deferral;
+ u64 tx_packets;
+ u64 tx_pause;
+ u64 rx_frame_error;
+ u64 rx_extra_byte;
+ u64 rx_late_collision;
+ u64 rx_runt;
+ u64 rx_frame_too_long;
+ u64 rx_over_errors;
+ u64 rx_crc_errors;
+ u64 rx_frame_align_error;
+ u64 rx_length_error;
+ u64 rx_unicast;
+ u64 rx_multicast;
+ u64 rx_broadcast;
+ u64 rx_bytes;
+ u64 rx_pause;
+ u64 rx_drop_frame;
+ u64 rx_packets;
+ u64 rx_errors_total;
+};
+
+/* diagnostics */
+#define NV_TEST_COUNT_BASE 3
+#define NV_TEST_COUNT_EXTENDED 4
+
+static const struct nv_ethtool_str nv_etests_str[] = {
+ { "link (online/offline)" },
+ { "register (offline) " },
+ { "interrupt (offline) " },
+ { "loopback (offline) " }
+};
+
+struct register_test {
+ u32 reg;
+ u32 mask;
+};
+
+static const struct register_test nv_registers_test[] = {
+ { NvRegUnknownSetupReg6, 0x01 },
+ { NvRegMisc1, 0x03c },
+ { NvRegOffloadConfig, 0x03ff },
+ { NvRegMulticastAddrA, 0xffffffff },
+ { NvRegUnknownSetupReg3, 0x0ff },
+ { NvRegWakeUpFlags, 0x07777 },
+ { 0,0 }
+};
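/*
 * Hedged sketch of how a sentinel-terminated table like the one above is
 * typically walked by a register selftest: flip the writable bits under
 * the mask, verify they read back, then restore.  nv_walk_registers() is
 * a hypothetical name; the real test routine is added later in this
 * patch and may differ in detail.
 */
static int nv_walk_registers(u8 __iomem *base)
{
	int i = 0;

	while (nv_registers_test[i].reg != 0) {
		u32 reg = nv_registers_test[i].reg;
		u32 mask = nv_registers_test[i].mask;
		u32 orig = readl(base + reg);

		writel(orig ^ mask, base + reg);	/* flip maskable bits */
		if ((readl(base + reg) & mask) != ((orig ^ mask) & mask))
			return 0;			/* stuck bit: fail */
		writel(orig, base + reg);		/* restore */
		i++;
	}
	return 1;
}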
+
/*
* SMP locking:
* All hardware access under dev->priv->lock, except the performance
* critical parts:
* - rx is (pseudo-) lockless: it relies on the single-threading provided
* by the arch code for interrupts.
- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
+ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
* needs dev->priv->lock :-(
- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
+ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
*/
/* in dev: base, irq */
@@ -545,6 +681,7 @@ struct fe_priv {
/* General data:
* Locking: spin_lock(&np->lock); */
struct net_device_stats stats;
+ struct nv_ethtool_stats estats;
int in_shutdown;
u32 linkspeed;
int duplex;
@@ -554,6 +691,7 @@ struct fe_priv {
int wolenabled;
unsigned int phy_oui;
u16 gigabit;
+ int intr_test;
/* General data: RO fields */
dma_addr_t ring_addr;
@@ -573,13 +711,15 @@ struct fe_priv {
*/
ring_type rx_ring;
unsigned int cur_rx, refill_rx;
- struct sk_buff *rx_skbuff[RX_RING];
- dma_addr_t rx_dma[RX_RING];
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *rx_dma;
unsigned int rx_buf_sz;
unsigned int pkt_limit;
struct timer_list oom_kick;
struct timer_list nic_poll;
+ struct timer_list stats_poll;
u32 nic_poll_irq;
+ int rx_ring_size;
/* media detection workaround.
* Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
@@ -591,10 +731,13 @@ struct fe_priv {
*/
ring_type tx_ring;
unsigned int next_tx, nic_tx;
- struct sk_buff *tx_skbuff[TX_RING];
- dma_addr_t tx_dma[TX_RING];
- unsigned int tx_dma_len[TX_RING];
+ struct sk_buff **tx_skbuff;
+ dma_addr_t *tx_dma;
+ unsigned int *tx_dma_len;
u32 tx_flags;
+ int tx_ring_size;
+ int tx_limit_start;
+ int tx_limit_stop;
/* vlan fields */
struct vlan_group *vlangrp;
@@ -602,6 +745,9 @@ struct fe_priv {
/* msi/msi-x fields */
u32 msi_flags;
struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
+
+ /* flow control */
+ u32 pause_flags;
};
/*
@@ -612,12 +758,14 @@ static int max_interrupt_work = 5;
/*
* Optimization can be either throughput mode or cpu mode
- *
+ *
* Throughput Mode: Every tx and rx packet will generate an interrupt.
* CPU Mode: Interrupts are controlled by a timer.
*/
-#define NV_OPTIMIZATION_MODE_THROUGHPUT 0
-#define NV_OPTIMIZATION_MODE_CPU 1
+enum {
+ NV_OPTIMIZATION_MODE_THROUGHPUT,
+ NV_OPTIMIZATION_MODE_CPU
+};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
/*
@@ -630,14 +778,31 @@ static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
static int poll_interval = -1;
/*
- * Disable MSI interrupts
+ * MSI interrupts
+ */
+enum {
+ NV_MSI_INT_DISABLED,
+ NV_MSI_INT_ENABLED
+};
+static int msi = NV_MSI_INT_ENABLED;
+
+/*
+ * MSIX interrupts
*/
-static int disable_msi = 0;
+enum {
+ NV_MSIX_INT_DISABLED,
+ NV_MSIX_INT_ENABLED
+};
+static int msix = NV_MSIX_INT_ENABLED;
/*
- * Disable MSIX interrupts
+ * DMA 64bit
*/
-static int disable_msix = 0;
+enum {
+ NV_DMA_64BIT_DISABLED,
+ NV_DMA_64BIT_ENABLED
+};
+static int dma_64bit = NV_DMA_64BIT_ENABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
@@ -697,7 +862,7 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
}
if (rxtx_flags & NV_SETUP_TX_RING) {
- writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+ writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
}
} else {
if (rxtx_flags & NV_SETUP_RX_RING) {
@@ -705,12 +870,37 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
}
if (rxtx_flags & NV_SETUP_TX_RING) {
- writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
- writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
+ writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+ writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
}
}
}
+static void free_rings(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ if (np->rx_ring.orig)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
+ np->rx_ring.orig, np->ring_addr);
+ } else {
+ if (np->rx_ring.ex)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
+ np->rx_ring.ex, np->ring_addr);
+ }
+ if (np->rx_skbuff)
+ kfree(np->rx_skbuff);
+ if (np->rx_dma)
+ kfree(np->rx_dma);
+ if (np->tx_skbuff)
+ kfree(np->tx_skbuff);
+ if (np->tx_dma)
+ kfree(np->tx_dma);
+ if (np->tx_dma_len)
+ kfree(np->tx_dma_len);
+}
+
static int using_multi_irqs(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
@@ -860,7 +1050,7 @@ static int phy_init(struct net_device *dev)
/* set advertise register */
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
+ reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
@@ -873,14 +1063,14 @@ static int phy_init(struct net_device *dev)
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (mii_status & PHY_GIGABIT) {
np->gigabit = PHY_GIGABIT;
- mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
mii_control_1000 &= ~ADVERTISE_1000HALF;
if (phyinterface & PHY_RGMII)
mii_control_1000 |= ADVERTISE_1000FULL;
else
mii_control_1000 &= ~ADVERTISE_1000FULL;
- if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
+ if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
@@ -918,6 +1108,8 @@ static int phy_init(struct net_device *dev)
return PHY_ERROR;
}
}
+ /* some phys clear out pause advertisement on reset, set it back */
+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
/* restart auto negotiation */
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1047,7 +1239,7 @@ static int nv_alloc_rx(struct net_device *dev)
while (np->cur_rx != refill_rx) {
struct sk_buff *skb;
- nr = refill_rx % RX_RING;
+ nr = refill_rx % np->rx_ring_size;
if (np->rx_skbuff[nr] == NULL) {
skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
@@ -1076,7 +1268,7 @@ static int nv_alloc_rx(struct net_device *dev)
refill_rx++;
}
np->refill_rx = refill_rx;
- if (np->cur_rx - refill_rx == RX_RING)
+ if (np->cur_rx - refill_rx == np->rx_ring_size)
return 1;
return 0;
}
@@ -1110,14 +1302,14 @@ static void nv_do_rx_refill(unsigned long data)
}
}
-static void nv_init_rx(struct net_device *dev)
+static void nv_init_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
- np->cur_rx = RX_RING;
+ np->cur_rx = np->rx_ring_size;
np->refill_rx = 0;
- for (i = 0; i < RX_RING; i++)
+ for (i = 0; i < np->rx_ring_size; i++)
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
np->rx_ring.orig[i].FlagLen = 0;
else
@@ -1130,7 +1322,7 @@ static void nv_init_tx(struct net_device *dev)
int i;
np->next_tx = np->nic_tx = 0;
- for (i = 0; i < TX_RING; i++) {
+ for (i = 0; i < np->tx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
np->tx_ring.orig[i].FlagLen = 0;
else
@@ -1174,8 +1366,8 @@ static void nv_drain_tx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
unsigned int i;
-
- for (i = 0; i < TX_RING; i++) {
+
+ for (i = 0; i < np->tx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
np->tx_ring.orig[i].FlagLen = 0;
else
@@ -1189,7 +1381,7 @@ static void nv_drain_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
- for (i = 0; i < RX_RING; i++) {
+ for (i = 0; i < np->rx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
np->rx_ring.orig[i].FlagLen = 0;
else
@@ -1213,7 +1405,7 @@ static void drain_ring(struct net_device *dev)
/*
* nv_start_xmit: dev->hard_start_xmit function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
*/
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -1221,8 +1413,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 tx_flags = 0;
u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
unsigned int fragments = skb_shinfo(skb)->nr_frags;
- unsigned int nr = (np->next_tx - 1) % TX_RING;
- unsigned int start_nr = np->next_tx % TX_RING;
+ unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
+ unsigned int start_nr = np->next_tx % np->tx_ring_size;
unsigned int i;
u32 offset = 0;
u32 bcnt;
@@ -1238,7 +1430,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irq(&np->lock);
- if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
+ if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
spin_unlock_irq(&np->lock);
netif_stop_queue(dev);
return NETDEV_TX_BUSY;
@@ -1247,7 +1439,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* setup the header buffer */
do {
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- nr = (nr + 1) % TX_RING;
+ nr = (nr + 1) % np->tx_ring_size;
np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
@@ -1274,7 +1466,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
do {
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- nr = (nr + 1) % TX_RING;
+ nr = (nr + 1) % np->tx_ring_size;
np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
PCI_DMA_TODEVICE);
@@ -1303,8 +1495,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
np->tx_skbuff[nr] = skb;
#ifdef NETIF_F_TSO
- if (skb_shinfo(skb)->tso_size)
- tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
+ if (skb_shinfo(skb)->gso_size)
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
else
#endif
tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
@@ -1320,7 +1512,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
- }
+ }
dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
dev->name, np->next_tx, entries, tx_flags_extra);
@@ -1356,7 +1548,7 @@ static void nv_tx_done(struct net_device *dev)
struct sk_buff *skb;
while (np->nic_tx != np->next_tx) {
- i = np->nic_tx % TX_RING;
+ i = np->nic_tx % np->tx_ring_size;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
@@ -1395,19 +1587,19 @@ static void nv_tx_done(struct net_device *dev)
} else {
np->stats.tx_packets++;
np->stats.tx_bytes += skb->len;
- }
+ }
}
}
nv_release_txskb(dev, i);
np->nic_tx++;
}
- if (np->next_tx - np->nic_tx < TX_LIMIT_START)
+ if (np->next_tx - np->nic_tx < np->tx_limit_start)
netif_wake_queue(dev);
}
/*
* nv_tx_timeout: dev->tx_timeout function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
*/
static void nv_tx_timeout(struct net_device *dev)
{
@@ -1438,10 +1630,10 @@ static void nv_tx_timeout(struct net_device *dev)
readl(base + i + 24), readl(base + i + 28));
}
printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
- for (i=0;i<TX_RING;i+= 4) {
+ for (i=0;i<np->tx_ring_size;i+= 4) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
- i,
+ i,
le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
le32_to_cpu(np->tx_ring.orig[i].FlagLen),
le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
@@ -1452,7 +1644,7 @@ static void nv_tx_timeout(struct net_device *dev)
le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
} else {
printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
- i,
+ i,
le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
le32_to_cpu(np->tx_ring.ex[i].FlagLen),
@@ -1550,15 +1742,14 @@ static void nv_rx_process(struct net_device *dev)
u32 Flags;
u32 vlanflags = 0;
-
for (;;) {
struct sk_buff *skb;
int len;
int i;
- if (np->cur_rx - np->refill_rx >= RX_RING)
+ if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
break; /* we scanned the whole ring - do not continue */
- i = np->cur_rx % RX_RING;
+ i = np->cur_rx % np->rx_ring_size;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
@@ -1665,14 +1856,16 @@ static void nv_rx_process(struct net_device *dev)
}
}
}
- Flags &= NV_RX2_CHECKSUMMASK;
- if (Flags == NV_RX2_CHECKSUMOK1 ||
- Flags == NV_RX2_CHECKSUMOK2 ||
- Flags == NV_RX2_CHECKSUMOK3) {
- dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
- np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
+ if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
+ Flags &= NV_RX2_CHECKSUMMASK;
+ if (Flags == NV_RX2_CHECKSUMOK1 ||
+ Flags == NV_RX2_CHECKSUMOK2 ||
+ Flags == NV_RX2_CHECKSUMOK3) {
+ dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
+ np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
+ }
}
}
/* got a valid packet - forward it to the network core */
@@ -1737,7 +1930,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
* Changing the MTU is a rare event, it shouldn't matter.
*/
nv_disable_irq(dev);
- spin_lock_bh(&dev->xmit_lock);
+ netif_tx_lock_bh(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rx(dev);
@@ -1747,18 +1940,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
nv_drain_rx(dev);
nv_drain_tx(dev);
/* reinit driver view of the rx queue */
- nv_init_rx(dev);
- nv_init_tx(dev);
- /* alloc new rx buffers */
set_bufsize(dev);
- if (nv_alloc_rx(dev)) {
+ if (nv_init_ring(dev)) {
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
}
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -1768,7 +1958,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
nv_start_rx(dev);
nv_start_tx(dev);
spin_unlock(&np->lock);
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
nv_enable_irq(dev);
}
return 0;
@@ -1803,7 +1993,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
if (netif_running(dev)) {
- spin_lock_bh(&dev->xmit_lock);
+ netif_tx_lock_bh(dev);
spin_lock_irq(&np->lock);
/* stop rx engine */
@@ -1815,7 +2005,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
/* restart rx engine */
nv_start_rx(dev);
spin_unlock_irq(&np->lock);
- spin_unlock_bh(&dev->xmit_lock);
+ netif_tx_unlock_bh(dev);
} else {
nv_copy_mac_to_hw(dev);
}
@@ -1824,7 +2014,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
/*
* nv_set_multicast: dev->set_multicast function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
*/
static void nv_set_multicast(struct net_device *dev)
{
@@ -1832,16 +2022,16 @@ static void nv_set_multicast(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 addr[2];
u32 mask[2];
- u32 pff;
+ u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
memset(addr, 0, sizeof(addr));
memset(mask, 0, sizeof(mask));
if (dev->flags & IFF_PROMISC) {
printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
- pff = NVREG_PFF_PROMISC;
+ pff |= NVREG_PFF_PROMISC;
} else {
- pff = NVREG_PFF_MYADDR;
+ pff |= NVREG_PFF_MYADDR;
if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
u32 alwaysOff[2];
@@ -1886,6 +2076,35 @@ static void nv_set_multicast(struct net_device *dev)
spin_unlock_irq(&np->lock);
}
+static void nv_update_pause(struct net_device *dev, u32 pause_flags)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
+
+ if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
+ u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
+ if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
+ writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ } else {
+ writel(pff, base + NvRegPacketFilterFlags);
+ }
+ }
+ if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
+ u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
+ if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
+ writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
+ writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ } else {
+ writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
+ writel(regmisc, base + NvRegMisc1);
+ }
+ }
+}
+
/**
* nv_update_linkspeed: Setup the MAC according to the link partner
* @dev: Network device to be configured
@@ -1901,12 +2120,14 @@ static int nv_update_linkspeed(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- int adv, lpa;
+ int adv = 0;
+ int lpa = 0;
+ int adv_lpa, adv_pause, lpa_pause;
int newls = np->linkspeed;
int newdup = np->duplex;
int mii_status;
int retval = 0;
- u32 control_1000, status_1000, phyreg;
+ u32 control_1000, status_1000, phyreg, pause_flags;
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
@@ -1952,10 +2173,15 @@ static int nv_update_linkspeed(struct net_device *dev)
goto set_speed;
}
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
+ dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
+ dev->name, adv, lpa);
+
retval = 1;
if (np->gigabit == PHY_GIGABIT) {
- control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
- status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);
+ control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
if ((control_1000 & ADVERTISE_1000FULL) &&
(status_1000 & LPA_1000FULL)) {
@@ -1967,27 +2193,22 @@ static int nv_update_linkspeed(struct net_device *dev)
}
}
- adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
- dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
- dev->name, adv, lpa);
-
/* FIXME: handle parallel detection properly */
- lpa = lpa & adv;
- if (lpa & LPA_100FULL) {
+ adv_lpa = lpa & adv;
+ if (adv_lpa & LPA_100FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 1;
- } else if (lpa & LPA_100HALF) {
+ } else if (adv_lpa & LPA_100HALF) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 0;
- } else if (lpa & LPA_10FULL) {
+ } else if (adv_lpa & LPA_10FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 1;
- } else if (lpa & LPA_10HALF) {
+ } else if (adv_lpa & LPA_10HALF) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
} else {
- dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
+ dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
}
@@ -2030,6 +2251,46 @@ set_speed:
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
+ pause_flags = 0;
+ /* setup pause frame */
+ if (np->duplex != 0) {
+ if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
+ adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
+ lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
+
+ switch (adv_pause) {
+ case (ADVERTISE_PAUSE_CAP):
+ if (lpa_pause & LPA_PAUSE_CAP) {
+ pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
+ break;
+ case (ADVERTISE_PAUSE_ASYM):
+ if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
+ {
+ pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
+ break;
+ case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+ if (lpa_pause & LPA_PAUSE_CAP)
+ {
+ pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
+ if (lpa_pause == LPA_PAUSE_ASYM)
+ {
+ pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ }
+ break;
+ }
+ } else {
+ pause_flags = np->pause_flags;
+ }
+ }
+ nv_update_pause(dev, pause_flags);
+
return retval;
}
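/*
 * For reference, the pause resolution implemented by the switch above,
 * worked out per advertisement combination (this table just restates the
 * code; it adds no patch logic):
 *
 *	local adv	link partner	result
 *	CAP		CAP		rx pause (+ tx pause if requested)
 *	ASYM		CAP|ASYM	tx pause only
 *	CAP|ASYM	CAP		rx pause (+ tx pause if requested)
 *	CAP|ASYM	ASYM		rx pause only
 *	other		-		no pause frames
 */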
@@ -2090,7 +2351,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
spin_lock(&np->lock);
nv_tx_done(dev);
spin_unlock(&np->lock);
-
+
nv_rx_process(dev);
if (nv_alloc_rx(dev)) {
spin_lock(&np->lock);
@@ -2098,7 +2359,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
spin_unlock(&np->lock);
}
-
+
if (events & NVREG_IRQ_LINK) {
spin_lock(&np->lock);
nv_link_irq(dev);
@@ -2163,7 +2424,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
spin_lock_irq(&np->lock);
nv_tx_done(dev);
spin_unlock_irq(&np->lock);
-
+
if (events & (NVREG_IRQ_TX_ERR)) {
dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
dev->name, events);
@@ -2206,7 +2467,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
-
+
nv_rx_process(dev);
if (nv_alloc_rx(dev)) {
spin_lock_irq(&np->lock);
@@ -2214,7 +2475,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
spin_unlock_irq(&np->lock);
}
-
+
if (i > max_interrupt_work) {
spin_lock_irq(&np->lock);
/* disable interrupts on the nic */
@@ -2253,7 +2514,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
-
+
if (events & NVREG_IRQ_LINK) {
spin_lock_irq(&np->lock);
nv_link_irq(dev);
@@ -2290,6 +2551,175 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
return IRQ_RETVAL(i);
}
+static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 events;
+
+ dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
+
+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+ writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
+ } else {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+ writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
+ }
+ pci_push(base);
+ dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
+ if (!(events & NVREG_IRQ_TIMER))
+ return IRQ_RETVAL(0);
+
+ spin_lock(&np->lock);
+ np->intr_test = 1;
+ spin_unlock(&np->lock);
+
+ dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
+
+ return IRQ_RETVAL(1);
+}
+
+static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ int i;
+ u32 msixmap = 0;
+
+ /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
+ * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
+ * the remaining 8 interrupts.
+ */
+ for (i = 0; i < 8; i++) {
+ if ((irqmask >> i) & 0x1) {
+ msixmap |= vector << (i << 2);
+ }
+ }
+ writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
+
+ msixmap = 0;
+ for (i = 0; i < 8; i++) {
+ if ((irqmask >> (i + 8)) & 0x1) {
+ msixmap |= vector << (i << 2);
+ }
+ }
+ writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
+}
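/*
 * Worked example for set_msix_vector_map() (values assumed purely for
 * illustration): mapping vector 0x1 for an irqmask with bits 2 and 4 set
 * (irqmask = 0x14) packs one 4-bit nibble per interrupt bit:
 *
 *	bit 2: msixmap |= 0x1 << (2 << 2)  ->  0x00000100
 *	bit 4: msixmap |= 0x1 << (4 << 2)  ->  0x00010100
 *
 * i.e. nibbles 2 and 4 of NvRegMSIXMap0 end up holding the vector id.
 */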
+
+static int nv_request_irq(struct net_device *dev, int intr_test)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int ret = 1;
+ int i;
+
+ if (np->msi_flags & NV_MSI_X_CAPABLE) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ np->msi_x_entry[i].entry = i;
+ }
+ if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+ np->msi_flags |= NV_MSI_X_ENABLED;
+ if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
+ /* Request irq for rx handling */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
+ /* Request irq for tx handling */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_free_rx;
+ }
+ /* Request irq for link and timer handling */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_free_tx;
+ }
+ /* map interrupts to their respective vector */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+ } else {
+ /* Request irq for all interrupts */
+ if ((!intr_test &&
+ request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
+ (intr_test &&
+ request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
+ printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
+
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ }
+ }
+ }
+ if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+ if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+ np->msi_flags |= NV_MSI_ENABLED;
+ if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
+ (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
+ printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ goto out_err;
+ }
+
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIMap0);
+ writel(0, base + NvRegMSIMap1);
+ /* enable msi vector 0 */
+ writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ }
+ }
+ if (ret != 0) {
+ if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
+ (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0))
+ goto out_err;
+
+ }
+
+ return 0;
+out_free_tx:
+ free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
+out_free_rx:
+ free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
+out_err:
+ return 1;
+}
+
+static void nv_free_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ int i;
+
+ if (np->msi_flags & NV_MSI_X_ENABLED) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ free_irq(np->msi_x_entry[i].vector, dev);
+ }
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ } else {
+ free_irq(np->pci_dev->irq, dev);
+ if (np->msi_flags & NV_MSI_ENABLED) {
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ }
+ }
+}
+
static void nv_do_nic_poll(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
@@ -2305,49 +2735,49 @@ static void nv_do_nic_poll(unsigned long data)
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
- disable_irq(dev->irq);
+ disable_irq_lockdep(dev->irq);
mask = np->irqmask;
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
mask |= NVREG_IRQ_RX_ALL;
}
if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
mask |= NVREG_IRQ_TX_ALL;
}
if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
mask |= NVREG_IRQ_OTHER;
}
}
np->nic_poll_irq = 0;
/* FIXME: Do we need synchronize_irq(dev->irq) here? */
-
+
writel(mask, base + NvRegIrqMask);
pci_push(base);
if (!using_multi_irqs(dev)) {
- nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
+ nv_nic_irq(0, dev, NULL);
if (np->msi_flags & NV_MSI_X_ENABLED)
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
- enable_irq(dev->irq);
+ enable_irq_lockdep(dev->irq);
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
- nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ nv_nic_irq_rx(0, dev, NULL);
+ enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
- nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ nv_nic_irq_tx(0, dev, NULL);
+ enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
}
if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
- nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ nv_nic_irq_other(0, dev, NULL);
+ enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
}
}
}
@@ -2359,6 +2789,56 @@ static void nv_poll_controller(struct net_device *dev)
}
#endif
+static void nv_do_stats_poll(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ np->estats.tx_bytes += readl(base + NvRegTxCnt);
+ np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
+ np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
+ np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
+ np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
+ np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
+ np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
+ np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
+ np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
+ np->estats.tx_deferral += readl(base + NvRegTxDef);
+ np->estats.tx_packets += readl(base + NvRegTxFrame);
+ np->estats.tx_pause += readl(base + NvRegTxPause);
+ np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
+ np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
+ np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
+ np->estats.rx_runt += readl(base + NvRegRxRunt);
+ np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
+ np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
+ np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
+ np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
+ np->estats.rx_length_error += readl(base + NvRegRxLenErr);
+ np->estats.rx_unicast += readl(base + NvRegRxUnicast);
+ np->estats.rx_multicast += readl(base + NvRegRxMulticast);
+ np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
+ np->estats.rx_bytes += readl(base + NvRegRxCnt);
+ np->estats.rx_pause += readl(base + NvRegRxPause);
+ np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
+ np->estats.rx_packets =
+ np->estats.rx_unicast +
+ np->estats.rx_multicast +
+ np->estats.rx_broadcast;
+ np->estats.rx_errors_total =
+ np->estats.rx_crc_errors +
+ np->estats.rx_over_errors +
+ np->estats.rx_frame_error +
+ (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
+ np->estats.rx_late_collision +
+ np->estats.rx_runt +
+ np->estats.rx_frame_too_long;
+
+ if (!np->in_shutdown)
+ mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
+}
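/*
 * A back-of-the-envelope check on STATS_INTERVAL (the arithmetic is the
 * reviewer's, not stated by the patch): assuming the hardware counters
 * clear on read, which is consistent with the += accumulation above,
 * each sample only needs the per-interval delta to stay below 2^32.  At
 * gigabit line rate the byte counters grow by roughly 125 MB/s, i.e.
 * about 1.25 GB per 10*HZ interval, comfortably below the 4 GB limit.
 */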
+
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = netdev_priv(dev);
@@ -2382,17 +2862,19 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
+ u32 flags = 0;
- spin_lock_irq(&np->lock);
if (wolinfo->wolopts == 0) {
- writel(0, base + NvRegWakeUpFlags);
np->wolenabled = 0;
- }
- if (wolinfo->wolopts & WAKE_MAGIC) {
- writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
+ } else if (wolinfo->wolopts & WAKE_MAGIC) {
np->wolenabled = 1;
+ flags = NVREG_WAKEUPFLAGS_ENABLE;
+ }
+ if (netif_running(dev)) {
+ spin_lock_irq(&np->lock);
+ writel(flags, base + NvRegWakeUpFlags);
+ spin_unlock_irq(&np->lock);
}
- spin_unlock_irq(&np->lock);
return 0;
}
@@ -2406,9 +2888,17 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (!netif_running(dev)) {
/* We do not track link speed / duplex setting if the
* interface is disabled. Force a link check */
- nv_update_linkspeed(dev);
+ if (nv_update_linkspeed(dev)) {
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ } else {
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ }
}
- switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
+
+ if (netif_carrier_ok(dev)) {
+ switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
case NVREG_LINKSPEED_10:
ecmd->speed = SPEED_10;
break;
@@ -2418,10 +2908,14 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
case NVREG_LINKSPEED_1000:
ecmd->speed = SPEED_1000;
break;
+ }
+ ecmd->duplex = DUPLEX_HALF;
+ if (np->duplex)
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
}
- ecmd->duplex = DUPLEX_HALF;
- if (np->duplex)
- ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = np->autoneg;
@@ -2429,23 +2923,20 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (np->autoneg) {
ecmd->advertising |= ADVERTISED_Autoneg;
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- } else {
- adv = np->fixed_mode;
- }
- if (adv & ADVERTISE_10HALF)
- ecmd->advertising |= ADVERTISED_10baseT_Half;
- if (adv & ADVERTISE_10FULL)
- ecmd->advertising |= ADVERTISED_10baseT_Full;
- if (adv & ADVERTISE_100HALF)
- ecmd->advertising |= ADVERTISED_100baseT_Half;
- if (adv & ADVERTISE_100FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (np->autoneg && np->gigabit == PHY_GIGABIT) {
- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
- if (adv & ADVERTISE_1000FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (adv & ADVERTISE_10HALF)
+ ecmd->advertising |= ADVERTISED_10baseT_Half;
+ if (adv & ADVERTISE_10FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
+ if (adv & ADVERTISE_100HALF)
+ ecmd->advertising |= ADVERTISED_100baseT_Half;
+ if (adv & ADVERTISE_100FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (np->gigabit == PHY_GIGABIT) {
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ if (adv & ADVERTISE_1000FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ }
}
-
ecmd->supported = (SUPPORTED_Autoneg |
SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
@@ -2497,7 +2988,18 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EINVAL;
}
- spin_lock_irq(&np->lock);
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ netif_tx_lock_bh(dev);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ spin_unlock(&np->lock);
+ netif_tx_unlock_bh(dev);
+ }
+
if (ecmd->autoneg == AUTONEG_ENABLE) {
int adv, bmcr;
@@ -2505,7 +3007,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
/* advertise only what has been requested */
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
if (ecmd->advertising & ADVERTISED_10baseT_Half)
adv |= ADVERTISE_10HALF;
if (ecmd->advertising & ADVERTISED_10baseT_Full)
@@ -2514,16 +3016,22 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
adv |= ADVERTISE_100HALF;
if (ecmd->advertising & ADVERTISED_100baseT_Full)
adv |= ADVERTISE_100FULL;
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ adv |= ADVERTISE_PAUSE_ASYM;
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
if (np->gigabit == PHY_GIGABIT) {
- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
adv &= ~ADVERTISE_1000FULL;
if (ecmd->advertising & ADVERTISED_1000baseT_Full)
adv |= ADVERTISE_1000FULL;
- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+ mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
}
+ if (netif_running(dev))
+ printk(KERN_INFO "%s: link down.\n", dev->name);
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
@@ -2534,7 +3042,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
np->autoneg = 0;
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
adv |= ADVERTISE_10HALF;
if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
@@ -2543,30 +3051,49 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
adv |= ADVERTISE_100HALF;
if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
adv |= ADVERTISE_100FULL;
+ np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ }
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
+ adv |= ADVERTISE_PAUSE_ASYM;
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
np->fixed_mode = adv;
if (np->gigabit == PHY_GIGABIT) {
- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
adv &= ~ADVERTISE_1000FULL;
- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+ mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- bmcr |= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
- if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
+ bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
+ if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
bmcr |= BMCR_FULLDPLX;
- if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
+ if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
bmcr |= BMCR_SPEED100;
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
-
- if (netif_running(dev)) {
+ if (np->phy_oui == PHY_OUI_MARVELL) {
+ /* reset the phy */
+ if (phy_reset(dev)) {
+ printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ return -EINVAL;
+ }
+ } else if (netif_running(dev)) {
/* Wait a bit and then reconfigure the nic. */
udelay(10);
nv_linkchange(dev);
}
}
- spin_unlock_irq(&np->lock);
+
+ if (netif_running(dev)) {
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ nv_enable_irq(dev);
+ }
return 0;
}
@@ -2598,196 +3125,742 @@ static int nv_nway_reset(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
int ret;
- spin_lock_irq(&np->lock);
if (np->autoneg) {
int bmcr;
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ netif_tx_lock_bh(dev);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ spin_unlock(&np->lock);
+ netif_tx_unlock_bh(dev);
+ printk(KERN_INFO "%s: link down.\n", dev->name);
+ }
+
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ if (netif_running(dev)) {
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ nv_enable_irq(dev);
+ }
ret = 0;
} else {
ret = -EINVAL;
}
- spin_unlock_irq(&np->lock);
return ret;
}
-static struct ethtool_ops ops = {
- .get_drvinfo = nv_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_wol = nv_get_wol,
- .set_wol = nv_set_wol,
- .get_settings = nv_get_settings,
- .set_settings = nv_set_settings,
- .get_regs_len = nv_get_regs_len,
- .get_regs = nv_get_regs,
- .nway_reset = nv_nway_reset,
- .get_perm_addr = ethtool_op_get_perm_addr,
-};
+static int nv_set_tso(struct net_device *dev, u32 value)
+{
+ struct fe_priv *np = netdev_priv(dev);
-static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+ if ((np->driver_data & DEV_HAS_CHECKSUM))
+ return ethtool_op_set_tso(dev, value);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
- spin_lock_irq(&np->lock);
+ ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
- /* save vlan group */
- np->vlangrp = grp;
+ ring->rx_pending = np->rx_ring_size;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+ ring->tx_pending = np->tx_ring_size;
+}
- if (grp) {
- /* enable vlan on MAC */
- np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
+static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len;
+ dma_addr_t ring_addr;
+
+ if (ring->rx_pending < RX_RING_MIN ||
+ ring->tx_pending < TX_RING_MIN ||
+ ring->rx_mini_pending != 0 ||
+ ring->rx_jumbo_pending != 0 ||
+ (np->desc_ver == DESC_VER_1 &&
+ (ring->rx_pending > RING_MAX_DESC_VER_1 ||
+ ring->tx_pending > RING_MAX_DESC_VER_1)) ||
+ (np->desc_ver != DESC_VER_1 &&
+ (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
+ ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
+ return -EINVAL;
+ }
+
+ /* allocate new rings */
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ rxtx_ring = pci_alloc_consistent(np->pci_dev,
+ sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
+ &ring_addr);
} else {
- /* disable vlan on MAC */
- np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
- np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
+ rxtx_ring = pci_alloc_consistent(np->pci_dev,
+ sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
+ &ring_addr);
+ }
+ rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL);
+ rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL);
+ tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL);
+ tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
+ tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
+ if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
+ /* fall back to old rings */
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ if (rxtx_ring)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
+ rxtx_ring, ring_addr);
+ } else {
+ if (rxtx_ring)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
+ rxtx_ring, ring_addr);
+ }
+ if (rx_skbuff)
+ kfree(rx_skbuff);
+ if (rx_dma)
+ kfree(rx_dma);
+ if (tx_skbuff)
+ kfree(tx_skbuff);
+ if (tx_dma)
+ kfree(tx_dma);
+ if (tx_dma_len)
+ kfree(tx_dma_len);
+ goto exit;
}
- writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ netif_tx_lock_bh(dev);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ nv_txrx_reset(dev);
+ /* drain queues */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+ /* delete queues */
+ free_rings(dev);
+ }
- spin_unlock_irq(&np->lock);
-};
+ /* set new values */
+ np->rx_ring_size = ring->rx_pending;
+ np->tx_ring_size = ring->tx_pending;
+ np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
+ np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
+ np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
+ } else {
+ np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
+ np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
+ }
+ np->rx_skbuff = (struct sk_buff**)rx_skbuff;
+ np->rx_dma = (dma_addr_t*)rx_dma;
+ np->tx_skbuff = (struct sk_buff**)tx_skbuff;
+ np->tx_dma = (dma_addr_t*)tx_dma;
+ np->tx_dma_len = (unsigned int*)tx_dma_len;
+ np->ring_addr = ring_addr;
+
+ memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
+ memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
+ memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
+ memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
+ memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
-static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+ if (netif_running(dev)) {
+ /* reinit driver view of the queues */
+ set_bufsize(dev);
+ if (nv_init_ring(dev)) {
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ }
+
+ /* reinit nic view of the queues */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(base);
+
+ /* restart engines */
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ spin_unlock(&np->lock);
+ netif_tx_unlock_bh(dev);
+ nv_enable_irq(dev);
+ }
+ return 0;
+exit:
+ return -ENOMEM;
+}
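/*
 * nv_set_ringparam() above follows a quiesce/swap/restart protocol:
 * 1. allocate the new descriptor ring and bookkeeping arrays up front,
 *    so an allocation failure leaves the old rings untouched;
 * 2. if the interface is up: mask irqs, take netif_tx_lock_bh and
 *    np->lock, stop the engines, then drain and free the old rings;
 * 3. swap in the new sizes, pointers and derived tx limits;
 * 4. reinit the driver and nic view of the queues and restart engines.
 */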
+
+static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
- /* nothing to do */
-};
+ struct fe_priv *np = netdev_priv(dev);
-static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
+ pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
+ pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
+ pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
+}
+
+static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int adv, bmcr;
+
+ if ((!np->autoneg && np->duplex == 0) ||
+ (np->autoneg && !pause->autoneg && np->duplex == 0)) {
+ printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
+ dev->name);
+ return -EINVAL;
+ }
+ if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
+ printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
+ return -EINVAL;
+ }
+
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ netif_tx_lock_bh(dev);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ spin_unlock(&np->lock);
+ netif_tx_unlock_bh(dev);
+ }
+
+ np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
+ if (pause->rx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
+ if (pause->tx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
+
+ if (np->autoneg && pause->autoneg) {
+ np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
+
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ adv |= ADVERTISE_PAUSE_ASYM;
+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+
+ if (netif_running(dev))
+ printk(KERN_INFO "%s: link down.\n", dev->name);
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ } else {
+ np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+ if (pause->rx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ if (pause->tx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+
+ if (!netif_running(dev))
+ nv_update_linkspeed(dev);
+ else
+ nv_update_pause(dev, np->pause_flags);
+ }
+
+ if (netif_running(dev)) {
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ nv_enable_irq(dev);
+ }
+ return 0;
+}
+
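The autoneg branch above only programs the PHY advertisement bits; the
resulting rx/tx pause state is resolved against the link partner later
(in nv_update_linkspeed, outside this hunk), where directions that were
not requested are presumably masked off; note the symmetric result in
the example below. A standalone sketch of the standard IEEE 802.3
resolution these bits imply (constants mirror <linux/mii.h>):

	#include <stdio.h>

	#define ADVERTISE_PAUSE_CAP  0x0400	/* symmetric pause */
	#define ADVERTISE_PAUSE_ASYM 0x0800	/* asymmetric pause */

	/* pause resolution for a full-duplex link */
	static void resolve_pause(unsigned adv, unsigned lpa, int *rx, int *tx)
	{
		*rx = *tx = 0;
		if (adv & lpa & ADVERTISE_PAUSE_CAP) {
			*rx = *tx = 1;		/* both sides symmetric */
		} else if (adv & lpa & ADVERTISE_PAUSE_ASYM) {
			if (adv & ADVERTISE_PAUSE_CAP)
				*rx = 1;	/* partner pauses its tx for us */
			else if (lpa & ADVERTISE_PAUSE_CAP)
				*tx = 1;	/* we pause our tx for the partner */
		}
	}

	int main(void)
	{
		int rx, tx;
		/* rx-only request (CAP|ASYM) against a symmetric partner */
		resolve_pause(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
			      ADVERTISE_PAUSE_CAP, &rx, &tx);
		printf("rx=%d tx=%d\n", rx, tx);	/* prints rx=1 tx=1 */
		return 0;
	}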
+static u32 nv_get_rx_csum(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
+}
+
+static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- int i;
- u32 msixmap = 0;
+ int retcode = 0;
- /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
- * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
- * the remaining 8 interrupts.
- */
- for (i = 0; i < 8; i++) {
- if ((irqmask >> i) & 0x1) {
- msixmap |= vector << (i << 2);
+ if (np->driver_data & DEV_HAS_CHECKSUM) {
+ if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
+ (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
+ /* already set or unset */
+ return 0;
}
- }
- writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
- msixmap = 0;
- for (i = 0; i < 8; i++) {
- if ((irqmask >> (i + 8)) & 0x1) {
- msixmap |= vector << (i << 2);
+ if (data) {
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+ } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
+ } else {
+ printk(KERN_INFO "Cannot disable rx checksum if vlan is enabled\n");
+ return -EINVAL;
+ }
+
+ if (netif_running(dev)) {
+ spin_lock_irq(&np->lock);
+ writel(np->txrxctl_bits, base + NvRegTxRxControl);
+ spin_unlock_irq(&np->lock);
}
+ } else {
+ return -EINVAL;
}
- writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
+
+ return retcode;
}
-static int nv_request_irq(struct net_device *dev)
+static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (np->driver_data & DEV_HAS_CHECKSUM)
+ return ethtool_op_set_tx_hw_csum(dev, data);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int nv_set_sg(struct net_device *dev, u32 data)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (np->driver_data & DEV_HAS_CHECKSUM)
+ return ethtool_op_set_sg(dev, data);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int nv_get_stats_count(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (np->driver_data & DEV_HAS_STATISTICS)
+ return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
+ else
+ return 0;
+}
+
+static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ /* update stats */
+ nv_do_stats_poll((unsigned long)dev);
+
+ memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
+}
+
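These counters are exported through the ETHTOOL_GSTATS ioctl; the
ethtool core sizes the transfer from .get_stats_count above, which it
also reports as n_stats in ETHTOOL_GDRVINFO. A minimal user-space
retrieval sketch (interface name hard-coded, error handling trimmed):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
		struct ethtool_stats *stats;
		struct ifreq ifr;
		unsigned int i;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&drvinfo;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0 || !drvinfo.n_stats)
			return 1;

		stats = calloc(1, sizeof(*stats) + drvinfo.n_stats * sizeof(__u64));
		stats->cmd = ETHTOOL_GSTATS;
		stats->n_stats = drvinfo.n_stats;
		ifr.ifr_data = (void *)stats;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			return 1;

		for (i = 0; i < stats->n_stats; i++)
			printf("stat[%u] = %llu\n", i,
			       (unsigned long long)stats->data[i]);
		free(stats);
		return 0;
	}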
+static int nv_self_test_count(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (np->driver_data & DEV_HAS_TEST_EXTENDED)
+ return NV_TEST_COUNT_EXTENDED;
+ else
+ return NV_TEST_COUNT_BASE;
+}
+
+static int nv_link_test(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int mii_status;
+
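+ /* BMSR latches a link loss until read; read twice so the second read reflects the current link state */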
+ mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+
+ /* check phy link status */
+ if (!(mii_status & BMSR_LSTATUS))
+ return 0;
+ else
+ return 1;
+}
+
+static int nv_register_test(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ int i = 0;
+ u32 orig_read, new_read;
+
+ do {
+ orig_read = readl(base + nv_registers_test[i].reg);
+
+ /* xor with mask to toggle bits */
+ orig_read ^= nv_registers_test[i].mask;
+
+ writel(orig_read, base + nv_registers_test[i].reg);
+
+ new_read = readl(base + nv_registers_test[i].reg);
+
+ if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
+ return 0;
+
+ /* restore original value */
+ orig_read ^= nv_registers_test[i].mask;
+ writel(orig_read, base + nv_registers_test[i].reg);
+
+ } while (nv_registers_test[++i].reg != 0);
+
+ return 1;
+}
+
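The nv_registers_test table walked above is defined earlier in this
patch, outside the hunk. Its expected shape, with hypothetical entries
for illustration only; a zero .reg terminates the walk:

	struct register_test {
		u32 reg;	/* register offset to probe */
		u32 mask;	/* writable bits to toggle and verify */
	};

	/* hypothetical entries; the real offsets and masks live
	 * elsewhere in the patch */
	static const struct register_test nv_registers_test[] = {
		{ NvRegMisc1,		0x03 },
		{ NvRegOffloadConfig,	0x03ff },
		{ 0, 0 }	/* terminator */
	};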
+static int nv_interrupt_test(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int ret = 1;
- int i;
+ int testcnt;
+ u32 save_msi_flags, save_poll_interval = 0;
- if (np->msi_flags & NV_MSI_X_CAPABLE) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
- np->msi_x_entry[i].entry = i;
+ if (netif_running(dev)) {
+ /* free current irq */
+ nv_free_irq(dev);
+ save_poll_interval = readl(base+NvRegPollingInterval);
+ }
+
+ /* flag to test interrupt handler */
+ np->intr_test = 0;
+
+ /* setup test irq */
+ save_msi_flags = np->msi_flags;
+ np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
+ np->msi_flags |= 0x001; /* setup 1 vector */
+ if (nv_request_irq(dev, 1))
+ return 0;
+
+ /* setup timer interrupt */
+ writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
+ writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+
+ nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+
+ /* wait for at least one interrupt */
+ msleep(100);
+
+ spin_lock_irq(&np->lock);
+
+ /* flag should be set within ISR */
+ testcnt = np->intr_test;
+ if (!testcnt)
+ ret = 2;
+
+ nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED))
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ else
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+
+ spin_unlock_irq(&np->lock);
+
+ nv_free_irq(dev);
+
+ np->msi_flags = save_msi_flags;
+
+ if (netif_running(dev)) {
+ writel(save_poll_interval, base + NvRegPollingInterval);
+ writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+ /* restore original irq */
+ if (nv_request_irq(dev, 0))
+ return 0;
+ }
+
+ return ret;
+}
+
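The np->intr_test flag polled above must be set from the interrupt
path, which is not part of this hunk. A hedged sketch of what that
test handler plausibly looks like (2.6.16-era three-argument ISR
signature; the MSI-X status register case is omitted for brevity):

	static irqreturn_t nv_nic_irq_test(int irq, void *data, struct pt_regs *regs)
	{
		struct net_device *dev = (struct net_device *) data;
		struct fe_priv *np = netdev_priv(dev);
		u8 __iomem *base = get_hwbase(dev);
		u32 events;

		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);	/* ack timer irq */

		if (!(events & NVREG_IRQ_TIMER))
			return IRQ_RETVAL(0);

		spin_lock(&np->lock);
		np->intr_test = 1;	/* observed by nv_interrupt_test() */
		spin_unlock(&np->lock);

		return IRQ_RETVAL(1);
	}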
+static int nv_loopback_test(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ struct sk_buff *tx_skb, *rx_skb;
+ dma_addr_t test_dma_addr;
+ u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
+ u32 Flags;
+ int len, i, pkt_len;
+ u8 *pkt_data;
+ u32 filter_flags = 0;
+ u32 misc1_flags = 0;
+ int ret = 1;
+
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ filter_flags = readl(base + NvRegPacketFilterFlags);
+ misc1_flags = readl(base + NvRegMisc1);
+ } else {
+ nv_txrx_reset(dev);
+ }
+
+ /* reinit driver view of the queues */
+ set_bufsize(dev);
+ nv_init_ring(dev);
+
+ /* setup hardware for loopback */
+ writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
+ writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
+
+ /* reinit nic view of the queues */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+
+ /* restart rx and tx engines */
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+
+ /* setup packet for tx */
+ pkt_len = ETH_DATA_LEN;
+ tx_skb = dev_alloc_skb(pkt_len);
+ pkt_data = skb_put(tx_skb, pkt_len);
+ for (i = 0; i < pkt_len; i++)
+ pkt_data[i] = (u8)(i & 0xff);
+ test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
+ tx_skb->end-tx_skb->data, PCI_DMA_TODEVICE);
+
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
+ np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ } else {
+ np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
+ np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+ np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ }
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(get_hwbase(dev));
+
+ msleep(500);
+
+ /* check for rx of the packet */
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
+ len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
+
+ } else {
+ Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
+ len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
+ }
+
+ if (Flags & NV_RX_AVAIL) {
+ ret = 0;
+ } else if (np->desc_ver == DESC_VER_1) {
+ if (Flags & NV_RX_ERROR)
+ ret = 0;
+ } else {
+ if (Flags & NV_RX2_ERROR) {
+ ret = 0;
}
- if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
- np->msi_flags |= NV_MSI_X_ENABLED;
- if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
- /* Request irq for rx handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_err;
- }
- /* Request irq for tx handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_free_rx;
- }
- /* Request irq for link and timer handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_free_tx;
- }
- /* map interrupts to their respective vector */
- writel(0, base + NvRegMSIXMap0);
- writel(0, base + NvRegMSIXMap1);
- set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
- set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
- set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
- } else {
- /* Request irq for all interrupts */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_err;
- }
+ }
- /* map interrupts to vector 0 */
- writel(0, base + NvRegMSIXMap0);
- writel(0, base + NvRegMSIXMap1);
+ if (ret) {
+ if (len != pkt_len) {
+ ret = 0;
+ dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
+ dev->name, len, pkt_len);
+ } else {
+ rx_skb = np->rx_skbuff[0];
+ for (i = 0; i < pkt_len; i++) {
+ if (rx_skb->data[i] != (u8)(i & 0xff)) {
+ ret = 0;
+ dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
+ dev->name, i);
+ break;
+ }
}
}
+ } else {
+ dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
}
- if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
- if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
- np->msi_flags |= NV_MSI_ENABLED;
- if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
- pci_disable_msi(np->pci_dev);
- np->msi_flags &= ~NV_MSI_ENABLED;
- goto out_err;
+
+ pci_unmap_single(np->pci_dev, test_dma_addr,
+ tx_skb->end-tx_skb->data,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(tx_skb);
+
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ nv_txrx_reset(dev);
+ /* drain rx and tx queues */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+
+ if (netif_running(dev)) {
+ writel(misc1_flags, base + NvRegMisc1);
+ writel(filter_flags, base + NvRegPacketFilterFlags);
+ nv_enable_irq(dev);
+ }
+
+ return ret;
+}
+
+static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int result;
+
+ memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
+
+ if (!nv_link_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[0] = 1;
+ }
+
+ if (test->flags & ETH_TEST_FL_OFFLINE) {
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
+ netif_tx_lock_bh(dev);
+ spin_lock_irq(&np->lock);
+ nv_disable_hw_interrupts(dev, np->irqmask);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ } else {
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
}
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ nv_txrx_reset(dev);
+ /* drain rx and tx queues */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+ spin_unlock_irq(&np->lock);
+ netif_tx_unlock_bh(dev);
+ }
- /* map interrupts to vector 0 */
- writel(0, base + NvRegMSIMap0);
- writel(0, base + NvRegMSIMap1);
- /* enable msi vector 0 */
- writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ if (!nv_register_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[1] = 1;
+ }
+
+ result = nv_interrupt_test(dev);
+ if (result != 1) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[2] = 1;
+ }
+ if (result == 0) {
+ /* bail out */
+ return;
+ }
+
+ if (!nv_loopback_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[3] = 1;
+ }
+
+ if (netif_running(dev)) {
+ /* reinit driver view of the queues */
+ set_bufsize(dev);
+ if (nv_init_ring(dev)) {
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ }
+ /* reinit nic view of the queues */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(base);
+ /* restart rx and tx engines */
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ netif_start_queue(dev);
+ nv_enable_hw_interrupts(dev, np->irqmask);
}
}
- if (ret != 0) {
- if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
- goto out_err;
- }
+}
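nv_self_test is reached through the ETHTOOL_TEST ioctl, which is what
"ethtool -t eth0 offline" issues; the failure flags land in the u64
buffer in the same order as the nv_etests_str strings. A compact sketch
of the ioctl side, with the extended test count of 4 (link, register,
interrupt, loopback) taken as an assumption:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		unsigned int i, count = 4;	/* assumed extended test count */
		struct ethtool_test *test =
			calloc(1, sizeof(*test) + count * sizeof(__u64));
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		test->cmd = ETHTOOL_TEST;
		test->flags = ETH_TEST_FL_OFFLINE;	/* run intrusive tests too */
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)test;
		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
			for (i = 0; i < count; i++)
				printf("test[%u]: %s\n", i,
				       test->data[i] ? "FAILED" : "ok");
		free(test);
		return 0;
	}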
- return 0;
-out_free_tx:
- free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
-out_free_rx:
- free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
-out_err:
- return 1;
+static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
+ break;
+ case ETH_SS_TEST:
+ memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
+ break;
+ }
}
-static void nv_free_irq(struct net_device *dev)
+static struct ethtool_ops ops = {
+ .get_drvinfo = nv_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_wol = nv_get_wol,
+ .set_wol = nv_set_wol,
+ .get_settings = nv_get_settings,
+ .set_settings = nv_set_settings,
+ .get_regs_len = nv_get_regs_len,
+ .get_regs = nv_get_regs,
+ .nway_reset = nv_nway_reset,
+ .get_perm_addr = ethtool_op_get_perm_addr,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = nv_set_tso,
+ .get_ringparam = nv_get_ringparam,
+ .set_ringparam = nv_set_ringparam,
+ .get_pauseparam = nv_get_pauseparam,
+ .set_pauseparam = nv_set_pauseparam,
+ .get_rx_csum = nv_get_rx_csum,
+ .set_rx_csum = nv_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = nv_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = nv_set_sg,
+ .get_strings = nv_get_strings,
+ .get_stats_count = nv_get_stats_count,
+ .get_ethtool_stats = nv_get_ethtool_stats,
+ .self_test_count = nv_self_test_count,
+ .self_test = nv_self_test,
+};
+
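The table is attached to the netdev in nv_probe; the hook-up itself is
outside this hunk, presumably the usual:

	SET_ETHTOOL_OPS(dev, &ops);	/* i.e. dev->ethtool_ops = &ops */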
+static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
struct fe_priv *np = get_nvpriv(dev);
- int i;
- if (np->msi_flags & NV_MSI_X_ENABLED) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
- free_irq(np->msi_x_entry[i].vector, dev);
- }
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
+ spin_lock_irq(&np->lock);
+
+ /* save vlan group */
+ np->vlangrp = grp;
+
+ if (grp) {
+ /* enable vlan on MAC */
+ np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
} else {
- free_irq(np->pci_dev->irq, dev);
- if (np->msi_flags & NV_MSI_ENABLED) {
- pci_disable_msi(np->pci_dev);
- np->msi_flags &= ~NV_MSI_ENABLED;
- }
+ /* disable vlan on MAC */
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
}
-}
+
+ writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+
+ spin_unlock_irq(&np->lock);
+}
+
+static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ /* nothing to do */
+}
static int nv_open(struct net_device *dev)
{
@@ -2813,6 +3886,9 @@ static int nv_open(struct net_device *dev)
writel(0, base + NvRegAdapterControl);
+ if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
+ writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
+
/* 2) initialize descriptor rings */
set_bufsize(dev);
oom = nv_init_ring(dev);
@@ -2829,7 +3905,7 @@ static int nv_open(struct net_device *dev)
/* 4) give hw rings */
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
/* 5) continue setup */
@@ -2871,7 +3947,8 @@ static int nv_open(struct net_device *dev)
base + NvRegAdapterControl);
writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
- writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
+ if (np->wolenabled)
+ writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
i = readl(base + NvRegPowerState);
if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
@@ -2887,7 +3964,7 @@ static int nv_open(struct net_device *dev)
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
pci_push(base);
- if (nv_request_irq(dev)) {
+ if (nv_request_irq(dev, 0)) {
goto out_drain;
}
@@ -2924,6 +4001,11 @@ static int nv_open(struct net_device *dev)
}
if (oom)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+
+ /* start statistics timer */
+ if (np->driver_data & DEV_HAS_STATISTICS)
+ mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
+
spin_unlock_irq(&np->lock);
return 0;
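The stats_poll timer armed above fires nv_do_stats_poll, introduced
elsewhere in this patch. A hedged sketch of its expected shape,
accumulating the NvReg counter registers into np->estats (the field
names here are illustrative) and re-arming itself:

	static void nv_do_stats_poll(unsigned long data)
	{
		struct net_device *dev = (struct net_device *) data;
		struct fe_priv *np = netdev_priv(dev);
		u8 __iomem *base = get_hwbase(dev);

		/* accumulate; the hardware counters are assumed to
		 * clear on read */
		np->estats.tx_bytes += readl(base + NvRegTxCnt);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		/* ... one readl() per remaining counter register ... */

		if (!np->in_shutdown)
			mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
	}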
@@ -2944,6 +4026,7 @@ static int nv_close(struct net_device *dev)
del_timer_sync(&np->oom_kick);
del_timer_sync(&np->nic_poll);
+ del_timer_sync(&np->stats_poll);
netif_stop_queue(dev);
spin_lock_irq(&np->lock);
@@ -3003,6 +4086,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
init_timer(&np->nic_poll);
np->nic_poll.data = (unsigned long) dev;
np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
+ init_timer(&np->stats_poll);
+ np->stats_poll.data = (unsigned long) dev;
+ np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
err = pci_enable_device(pci_dev);
if (err) {
@@ -3017,7 +4103,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (err < 0)
goto out_disable;
- if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL))
+ if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
np->register_size = NV_PCI_REGSZ_VER2;
else
np->register_size = NV_PCI_REGSZ_VER1;
@@ -3049,16 +4135,18 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* packet format 3: supports 40-bit addressing */
np->desc_ver = DESC_VER_3;
np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
- if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
- printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
- pci_name(pci_dev));
- } else {
- dev->features |= NETIF_F_HIGHDMA;
- printk(KERN_INFO "forcedeth: using HIGHDMA\n");
- }
- if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
- printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
- pci_name(pci_dev));
+ if (dma_64bit) {
+ if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
+ printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
+ pci_name(pci_dev));
+ } else {
+ dev->features |= NETIF_F_HIGHDMA;
+ printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+ }
+ if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
+ printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
+ pci_name(pci_dev));
+ }
}
} else if (id->driver_data & DEV_HAS_LARGEDESC) {
/* packet format 2: supports jumbo frames */
@@ -3091,13 +4179,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
}
np->msi_flags = 0;
- if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
+ if ((id->driver_data & DEV_HAS_MSI) && msi) {
np->msi_flags |= NV_MSI_CAPABLE;
}
- if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
+ if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
np->msi_flags |= NV_MSI_X_CAPABLE;
}
+ np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
+ if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
+ np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
+ }
+
err = -ENOMEM;
np->base = ioremap(addr, np->register_size);
if (!np->base)
@@ -3106,21 +4200,38 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->irq = pci_dev->irq;
+ np->rx_ring_size = RX_RING_DEFAULT;
+ np->tx_ring_size = TX_RING_DEFAULT;
+ np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
+ np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
+
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->rx_ring.orig = pci_alloc_consistent(pci_dev,
- sizeof(struct ring_desc) * (RX_RING + TX_RING),
+ sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
&np->ring_addr);
if (!np->rx_ring.orig)
goto out_unmap;
- np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
+ np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
} else {
np->rx_ring.ex = pci_alloc_consistent(pci_dev,
- sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+ sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
&np->ring_addr);
if (!np->rx_ring.ex)
goto out_unmap;
- np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
+ np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
}
+ np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
+ np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
+ np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
+ np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
+ np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
+ if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
+ goto out_freering;
+ memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
+ memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
+ memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
+ memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
+ memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
dev->open = nv_open;
dev->stop = nv_close;
@@ -3242,9 +4353,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (i == 33) {
printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
pci_name(pci_dev));
- goto out_freering;
+ goto out_error;
}
-
+
/* reset it */
phy_init(dev);
@@ -3256,7 +4367,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
err = register_netdev(dev);
if (err) {
printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
- goto out_freering;
+ goto out_error;
}
printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
@@ -3264,14 +4375,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
return 0;
-out_freering:
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
- np->rx_ring.orig, np->ring_addr);
- else
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
- np->rx_ring.ex, np->ring_addr);
+out_error:
pci_set_drvdata(pci_dev, NULL);
+out_freering:
+ free_rings(dev);
out_unmap:
iounmap(get_hwbase(dev));
out_relreg:
@@ -3287,15 +4394,11 @@ out:
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
- struct fe_priv *np = netdev_priv(dev);
unregister_netdev(dev);
/* free all structures */
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
- else
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
+ free_rings(dev);
iounmap(get_hwbase(dev));
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
@@ -3358,11 +4461,43 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
},
{0,},
};
@@ -3392,10 +4527,12 @@ module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
-module_param(disable_msi, int, 0);
-MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
-module_param(disable_msix, int, 0);
-MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
+module_param(msix, int, 0);
+MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
+module_param(dma_64bit, int, 0);
+MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
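For example, "modprobe forcedeth msi=0 msix=0 dma_64bit=0" loads the
driver with MSI/MSI-X and high DMA disabled; the inverted sense of
these options replaces the old disable_msi/disable_msix parameters
removed above.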
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");