Diffstat (limited to 'drivers/staging/et131x/et131x.c')
-rw-r--r--  drivers/staging/et131x/et131x.c  1323
1 file changed, 471 insertions, 852 deletions
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index 413da0d6b9f6..84bbcd48e264 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -143,7 +143,6 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere S
#define fMP_DEST_BROAD 0x00000002
/* MP_ADAPTER flags */
-#define fMP_ADAPTER_RECV_LOOKASIDE 0x00000004
#define fMP_ADAPTER_INTERRUPT_IN_USE 0x00000008
/* MP_SHARED flags */
@@ -176,22 +175,14 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere S
#define PARM_DMA_CACHE_DEF 0
/* RX defines */
-#define USE_FBR0 1
-#define FBR_CHUNKS 32
-#define MAX_DESC_PER_RING_RX 1024
+#define FBR_CHUNKS 32
+#define MAX_DESC_PER_RING_RX 1024
/* number of RFDs - default and min */
-#ifdef USE_FBR0
#define RFD_LOW_WATER_MARK 40
#define NIC_DEFAULT_NUM_RFD 1024
#define NUM_FBRS 2
-#else
-#define RFD_LOW_WATER_MARK 20
-#define NIC_DEFAULT_NUM_RFD 256
-#define NUM_FBRS 1
-#endif
-#define NIC_MIN_NUM_RFD 64
#define NUM_PACKETS_HANDLED 256
#define ALCATEL_MULTICAST_PKT 0x01000000
@@ -291,29 +282,20 @@ struct rx_status_block {
*/
struct fbr_lookup {
void *virt[MAX_DESC_PER_RING_RX];
- void *buffer1[MAX_DESC_PER_RING_RX];
- void *buffer2[MAX_DESC_PER_RING_RX];
u32 bus_high[MAX_DESC_PER_RING_RX];
u32 bus_low[MAX_DESC_PER_RING_RX];
void *ring_virtaddr;
dma_addr_t ring_physaddr;
void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
- u64 real_physaddr;
- u64 offset;
u32 local_full;
u32 num_entries;
- u32 buffsize;
+ dma_addr_t buffsize;
};
/*
* struct rx_ring is the sructure representing the adaptor's local
* reference(s) to the rings
- *
- ******************************************************************************
- * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1
- * and index 1 to refer to FRB0
- ******************************************************************************
*/
struct rx_ring {
struct fbr_lookup *fbr[NUM_FBRS];
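With this change fbr[0] always describes FBR0 (the small-buffer ring) and fbr[1] describes FBR1 (the large-buffer ring); the deleted note above documented the old, reversed convention. That is why later hunks swap the fbr[0]/fbr[1] subscripts and can index the array directly with the ring number reported by the packet status ring, roughly as in this sketch (the helper name is illustrative, not from the driver):

    static inline struct fbr_lookup *get_fbr(struct rx_ring *rx, u32 ring_index)
    {
        /* ring_index from the PSR: 0 selects FBR0, 1 selects FBR1 */
        return rx->fbr[ring_index];
    }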
@@ -332,9 +314,6 @@ struct rx_ring {
u32 num_rfd;
bool unfinished_receives;
-
- /* lookaside lists */
- struct kmem_cache *recv_lookaside;
};
/* TX defines */
@@ -866,28 +845,27 @@ static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
/* Setup the receive dma configuration register for normal operation */
u32 csr = 0x2000; /* FBR1 enable */
- if (adapter->rx_ring.fbr[0]->buffsize == 4096)
+ if (adapter->rx_ring.fbr[1]->buffsize == 4096)
csr |= 0x0800;
- else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
+ else if (adapter->rx_ring.fbr[1]->buffsize == 8192)
csr |= 0x1000;
- else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
+ else if (adapter->rx_ring.fbr[1]->buffsize == 16384)
csr |= 0x1800;
-#ifdef USE_FBR0
+
csr |= 0x0400; /* FBR0 enable */
- if (adapter->rx_ring.fbr[1]->buffsize == 256)
+ if (adapter->rx_ring.fbr[0]->buffsize == 256)
csr |= 0x0100;
- else if (adapter->rx_ring.fbr[1]->buffsize == 512)
+ else if (adapter->rx_ring.fbr[0]->buffsize == 512)
csr |= 0x0200;
- else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
+ else if (adapter->rx_ring.fbr[0]->buffsize == 1024)
csr |= 0x0300;
-#endif
writel(csr, &adapter->regs->rxdma.csr);
csr = readl(&adapter->regs->rxdma.csr);
- if ((csr & 0x00020000) != 0) {
+ if (csr & 0x00020000) {
udelay(5);
csr = readl(&adapter->regs->rxdma.csr);
- if ((csr & 0x00020000) != 0) {
+ if (csr & 0x00020000) {
dev_err(&adapter->pdev->dev,
"RX Dma failed to exit halt state. CSR 0x%08x\n",
csr);
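With USE_FBR0 gone, both rings are always enabled and the CSR size fields are taken straight from fbr[]->buffsize: FBR1 uses 0x0800/0x1000/0x1800 for 4096/8192/16384-byte buffers (2048 needs no extra bits), FBR0 uses 0x0100/0x0200/0x0300 for 256/512/1024. The same mapping as a standalone sketch (helper names are illustrative, values are the ones shown in the hunk):

    static u32 csr_fbr1_size_bits(u32 buffsize)
    {
        if (buffsize == 4096)
            return 0x0800;
        if (buffsize == 8192)
            return 0x1000;
        if (buffsize == 16384)
            return 0x1800;
        return 0;       /* 2048-byte buffers: no extra bits */
    }

    static u32 csr_fbr0_size_bits(u32 buffsize)
    {
        if (buffsize == 256)
            return 0x0100;
        if (buffsize == 512)
            return 0x0200;
        if (buffsize == 1024)
            return 0x0300;
        return 0;
    }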
@@ -1758,22 +1736,8 @@ static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
*/
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
- u16 imr;
- u16 isr;
u16 lcr2;
- et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
- et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);
-
- /* Set the link status interrupt only. Bad behavior when link status
- * and auto neg are set, we run into a nested interrupt problem
- */
- imr |= (ET_PHY_INT_MASK_AUTONEGSTAT |
- ET_PHY_INT_MASK_LINKSTAT |
- ET_PHY_INT_MASK_ENABLE);
-
- et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);
-
/* Set the LED behavior such that LED 1 indicates speed (off =
* 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
* link and activity (on for link, blink off for activity).
@@ -1798,7 +1762,7 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter)
}
/**
- * et131x_configure_global_regs - configure JAGCore global regs
+ * et131x_configure_global_regs - configure JAGCore global regs
* @adapter: pointer to our adapter structure
*
* Used to configure the global registers on the JAGCore
@@ -1856,29 +1820,22 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
u32 entry;
u32 psr_num_des;
unsigned long flags;
+ u8 id;
/* Halt RXDMA to perform the reconfigure. */
et131x_rx_dma_disable(adapter);
- /* Load the completion writeback physical address
- *
- * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
- * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
- * are ever returned, make sure the high part is retrieved here
- * before storing the adjusted address.
- */
- writel((u32) ((u64)rx_local->rx_status_bus >> 32),
- &rx_dma->dma_wb_base_hi);
- writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
+ /* Load the completion writeback physical address */
+ writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
+ writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);
memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
/* Set the address and parameters of the packet status ring into the
* 1310's registers
*/
- writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
- &rx_dma->psr_base_hi);
- writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
+ writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
+ writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
writel(0, &rx_dma->psr_full_offset);
@@ -1891,56 +1848,56 @@ static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
/* These local variables track the PSR in the adapter structure */
rx_local->local_psr_full = 0;
- /* Now's the best time to initialize FBR1 contents */
- fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
- for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
- fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
- fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
- fbr_entry->word2 = entry;
- fbr_entry++;
- }
-
- /* Set the address and parameters of Free buffer ring 1 (and 0 if
- * required) into the 1310's registers
- */
- writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
- &rx_dma->fbr1_base_hi);
- writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
- writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
- writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
-
- /* This variable tracks the free buffer ring 1 full position, so it
- * has to match the above.
- */
- rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
- writel(
- ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
- &rx_dma->fbr1_min_des);
-
-#ifdef USE_FBR0
- /* Now's the best time to initialize FBR0 contents */
- fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
- for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
- fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
- fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
- fbr_entry->word2 = entry;
- fbr_entry++;
- }
+ for (id = 0; id < NUM_FBRS; id++) {
+ u32 *num_des;
+ u32 *full_offset;
+ u32 *min_des;
+ u32 *base_hi;
+ u32 *base_lo;
+
+ if (id == 0) {
+ num_des = &rx_dma->fbr0_num_des;
+ full_offset = &rx_dma->fbr0_full_offset;
+ min_des = &rx_dma->fbr0_min_des;
+ base_hi = &rx_dma->fbr0_base_hi;
+ base_lo = &rx_dma->fbr0_base_lo;
+ } else {
+ num_des = &rx_dma->fbr1_num_des;
+ full_offset = &rx_dma->fbr1_full_offset;
+ min_des = &rx_dma->fbr1_min_des;
+ base_hi = &rx_dma->fbr1_base_hi;
+ base_lo = &rx_dma->fbr1_base_lo;
+ }
- writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
- &rx_dma->fbr0_base_hi);
- writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
- writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
- writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
+ /* Now's the best time to initialize FBR contents */
+ fbr_entry =
+ (struct fbr_desc *) rx_local->fbr[id]->ring_virtaddr;
+ for (entry = 0;
+ entry < rx_local->fbr[id]->num_entries; entry++) {
+ fbr_entry->addr_hi = rx_local->fbr[id]->bus_high[entry];
+ fbr_entry->addr_lo = rx_local->fbr[id]->bus_low[entry];
+ fbr_entry->word2 = entry;
+ fbr_entry++;
+ }
- /* This variable tracks the free buffer ring 0 full position, so it
- * has to match the above.
- */
- rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
- writel(
- ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
- &rx_dma->fbr0_min_des);
-#endif
+ /* Set the address and parameters of Free buffer ring 1 and 0
+ * into the 1310's registers
+ */
+ writel(upper_32_bits(rx_local->fbr[id]->ring_physaddr),
+ base_hi);
+ writel(lower_32_bits(rx_local->fbr[id]->ring_physaddr),
+ base_lo);
+ writel(rx_local->fbr[id]->num_entries - 1, num_des);
+ writel(ET_DMA10_WRAP, full_offset);
+
+ /* This variable tracks the free buffer ring full position,
+ * so it has to match the above.
+ */
+ rx_local->fbr[id]->local_full = ET_DMA10_WRAP;
+ writel(((rx_local->fbr[id]->num_entries *
+ LO_MARK_PERCENT_FOR_RX) / 100) - 1,
+ min_des);
+ }
/* Program the number of packets we will receive before generating an
* interrupt.
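upper_32_bits() and lower_32_bits() come from <linux/kernel.h> and replace the open-coded (u32)((u64)addr >> 32) casts throughout this function. The write pattern used for each split base-register pair is, in outline (the helper name is illustrative):

    #include <linux/io.h>
    #include <linux/kernel.h>

    static void write_split_base(dma_addr_t addr, u32 __iomem *hi, u32 __iomem *lo)
    {
        writel(upper_32_bits(addr), hi);    /* bits 63:32 (0 with 32-bit DMA) */
        writel(lower_32_bits(addr), lo);    /* bits 31:0 */
    }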
@@ -1971,18 +1928,19 @@ static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
/* Load the hardware with the start of the transmit descriptor ring. */
- writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
+ writel(upper_32_bits(adapter->tx_ring.tx_desc_ring_pa),
&txdma->pr_base_hi);
- writel((u32) adapter->tx_ring.tx_desc_ring_pa,
+ writel(lower_32_bits(adapter->tx_ring.tx_desc_ring_pa),
&txdma->pr_base_lo);
/* Initialise the transmit DMA engine */
writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
/* Load the completion writeback physical address */
- writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
- &txdma->dma_wb_base_hi);
- writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
+ writel(upper_32_bits(adapter->tx_ring.tx_status_pa),
+ &txdma->dma_wb_base_hi);
+ writel(lower_32_bits(adapter->tx_ring.tx_status_pa),
+ &txdma->dma_wb_base_lo);
*adapter->tx_ring.tx_status = 0;
@@ -2267,31 +2225,6 @@ static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
}
/**
- * et131x_align_allocated_memory - Align allocated memory on a given boundary
- * @adapter: pointer to our adapter structure
- * @phys_addr: pointer to Physical address
- * @offset: pointer to the offset variable
- * @mask: correct mask
- */
-static void et131x_align_allocated_memory(struct et131x_adapter *adapter,
- u64 *phys_addr, u64 *offset,
- u64 mask)
-{
- u64 new_addr = *phys_addr & ~mask;
-
- *offset = 0;
-
- if (new_addr != *phys_addr) {
- /* Move to next aligned block */
- new_addr += mask + 1;
- /* Return offset for adjusting virt addr */
- *offset = new_addr - *phys_addr;
- /* Return new physical address */
- *phys_addr = new_addr;
- }
-}
-
-/**
* et131x_rx_dma_memory_alloc
* @adapter: pointer to our private adapter structure
*
@@ -2302,19 +2235,19 @@ static void et131x_align_allocated_memory(struct et131x_adapter *adapter,
*/
static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
+ u8 id;
u32 i, j;
u32 bufsize;
- u32 pktstat_ringsize, fbr_chunksize;
+ u32 pktstat_ringsize;
+ u32 fbr_chunksize;
struct rx_ring *rx_ring;
/* Setup some convenience pointers */
rx_ring = &adapter->rx_ring;
/* Alloc memory for the lookup table */
-#ifdef USE_FBR0
- rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
-#endif
rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
+ rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
/* The first thing we will do is configure the sizes of the buffer
* rings. These will change based on jumbo packet support. Larger
@@ -2335,211 +2268,85 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
*/
if (adapter->registry_jumbo_packet < 2048) {
-#ifdef USE_FBR0
- rx_ring->fbr[1]->buffsize = 256;
- rx_ring->fbr[1]->num_entries = 512;
-#endif
- rx_ring->fbr[0]->buffsize = 2048;
+ rx_ring->fbr[0]->buffsize = 256;
rx_ring->fbr[0]->num_entries = 512;
+ rx_ring->fbr[1]->buffsize = 2048;
+ rx_ring->fbr[1]->num_entries = 512;
} else if (adapter->registry_jumbo_packet < 4096) {
-#ifdef USE_FBR0
- rx_ring->fbr[1]->buffsize = 512;
- rx_ring->fbr[1]->num_entries = 1024;
-#endif
- rx_ring->fbr[0]->buffsize = 4096;
- rx_ring->fbr[0]->num_entries = 512;
+ rx_ring->fbr[0]->buffsize = 512;
+ rx_ring->fbr[0]->num_entries = 1024;
+ rx_ring->fbr[1]->buffsize = 4096;
+ rx_ring->fbr[1]->num_entries = 512;
} else {
-#ifdef USE_FBR0
- rx_ring->fbr[1]->buffsize = 1024;
- rx_ring->fbr[1]->num_entries = 768;
-#endif
- rx_ring->fbr[0]->buffsize = 16384;
- rx_ring->fbr[0]->num_entries = 128;
+ rx_ring->fbr[0]->buffsize = 1024;
+ rx_ring->fbr[0]->num_entries = 768;
+ rx_ring->fbr[1]->buffsize = 16384;
+ rx_ring->fbr[1]->num_entries = 128;
}
-#ifdef USE_FBR0
adapter->rx_ring.psr_num_entries =
- adapter->rx_ring.fbr[1]->num_entries +
- adapter->rx_ring.fbr[0]->num_entries;
-#else
- adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
-#endif
+ adapter->rx_ring.fbr[0]->num_entries +
+ adapter->rx_ring.fbr[1]->num_entries;
- /* Allocate an area of memory for Free Buffer Ring 1 */
- bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
- 0xfff;
- rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
+ for (id = 0; id < NUM_FBRS; id++) {
+ /* Allocate an area of memory for Free Buffer Ring */
+ bufsize =
+ (sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries);
+ rx_ring->fbr[id]->ring_virtaddr =
+ dma_alloc_coherent(&adapter->pdev->dev,
bufsize,
- &rx_ring->fbr[0]->ring_physaddr,
+ &rx_ring->fbr[id]->ring_physaddr,
GFP_KERNEL);
- if (!rx_ring->fbr[0]->ring_virtaddr) {
- dev_err(&adapter->pdev->dev,
- "Cannot alloc memory for Free Buffer Ring 1\n");
- return -ENOMEM;
- }
-
- /* Save physical address
- *
- * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
- * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
- * are ever returned, make sure the high part is retrieved here
- * before storing the adjusted address.
- */
- rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;
-
- /* Align Free Buffer Ring 1 on a 4K boundary */
- et131x_align_allocated_memory(adapter,
- &rx_ring->fbr[0]->real_physaddr,
- &rx_ring->fbr[0]->offset, 0x0FFF);
-
- rx_ring->fbr[0]->ring_virtaddr =
- (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
- rx_ring->fbr[0]->offset);
-
-#ifdef USE_FBR0
- /* Allocate an area of memory for Free Buffer Ring 0 */
- bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
- 0xfff;
- rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
- bufsize,
- &rx_ring->fbr[1]->ring_physaddr,
- GFP_KERNEL);
- if (!rx_ring->fbr[1]->ring_virtaddr) {
- dev_err(&adapter->pdev->dev,
- "Cannot alloc memory for Free Buffer Ring 0\n");
- return -ENOMEM;
- }
-
- /* Save physical address
- *
- * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
- * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
- * are ever returned, make sure the high part is retrieved here before
- * storing the adjusted address.
- */
- rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;
-
- /* Align Free Buffer Ring 0 on a 4K boundary */
- et131x_align_allocated_memory(adapter,
- &rx_ring->fbr[1]->real_physaddr,
- &rx_ring->fbr[1]->offset, 0x0FFF);
-
- rx_ring->fbr[1]->ring_virtaddr =
- (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
- rx_ring->fbr[1]->offset);
-#endif
- for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
- u64 fbr1_tmp_physaddr;
- u64 fbr1_offset;
- u32 fbr1_align;
-
- /* This code allocates an area of memory big enough for N
- * free buffers + (buffer_size - 1) so that the buffers can
- * be aligned on 4k boundaries. If each buffer were aligned
- * to a buffer_size boundary, the effect would be to double
- * the size of FBR0. By allocating N buffers at once, we
- * reduce this overhead.
- */
- if (rx_ring->fbr[0]->buffsize > 4096)
- fbr1_align = 4096;
- else
- fbr1_align = rx_ring->fbr[0]->buffsize;
-
- fbr_chunksize =
- (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
- rx_ring->fbr[0]->mem_virtaddrs[i] =
- dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
- &rx_ring->fbr[0]->mem_physaddrs[i],
- GFP_KERNEL);
-
- if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
+ if (!rx_ring->fbr[id]->ring_virtaddr) {
dev_err(&adapter->pdev->dev,
- "Could not alloc memory\n");
+ "Cannot alloc memory for Free Buffer Ring %d\n", id);
return -ENOMEM;
}
-
- /* See NOTE in "Save Physical Address" comment above */
- fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];
-
- et131x_align_allocated_memory(adapter,
- &fbr1_tmp_physaddr,
- &fbr1_offset, (fbr1_align - 1));
-
- for (j = 0; j < FBR_CHUNKS; j++) {
- u32 index = (i * FBR_CHUNKS) + j;
-
- /* Save the Virtual address of this index for quick
- * access later
- */
- rx_ring->fbr[0]->virt[index] =
- (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
- (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;
-
- /* now store the physical address in the descriptor
- * so the device can access it
- */
- rx_ring->fbr[0]->bus_high[index] =
- (u32) (fbr1_tmp_physaddr >> 32);
- rx_ring->fbr[0]->bus_low[index] =
- (u32) fbr1_tmp_physaddr;
-
- fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;
-
- rx_ring->fbr[0]->buffer1[index] =
- rx_ring->fbr[0]->virt[index];
- rx_ring->fbr[0]->buffer2[index] =
- rx_ring->fbr[0]->virt[index] - 4;
- }
}
-#ifdef USE_FBR0
- /* Same for FBR0 (if in use) */
- for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
- u64 fbr0_tmp_physaddr;
- u64 fbr0_offset;
+ for (id = 0; id < NUM_FBRS; id++) {
+ fbr_chunksize = (FBR_CHUNKS * rx_ring->fbr[id]->buffsize);
- fbr_chunksize =
- ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
- rx_ring->fbr[1]->mem_virtaddrs[i] =
- dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
- &rx_ring->fbr[1]->mem_physaddrs[i],
- GFP_KERNEL);
+ for (i = 0;
+ i < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS); i++) {
+ dma_addr_t fbr_tmp_physaddr;
- if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
- dev_err(&adapter->pdev->dev,
- "Could not alloc memory\n");
- return -ENOMEM;
- }
-
- /* See NOTE in "Save Physical Address" comment above */
- fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];
+ rx_ring->fbr[id]->mem_virtaddrs[i] = dma_alloc_coherent(
+ &adapter->pdev->dev, fbr_chunksize,
+ &rx_ring->fbr[id]->mem_physaddrs[i],
+ GFP_KERNEL);
- et131x_align_allocated_memory(adapter,
- &fbr0_tmp_physaddr,
- &fbr0_offset,
- rx_ring->fbr[1]->buffsize - 1);
+ if (!rx_ring->fbr[id]->mem_virtaddrs[i]) {
+ dev_err(&adapter->pdev->dev,
+ "Could not alloc memory\n");
+ return -ENOMEM;
+ }
- for (j = 0; j < FBR_CHUNKS; j++) {
- u32 index = (i * FBR_CHUNKS) + j;
+ /* See NOTE in "Save Physical Address" comment above */
+ fbr_tmp_physaddr = rx_ring->fbr[id]->mem_physaddrs[i];
- rx_ring->fbr[1]->virt[index] =
- (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
- (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;
+ for (j = 0; j < FBR_CHUNKS; j++) {
+ u32 index = (i * FBR_CHUNKS) + j;
- rx_ring->fbr[1]->bus_high[index] =
- (u32) (fbr0_tmp_physaddr >> 32);
- rx_ring->fbr[1]->bus_low[index] =
- (u32) fbr0_tmp_physaddr;
+ /* Save the Virtual address of this index for
+ * quick access later
+ */
+ rx_ring->fbr[id]->virt[index] =
+ (u8 *) rx_ring->fbr[id]->mem_virtaddrs[i] +
+ (j * rx_ring->fbr[id]->buffsize);
- fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;
+ /* now store the physical address in the
+ * descriptor so the device can access it
+ */
+ rx_ring->fbr[id]->bus_high[index] =
+ upper_32_bits(fbr_tmp_physaddr);
+ rx_ring->fbr[id]->bus_low[index] =
+ lower_32_bits(fbr_tmp_physaddr);
- rx_ring->fbr[1]->buffer1[index] =
- rx_ring->fbr[1]->virt[index];
- rx_ring->fbr[1]->buffer2[index] =
- rx_ring->fbr[1]->virt[index] - 4;
+ fbr_tmp_physaddr += rx_ring->fbr[id]->buffsize;
+ }
}
}
-#endif
/* Allocate an area of memory for FIFO of Packet Status ring entries */
pktstat_ringsize =
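The manual alignment machinery (et131x_align_allocated_memory(), the +0xfff / +align-1 over-allocation, and the real_physaddr/offset bookkeeping) can be dropped because dma_alloc_coherent() already returns memory aligned to at least the smallest page order covering the requested size. What remains is one coherent chunk per FBR_CHUNKS buffers, carved up by plain offsets; roughly as below (a sketch with shortened names, not a drop-in):

    #include <linux/dma-mapping.h>

    static int fbr_alloc_chunk(struct device *dev, struct fbr_lookup *fbr, u32 i)
    {
        dma_addr_t phys;
        void *virt;
        u32 j;

        virt = dma_alloc_coherent(dev, FBR_CHUNKS * fbr->buffsize, &phys,
                                  GFP_KERNEL);
        if (!virt)
            return -ENOMEM;

        fbr->mem_virtaddrs[i] = virt;
        fbr->mem_physaddrs[i] = phys;

        for (j = 0; j < FBR_CHUNKS; j++) {
            u32 index = i * FBR_CHUNKS + j;

            fbr->virt[index] = (u8 *)virt + j * fbr->buffsize;
            fbr->bus_high[index] = upper_32_bits(phys);
            fbr->bus_low[index] = lower_32_bits(phys);
            phys += fbr->buffsize;
        }
        return 0;
    }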
@@ -2578,21 +2385,6 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
pr_info("PRS %llx\n", (unsigned long long)rx_ring->rx_status_bus);
- /* Recv
- * kmem_cache_create initializes a lookaside list. After successful
- * creation, nonpaged fixed-size blocks can be allocated from and
- * freed to the lookaside list.
- * RFDs will be allocated from this pool.
- */
- rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
- sizeof(struct rfd),
- 0,
- SLAB_CACHE_DMA |
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
-
/* The RFDs are going to be put on lists later on, so initialize the
* lists now.
*/
@@ -2606,6 +2398,7 @@ static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
*/
static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
+ u8 id;
u32 index;
u32 bufsize;
u32 pktstat_ringsize;
@@ -2624,92 +2417,45 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
list_del(&rfd->list_node);
rfd->skb = NULL;
- kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
+ kfree(rfd);
}
- /* Free Free Buffer Ring 1 */
- if (rx_ring->fbr[0]->ring_virtaddr) {
- /* First the packet memory */
- for (index = 0; index <
- (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
- if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
- u32 fbr1_align;
-
- if (rx_ring->fbr[0]->buffsize > 4096)
- fbr1_align = 4096;
- else
- fbr1_align = rx_ring->fbr[0]->buffsize;
-
- bufsize =
- (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
- fbr1_align - 1;
-
- dma_free_coherent(&adapter->pdev->dev,
- bufsize,
- rx_ring->fbr[0]->mem_virtaddrs[index],
- rx_ring->fbr[0]->mem_physaddrs[index]);
-
- rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
- }
- }
-
- /* Now the FIFO itself */
- rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
- rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
-
- bufsize =
- (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
- 0xfff;
-
- dma_free_coherent(&adapter->pdev->dev, bufsize,
- rx_ring->fbr[0]->ring_virtaddr,
- rx_ring->fbr[0]->ring_physaddr);
-
- rx_ring->fbr[0]->ring_virtaddr = NULL;
- }
+ /* Free Free Buffer Rings */
+ for (id = 0; id < NUM_FBRS; id++) {
+ if (!rx_ring->fbr[id]->ring_virtaddr)
+ continue;
-#ifdef USE_FBR0
- /* Now the same for Free Buffer Ring 0 */
- if (rx_ring->fbr[1]->ring_virtaddr) {
/* First the packet memory */
- for (index = 0; index <
- (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
- if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
+ for (index = 0;
+ index < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS);
+ index++) {
+ if (rx_ring->fbr[id]->mem_virtaddrs[index]) {
bufsize =
- (rx_ring->fbr[1]->buffsize *
- (FBR_CHUNKS + 1)) - 1;
+ rx_ring->fbr[id]->buffsize * FBR_CHUNKS;
dma_free_coherent(&adapter->pdev->dev,
bufsize,
- rx_ring->fbr[1]->mem_virtaddrs[index],
- rx_ring->fbr[1]->mem_physaddrs[index]);
+ rx_ring->fbr[id]->mem_virtaddrs[index],
+ rx_ring->fbr[id]->mem_physaddrs[index]);
- rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
+ rx_ring->fbr[id]->mem_virtaddrs[index] = NULL;
}
}
- /* Now the FIFO itself */
- rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
- rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
-
bufsize =
- (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
- 0xfff;
+ sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries;
- dma_free_coherent(&adapter->pdev->dev,
- bufsize,
- rx_ring->fbr[1]->ring_virtaddr,
- rx_ring->fbr[1]->ring_physaddr);
+ dma_free_coherent(&adapter->pdev->dev, bufsize,
+ rx_ring->fbr[id]->ring_virtaddr,
+ rx_ring->fbr[id]->ring_physaddr);
- rx_ring->fbr[1]->ring_virtaddr = NULL;
+ rx_ring->fbr[id]->ring_virtaddr = NULL;
}
-#endif
/* Free Packet Status Ring */
if (rx_ring->ps_ring_virtaddr) {
- pktstat_ringsize =
- sizeof(struct pkt_stat_desc) *
- adapter->rx_ring.psr_num_entries;
+ pktstat_ringsize = sizeof(struct pkt_stat_desc) *
+ adapter->rx_ring.psr_num_entries;
dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
rx_ring->ps_ring_virtaddr,
@@ -2726,18 +2472,9 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
rx_ring->rx_status_block = NULL;
}
- /* Destroy the lookaside (RFD) pool */
- if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
- kmem_cache_destroy(rx_ring->recv_lookaside);
- adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
- }
-
/* Free the FBR Lookup Table */
-#ifdef USE_FBR0
- kfree(rx_ring->fbr[1]);
-#endif
-
kfree(rx_ring->fbr[0]);
+ kfree(rx_ring->fbr[1]);
/* Reset Counters */
rx_ring->num_ready_recv = 0;
@@ -2751,8 +2488,7 @@ static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
*/
static int et131x_init_recv(struct et131x_adapter *adapter)
{
- int status = -ENOMEM;
- struct rfd *rfd = NULL;
+ struct rfd *rfd;
u32 rfdct;
u32 numrfd = 0;
struct rx_ring *rx_ring;
@@ -2762,14 +2498,11 @@ static int et131x_init_recv(struct et131x_adapter *adapter)
/* Setup each RFD */
for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
- rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
- GFP_ATOMIC | GFP_DMA);
+ rfd = kzalloc(sizeof(struct rfd), GFP_ATOMIC | GFP_DMA);
if (!rfd) {
- dev_err(&adapter->pdev->dev,
- "Couldn't alloc RFD out of kmem_cache\n");
- status = -ENOMEM;
- continue;
+ dev_err(&adapter->pdev->dev, "Couldn't alloc RFD\n");
+ return -ENOMEM;
}
rfd->skb = NULL;
@@ -2782,17 +2515,7 @@ static int et131x_init_recv(struct et131x_adapter *adapter)
numrfd++;
}
- if (numrfd > NIC_MIN_NUM_RFD)
- status = 0;
-
- rx_ring->num_rfd = numrfd;
-
- if (status != 0) {
- kmem_cache_free(rx_ring->recv_lookaside, rfd);
- dev_err(&adapter->pdev->dev,
- "Allocation problems in et131x_init_recv\n");
- }
- return status;
+ return 0;
}
/**
@@ -2831,51 +2554,34 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
/* We don't use any of the OOB data besides status. Otherwise, we
* need to clean up OOB data
*/
- if (
-#ifdef USE_FBR0
- (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
-#endif
- (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
+ if (buff_index < rx_local->fbr[ring_index]->num_entries) {
+ u32 *offset;
+ struct fbr_desc *next;
+
spin_lock_irqsave(&adapter->fbr_lock, flags);
- if (ring_index == 1) {
- struct fbr_desc *next = (struct fbr_desc *)
- (rx_local->fbr[0]->ring_virtaddr) +
- INDEX10(rx_local->fbr[0]->local_full);
+ if (ring_index == 0)
+ offset = &rx_dma->fbr0_full_offset;
+ else
+ offset = &rx_dma->fbr1_full_offset;
- /* Handle the Free Buffer Ring advancement here. Write
- * the PA / Buffer Index for the returned buffer into
- * the oldest (next to be freed)FBR entry
- */
- next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
- next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
- next->word2 = buff_index;
-
- writel(bump_free_buff_ring(
- &rx_local->fbr[0]->local_full,
- rx_local->fbr[0]->num_entries - 1),
- &rx_dma->fbr1_full_offset);
- }
-#ifdef USE_FBR0
- else {
- struct fbr_desc *next = (struct fbr_desc *)
- rx_local->fbr[1]->ring_virtaddr +
- INDEX10(rx_local->fbr[1]->local_full);
+ next = (struct fbr_desc *)
+ (rx_local->fbr[ring_index]->ring_virtaddr) +
+ INDEX10(rx_local->fbr[ring_index]->local_full);
+
+ /* Handle the Free Buffer Ring advancement here. Write
+ * the PA / Buffer Index for the returned buffer into
+ * the oldest (next to be freed) FBR entry
+ */
+ next->addr_hi = rx_local->fbr[ring_index]->bus_high[buff_index];
+ next->addr_lo = rx_local->fbr[ring_index]->bus_low[buff_index];
+ next->word2 = buff_index;
+
+ writel(bump_free_buff_ring(
+ &rx_local->fbr[ring_index]->local_full,
+ rx_local->fbr[ring_index]->num_entries - 1),
+ offset);
- /* Handle the Free Buffer Ring advancement here. Write
- * the PA / Buffer Index for the returned buffer into
- * the oldest (next to be freed) FBR entry
- */
- next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
- next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
- next->word2 = buff_index;
-
- writel(bump_free_buff_ring(
- &rx_local->fbr[1]->local_full,
- rx_local->fbr[1]->num_entries - 1),
- &rx_dma->fbr0_full_offset);
- }
-#endif
spin_unlock_irqrestore(&adapter->fbr_lock, flags);
} else {
dev_err(&adapter->pdev->dev,
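local_full and the fbrX_full_offset registers hold a 10-bit ring offset plus a wrap bit that flips on each pass, which is what lets a full ring be told apart from an empty one; bump_free_buff_ring() advances such a value. A sketch of that arithmetic follows (the mask/wrap constants are assumptions based on the ET_DMA10_* naming, not quoted from et131x.h):

    #define EX_DMA10_MASK 0x03ff    /* assumed: low 10 bits hold the offset */
    #define EX_DMA10_WRAP 0x0400    /* assumed: toggled on every wrap-around */

    static u32 ex_bump_free_buff_ring(u32 val, u32 limit)
    {
        val++;
        if ((val & EX_DMA10_MASK) > limit) {    /* walked past the last entry */
            val &= ~EX_DMA10_MASK;              /* back to offset 0 ... */
            val ^= EX_DMA10_WRAP;               /* ... and flip the wrap bit */
        }
        return val;
    }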
@@ -2919,6 +2625,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
u32 len;
u32 word0;
u32 word1;
+ struct sk_buff *skb;
/* RX Status block is written by the DMA engine prior to every
* interrupt. It contains the next to be used entry in the Packet
@@ -2929,16 +2636,14 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
/* Check the PSR and wrap bits do not match */
if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
- /* Looks like this ring is not updated yet */
- return NULL;
+ return NULL; /* Looks like this ring is not updated yet */
/* The packet status ring indicates that data is available. */
psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
(rx_local->local_psr_full & 0xFFF);
- /* Grab any information that is required once the PSR is
- * advanced, since we can no longer rely on the memory being
- * accurate
+ /* Grab any information that is required once the PSR is advanced,
+ * since we can no longer rely on the memory being accurate
*/
len = psr->word1 & 0xFFFF;
ring_index = (psr->word1 >> 26) & 0x03;
@@ -2955,40 +2660,24 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
rx_local->local_psr_full ^= 0x1000;
}
- writel(rx_local->local_psr_full,
- &adapter->regs->rxdma.psr_full_offset);
-
-#ifndef USE_FBR0
- if (ring_index != 1)
- return NULL;
-#endif
+ writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);
-#ifdef USE_FBR0
if (ring_index > 1 ||
- (ring_index == 0 &&
- buff_index > rx_local->fbr[1]->num_entries - 1) ||
- (ring_index == 1 &&
- buff_index > rx_local->fbr[0]->num_entries - 1)) {
-#else
- if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1) {
-#endif
+ buff_index > rx_local->fbr[ring_index]->num_entries - 1) {
/* Illegal buffer or ring index cannot be used by S/W*/
dev_err(&adapter->pdev->dev,
- "NICRxPkts PSR Entry %d indicates "
- "length of %d and/or bad bi(%d)\n",
- rx_local->local_psr_full & 0xFFF,
- len, buff_index);
+ "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
+ rx_local->local_psr_full & 0xFFF, len, buff_index);
return NULL;
}
/* Get and fill the RFD. */
spin_lock_irqsave(&adapter->rcv_lock, flags);
- rfd = NULL;
element = rx_local->recv_list.next;
rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
- if (rfd == NULL) {
+ if (!rfd) {
spin_unlock_irqrestore(&adapter->rcv_lock, flags);
return NULL;
}
@@ -3001,119 +2690,95 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
rfd->bufferindex = buff_index;
rfd->ringindex = ring_index;
- /* In V1 silicon, there is a bug which screws up filtering of
- * runt packets. Therefore runt packet filtering is disabled
- * in the MAC and the packets are dropped here. They are
- * also counted here.
+ /* In V1 silicon, there is a bug which screws up filtering of runt
+ * packets. Therefore runt packet filtering is disabled in the MAC and
+ * the packets are dropped here. They are also counted here.
*/
if (len < (NIC_MIN_PACKET_SIZE + 4)) {
adapter->stats.rx_other_errs++;
len = 0;
}
- if (len) {
- /* Determine if this is a multicast packet coming in */
- if ((word0 & ALCATEL_MULTICAST_PKT) &&
- !(word0 & ALCATEL_BROADCAST_PKT)) {
- /* Promiscuous mode and Multicast mode are
- * not mutually exclusive as was first
- * thought. I guess Promiscuous is just
- * considered a super-set of the other
- * filters. Generally filter is 0x2b when in
- * promiscuous mode.
- */
- if ((adapter->packet_filter &
- ET131X_PACKET_TYPE_MULTICAST)
- && !(adapter->packet_filter &
- ET131X_PACKET_TYPE_PROMISCUOUS)
- && !(adapter->packet_filter &
+ if (len == 0) {
+ rfd->len = 0;
+ goto out;
+ }
+
+ /* Determine if this is a multicast packet coming in */
+ if ((word0 & ALCATEL_MULTICAST_PKT) &&
+ !(word0 & ALCATEL_BROADCAST_PKT)) {
+ /* Promiscuous mode and Multicast mode are not mutually
+ * exclusive as was first thought. I guess Promiscuous is just
+ * considered a super-set of the other filters. Generally filter
+ * is 0x2b when in promiscuous mode.
+ */
+ if ((adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST)
+ && !(adapter->packet_filter & ET131X_PACKET_TYPE_PROMISCUOUS)
+ && !(adapter->packet_filter &
ET131X_PACKET_TYPE_ALL_MULTICAST)) {
- /*
- * Note - ring_index for fbr[] array is reversed
- * 1 for FBR0 etc
- */
- buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
- virt[buff_index];
+ buf = rx_local->fbr[ring_index]->virt[buff_index];
- /* Loop through our list to see if the
- * destination address of this packet
- * matches one in our list.
- */
- for (i = 0; i < adapter->multicast_addr_count;
- i++) {
- if (buf[0] ==
- adapter->multicast_list[i][0]
- && buf[1] ==
- adapter->multicast_list[i][1]
- && buf[2] ==
- adapter->multicast_list[i][2]
- && buf[3] ==
- adapter->multicast_list[i][3]
- && buf[4] ==
- adapter->multicast_list[i][4]
- && buf[5] ==
- adapter->multicast_list[i][5]) {
- break;
- }
+ /* Loop through our list to see if the destination
+ * address of this packet matches one in our list.
+ */
+ for (i = 0; i < adapter->multicast_addr_count; i++) {
+ if (buf[0] == adapter->multicast_list[i][0]
+ && buf[1] == adapter->multicast_list[i][1]
+ && buf[2] == adapter->multicast_list[i][2]
+ && buf[3] == adapter->multicast_list[i][3]
+ && buf[4] == adapter->multicast_list[i][4]
+ && buf[5] == adapter->multicast_list[i][5]) {
+ break;
}
-
- /* If our index is equal to the number
- * of Multicast address we have, then
- * this means we did not find this
- * packet's matching address in our
- * list. Set the len to zero,
- * so we free our RFD when we return
- * from this function.
- */
- if (i == adapter->multicast_addr_count)
- len = 0;
}
- if (len > 0)
- adapter->stats.multicast_pkts_rcvd++;
- } else if (word0 & ALCATEL_BROADCAST_PKT)
- adapter->stats.broadcast_pkts_rcvd++;
- else
- /* Not sure what this counter measures in
- * promiscuous mode. Perhaps we should check
- * the MAC address to see if it is directed
- * to us in promiscuous mode.
+ /* If our index is equal to the number of Multicast
+ * address we have, then this means we did not find this
+ * packet's matching address in our list. Set the len to
+ * zero, so we free our RFD when we return from this
+ * function.
*/
- adapter->stats.unicast_pkts_rcvd++;
- }
+ if (i == adapter->multicast_addr_count)
+ len = 0;
+ }
- if (len > 0) {
- struct sk_buff *skb = NULL;
+ if (len > 0)
+ adapter->stats.multicast_pkts_rcvd++;
+ } else if (word0 & ALCATEL_BROADCAST_PKT) {
+ adapter->stats.broadcast_pkts_rcvd++;
+ } else {
+ /* Not sure what this counter measures in promiscuous mode.
+ * Perhaps we should check the MAC address to see if it is
+ * directed to us in promiscuous mode.
+ */
+ adapter->stats.unicast_pkts_rcvd++;
+ }
- /*rfd->len = len - 4; */
- rfd->len = len;
+ if (len == 0) {
+ rfd->len = 0;
+ goto out;
+ }
- skb = dev_alloc_skb(rfd->len + 2);
- if (!skb) {
- dev_err(&adapter->pdev->dev,
- "Couldn't alloc an SKB for Rx\n");
- return NULL;
- }
+ rfd->len = len;
- adapter->net_stats.rx_bytes += rfd->len;
+ skb = dev_alloc_skb(rfd->len + 2);
+ if (!skb) {
+ dev_err(&adapter->pdev->dev, "Couldn't alloc an SKB for Rx\n");
+ return NULL;
+ }
- /*
- * Note - ring_index for fbr[] array is reversed,
- * 1 for FBR0 etc
- */
- memcpy(skb_put(skb, rfd->len),
- rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
- rfd->len);
+ adapter->net_stats.rx_bytes += rfd->len;
- skb->dev = adapter->netdev;
- skb->protocol = eth_type_trans(skb, adapter->netdev);
- skb->ip_summed = CHECKSUM_NONE;
+ memcpy(skb_put(skb, rfd->len),
+ rx_local->fbr[ring_index]->virt[buff_index],
+ rfd->len);
- netif_rx_ni(skb);
- } else {
- rfd->len = 0;
- }
+ skb->dev = adapter->netdev;
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+ skb->ip_summed = CHECKSUM_NONE;
+ netif_rx_ni(skb);
+out:
nic_return_rfd(adapter, rfd);
return rfd;
}
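Once a frame passes the length and multicast checks, the restructured code converges on the same copy-and-post hand-off as before; condensed into one place it is roughly the following (a sketch, not a drop-in; the helper name is illustrative):

    static void sketch_deliver(struct et131x_adapter *adapter, void *buf, u32 len)
    {
        struct sk_buff *skb = dev_alloc_skb(len + 2);

        if (!skb)
            return;     /* nic_rx_pkts() itself returns NULL here */

        memcpy(skb_put(skb, len), buf, len);    /* copy out of the DMA buffer */
        adapter->net_stats.rx_bytes += len;

        skb->dev = adapter->netdev;
        skb->protocol = eth_type_trans(skb, adapter->netdev);
        skb->ip_summed = CHECKSUM_NONE;         /* no RX checksum offload */
        netif_rx_ni(skb);   /* non-IRQ context: the ISR bottom half is a work item */
    }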
@@ -3198,10 +2863,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
return -ENOMEM;
}
- /* Allocate enough memory for the Tx descriptor ring, and allocate
- * some extra so that the ring can be aligned on a 4k boundary.
- */
- desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
+ desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
tx_ring->tx_desc_ring =
(struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
desc_size,
@@ -3245,8 +2907,7 @@ static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
if (adapter->tx_ring.tx_desc_ring) {
/* Free memory relating to Tx rings here */
- desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
- + 4096 - 1;
+ desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
dma_free_coherent(&adapter->pdev->dev,
desc_size,
adapter->tx_ring.tx_desc_ring,
@@ -3285,6 +2946,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
unsigned long flags;
struct phy_device *phydev = adapter->phydev;
+ dma_addr_t dma_addr;
/* Part of the optimizations of this send routine restrict us to
* sending 24 fragments at a pass. In practice we should never see
@@ -3313,85 +2975,47 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
* This will work until we determine why the hardware
* doesn't seem to like large fragments.
*/
- if ((skb->len - skb->data_len) <= 1514) {
- desc[frag].addr_hi = 0;
+ if (skb_headlen(skb) <= 1514) {
/* Low 16bits are length, high is vlan and
unused currently so zero */
- desc[frag].len_vlan =
- skb->len - skb->data_len;
-
- /* NOTE: Here, the dma_addr_t returned from
- * dma_map_single() is implicitly cast as a
- * u32. Although dma_addr_t can be
- * 64-bit, the address returned by
- * dma_map_single() is always 32-bit
- * addressable (as defined by the pci/dma
- * subsystem)
- */
- desc[frag++].addr_lo =
- dma_map_single(&adapter->pdev->dev,
- skb->data,
- skb->len -
- skb->data_len,
- DMA_TO_DEVICE);
+ desc[frag].len_vlan = skb_headlen(skb);
+ dma_addr = dma_map_single(&adapter->pdev->dev,
+ skb->data,
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+ desc[frag].addr_lo = lower_32_bits(dma_addr);
+ desc[frag].addr_hi = upper_32_bits(dma_addr);
+ frag++;
} else {
- desc[frag].addr_hi = 0;
- desc[frag].len_vlan =
- (skb->len - skb->data_len) / 2;
-
- /* NOTE: Here, the dma_addr_t returned from
- * dma_map_single() is implicitly cast as a
- * u32. Although dma_addr_t can be
- * 64-bit, the address returned by
- * dma_map_single() is always 32-bit
- * addressable (as defined by the pci/dma
- * subsystem)
- */
- desc[frag++].addr_lo =
- dma_map_single(&adapter->pdev->dev,
- skb->data,
- ((skb->len -
- skb->data_len) / 2),
- DMA_TO_DEVICE);
- desc[frag].addr_hi = 0;
-
- desc[frag].len_vlan =
- (skb->len - skb->data_len) / 2;
-
- /* NOTE: Here, the dma_addr_t returned from
- * dma_map_single() is implicitly cast as a
- * u32. Although dma_addr_t can be
- * 64-bit, the address returned by
- * dma_map_single() is always 32-bit
- * addressable (as defined by the pci/dma
- * subsystem)
- */
- desc[frag++].addr_lo =
- dma_map_single(&adapter->pdev->dev,
- skb->data +
- ((skb->len -
- skb->data_len) / 2),
- ((skb->len -
- skb->data_len) / 2),
- DMA_TO_DEVICE);
+ desc[frag].len_vlan = skb_headlen(skb) / 2;
+ dma_addr = dma_map_single(&adapter->pdev->dev,
+ skb->data,
+ (skb_headlen(skb) / 2),
+ DMA_TO_DEVICE);
+ desc[frag].addr_lo = lower_32_bits(dma_addr);
+ desc[frag].addr_hi = upper_32_bits(dma_addr);
+ frag++;
+
+ desc[frag].len_vlan = skb_headlen(skb) / 2;
+ dma_addr = dma_map_single(&adapter->pdev->dev,
+ skb->data +
+ (skb_headlen(skb) / 2),
+ (skb_headlen(skb) / 2),
+ DMA_TO_DEVICE);
+ desc[frag].addr_lo = lower_32_bits(dma_addr);
+ desc[frag].addr_hi = upper_32_bits(dma_addr);
+ frag++;
}
} else {
- desc[frag].addr_hi = 0;
- desc[frag].len_vlan =
- frags[i - 1].size;
-
- /* NOTE: Here, the dma_addr_t returned from
- * dma_map_page() is implicitly cast as a u32.
- * Although dma_addr_t can be 64-bit, the address
- * returned by dma_map_page() is always 32-bit
- * addressable (as defined by the pci/dma subsystem)
- */
- desc[frag++].addr_lo = skb_frag_dma_map(
- &adapter->pdev->dev,
- &frags[i - 1],
- 0,
- frags[i - 1].size,
- DMA_TO_DEVICE);
+ desc[frag].len_vlan = frags[i - 1].size;
+ dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
+ &frags[i - 1],
+ 0,
+ frags[i - 1].size,
+ DMA_TO_DEVICE);
+ desc[frag].addr_lo = lower_32_bits(dma_addr);
+ desc[frag].addr_hi = upper_32_bits(dma_addr);
+ frag++;
}
}
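Every fragment now goes through the same map-then-store sequence, keeping the full 64-bit handle in the descriptor instead of truncating it to addr_lo; in outline (a sketch; note the driver itself does not check for mapping failure):

    #include <linux/dma-mapping.h>

    static int sketch_map_tx_piece(struct device *dev, struct tx_desc *desc,
                                   void *data, u32 len)
    {
        dma_addr_t dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma_addr))
            return -ENOMEM;

        desc->len_vlan = len;   /* low 16 bits carry the length */
        desc->addr_lo = lower_32_bits(dma_addr);
        desc->addr_hi = upper_32_bits(dma_addr);
        return 0;
    }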
@@ -3521,7 +3145,7 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
tcb->skb = skb;
- if (skb->data != NULL && skb->len - skb->data_len >= 6) {
+ if (skb->data != NULL && skb_headlen(skb) >= 6) {
shbufva = (u16 *) skb->data;
if ((shbufva[0] == 0xffff) &&
@@ -3618,6 +3242,7 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
unsigned long flags;
struct tx_desc *desc = NULL;
struct net_device_stats *stats = &adapter->net_stats;
+ u64 dma_addr;
if (tcb->flags & fMP_DEST_BROAD)
atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
@@ -3638,8 +3263,11 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
(adapter->tx_ring.tx_desc_ring +
INDEX10(tcb->index_start));
+ dma_addr = desc->addr_lo;
+ dma_addr |= (u64)desc->addr_hi << 32;
+
dma_unmap_single(&adapter->pdev->dev,
- desc->addr_lo,
+ dma_addr,
desc->len_vlan, DMA_TO_DEVICE);
add_10bit(&tcb->index_start, 1);
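The unmap side mirrors that: previously only addr_lo was handed back to dma_unmap_single(), which would be wrong for any mapping above 4 GB; the descriptor's two halves are now recombined first, i.e. (a sketch, helper name illustrative):

    static u64 sketch_desc_dma_addr(const struct tx_desc *desc)
    {
        return ((u64)desc->addr_hi << 32) | desc->addr_lo;
    }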
@@ -3830,7 +3458,12 @@ static void et131x_get_regs(struct net_device *netdev,
et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, 0x0b, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, 0x0c, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, MII_MMD_CTRL, (u16 *)&regs_buff[num++]);
+ et131x_mii_read(adapter, MII_MMD_DATA, (u16 *)&regs_buff[num++]);
et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
+
et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
@@ -3839,6 +3472,7 @@ static void et131x_get_regs(struct net_device *netdev,
(u16 *)&regs_buff[num++]);
et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
(u16 *)&regs_buff[num++]);
+
et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
(u16 *)&regs_buff[num++]);
et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
@@ -3943,7 +3577,7 @@ static struct ethtool_ops et131x_ethtool_ops = {
.get_drvinfo = et131x_get_drvinfo,
.get_regs_len = et131x_get_regs_len,
.get_regs = et131x_get_regs,
- .get_link = ethtool_op_get_link,
+ .get_link = ethtool_op_get_link,
};
/**
* et131x_hwaddr_init - set up the MAC Address on the ET1310
@@ -4110,8 +3744,18 @@ static void et131x_error_timer_handler(unsigned long data)
}
/* This is a periodic timer, so reschedule */
- mod_timer(&adapter->error_timer, jiffies +
- TX_ERROR_PERIOD * HZ / 1000);
+ mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
+}
+
+/**
+ * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
+ * @adapter: pointer to our private adapter structure
+ */
+static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
+{
+ /* Free DMA memory */
+ et131x_tx_dma_memory_free(adapter);
+ et131x_rx_dma_memory_free(adapter);
}
/**
@@ -4144,26 +3788,14 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
/* Init receive data structures */
status = et131x_init_recv(adapter);
- if (status != 0) {
+ if (status) {
dev_err(&adapter->pdev->dev,
"et131x_init_recv FAILED\n");
- et131x_tx_dma_memory_free(adapter);
- et131x_rx_dma_memory_free(adapter);
+ et131x_adapter_memory_free(adapter);
}
return status;
}
-/**
- * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
- * @adapter: pointer to our private adapter structure
- */
-static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
-{
- /* Free DMA memory */
- et131x_tx_dma_memory_free(adapter);
- et131x_rx_dma_memory_free(adapter);
-}
-
static void et131x_adjust_link(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4358,7 +3990,7 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
* PCI subsystem detects that a PCI device which matches the information
* contained in the pci_device_id table has been removed.
*/
-static void __devexit et131x_pci_remove(struct pci_dev *pdev)
+static void et131x_pci_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4558,182 +4190,169 @@ static void et131x_isr_handler(struct work_struct *work)
status &= 0xffffffd7;
- if (status) {
- /* Handle the TXDMA Error interrupt */
- if (status & ET_INTR_TXDMA_ERR) {
- u32 txdma_err;
-
- /* Following read also clears the register (COR) */
- txdma_err = readl(&iomem->txdma.tx_dma_error);
+ if (!status)
+ goto out;
- dev_warn(&adapter->pdev->dev,
- "TXDMA_ERR interrupt, error = %d\n",
- txdma_err);
- }
+ /* Handle the TXDMA Error interrupt */
+ if (status & ET_INTR_TXDMA_ERR) {
+ u32 txdma_err;
- /* Handle Free Buffer Ring 0 and 1 Low interrupt */
- if (status &
- (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
- /*
- * This indicates the number of unused buffers in
- * RXDMA free buffer ring 0 is <= the limit you
- * programmed. Free buffer resources need to be
- * returned. Free buffers are consumed as packets
- * are passed from the network to the host. The host
- * becomes aware of the packets from the contents of
- * the packet status ring. This ring is queried when
- * the packet done interrupt occurs. Packets are then
- * passed to the OS. When the OS is done with the
- * packets the resources can be returned to the
- * ET1310 for re-use. This interrupt is one method of
- * returning resources.
- */
+ /* Following read also clears the register (COR) */
+ txdma_err = readl(&iomem->txdma.tx_dma_error);
- /* If the user has flow control on, then we will
- * send a pause packet, otherwise just exit
- */
- if (adapter->flowcontrol == FLOW_TXONLY ||
- adapter->flowcontrol == FLOW_BOTH) {
- u32 pm_csr;
+ dev_warn(&adapter->pdev->dev,
+ "TXDMA_ERR interrupt, error = %d\n",
+ txdma_err);
+ }
- /* Tell the device to send a pause packet via
- * the back pressure register (bp req and
- * bp xon/xoff)
- */
- pm_csr = readl(&iomem->global.pm_csr);
- if (!et1310_in_phy_coma(adapter))
- writel(3, &iomem->txmac.bp_ctrl);
- }
- }
+ /* Handle Free Buffer Ring 0 and 1 Low interrupt */
+ if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
+ /*
+ * This indicates the number of unused buffers in RXDMA free
+ * buffer ring 0 is <= the limit you programmed. Free buffer
+ * resources need to be returned. Free buffers are consumed as
+ * packets are passed from the network to the host. The host
+ * becomes aware of the packets from the contents of the packet
+ * status ring. This ring is queried when the packet done
+ * interrupt occurs. Packets are then passed to the OS. When
+ * the OS is done with the packets the resources can be
+ * returned to the ET1310 for re-use. This interrupt is one
+ * method of returning resources.
+ */
- /* Handle Packet Status Ring Low Interrupt */
- if (status & ET_INTR_RXDMA_STAT_LOW) {
+ /*
+ * If the user has flow control on, then we will
+ * send a pause packet, otherwise just exit
+ */
+ if (adapter->flowcontrol == FLOW_TXONLY ||
+ adapter->flowcontrol == FLOW_BOTH) {
+ u32 pm_csr;
/*
- * Same idea as with the two Free Buffer Rings.
- * Packets going from the network to the host each
- * consume a free buffer resource and a packet status
- * resource. These resoures are passed to the OS.
- * When the OS is done with the resources, they need
- * to be returned to the ET1310. This is one method
- * of returning the resources.
+ * Tell the device to send a pause packet via the back
+ * pressure register (bp req and bp xon/xoff)
*/
+ pm_csr = readl(&iomem->global.pm_csr);
+ if (!et1310_in_phy_coma(adapter))
+ writel(3, &iomem->txmac.bp_ctrl);
}
+ }
- /* Handle RXDMA Error Interrupt */
- if (status & ET_INTR_RXDMA_ERR) {
- /*
- * The rxdma_error interrupt is sent when a time-out
- * on a request issued by the JAGCore has occurred or
- * a completion is returned with an un-successful
- * status. In both cases the request is considered
- * complete. The JAGCore will automatically re-try the
- * request in question. Normally information on events
- * like these are sent to the host using the "Advanced
- * Error Reporting" capability. This interrupt is
- * another way of getting similar information. The
- * only thing required is to clear the interrupt by
- * reading the ISR in the global resources. The
- * JAGCore will do a re-try on the request. Normally
- * you should never see this interrupt. If you start
- * to see this interrupt occurring frequently then
- * something bad has occurred. A reset might be the
- * thing to do.
- */
- /* TRAP();*/
+ /* Handle Packet Status Ring Low Interrupt */
+ if (status & ET_INTR_RXDMA_STAT_LOW) {
+ /*
+ * Same idea as with the two Free Buffer Rings. Packets going
+ * from the network to the host each consume a free buffer
+ * resource and a packet status resource. These resources are
+ * passed to the OS. When the OS is done with the resources,
+ * they need to be returned to the ET1310. This is one method
+ * of returning the resources.
+ */
+ }
- dev_warn(&adapter->pdev->dev,
- "RxDMA_ERR interrupt, error %x\n",
- readl(&iomem->txmac.tx_test));
- }
+ /* Handle RXDMA Error Interrupt */
+ if (status & ET_INTR_RXDMA_ERR) {
+ /*
+ * The rxdma_error interrupt is sent when a time-out on a
+ * request issued by the JAGCore has occurred or a completion is
+ * returned with an un-successful status. In both cases the
+ * request is considered complete. The JAGCore will
+ * automatically re-try the request in question. Normally
+ * information on events like these are sent to the host using
+ * the "Advanced Error Reporting" capability. This interrupt is
+ * another way of getting similar information. The only thing
+ * required is to clear the interrupt by reading the ISR in the
+ * global resources. The JAGCore will do a re-try on the
+ * request. Normally you should never see this interrupt. If
+ * you start to see this interrupt occurring frequently then
+ * something bad has occurred. A reset might be the thing to do.
+ */
+ /* TRAP();*/
- /* Handle the Wake on LAN Event */
- if (status & ET_INTR_WOL) {
- /*
- * This is a secondary interrupt for wake on LAN.
- * The driver should never see this, if it does,
- * something serious is wrong. We will TRAP the
- * message when we are in DBG mode, otherwise we
- * will ignore it.
- */
- dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
- }
+ dev_warn(&adapter->pdev->dev,
+ "RxDMA_ERR interrupt, error %x\n",
+ readl(&iomem->txmac.tx_test));
+ }
- /* Let's move on to the TxMac */
- if (status & ET_INTR_TXMAC) {
- u32 err = readl(&iomem->txmac.err);
+ /* Handle the Wake on LAN Event */
+ if (status & ET_INTR_WOL) {
+ /*
+ * This is a secondary interrupt for wake on LAN. The driver
+ * should never see this, if it does, something serious is
+ * wrong. We will TRAP the message when we are in DBG mode,
+ * otherwise we will ignore it.
+ */
+ dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
+ }
- /*
- * When any of the errors occur and TXMAC generates
- * an interrupt to report these errors, it usually
- * means that TXMAC has detected an error in the data
- * stream retrieved from the on-chip Tx Q. All of
- * these errors are catastrophic and TXMAC won't be
- * able to recover data when these errors occur. In
- * a nutshell, the whole Tx path will have to be reset
- * and re-configured afterwards.
- */
- dev_warn(&adapter->pdev->dev,
- "TXMAC interrupt, error 0x%08x\n",
- err);
+ /* Let's move on to the TxMac */
+ if (status & ET_INTR_TXMAC) {
+ u32 err = readl(&iomem->txmac.err);
- /* If we are debugging, we want to see this error,
- * otherwise we just want the device to be reset and
- * continue
- */
- }
+ /*
+ * When any of the errors occur and TXMAC generates an
+ * interrupt to report these errors, it usually means that
+ * TXMAC has detected an error in the data stream retrieved
+ * from the on-chip Tx Q. All of these errors are catastrophic
+ * and TXMAC won't be able to recover data when these errors
+ * occur. In a nutshell, the whole Tx path will have to be reset
+ * and re-configured afterwards.
+ */
+ dev_warn(&adapter->pdev->dev,
+ "TXMAC interrupt, error 0x%08x\n",
+ err);
- /* Handle RXMAC Interrupt */
- if (status & ET_INTR_RXMAC) {
- /*
- * These interrupts are catastrophic to the device,
- * what we need to do is disable the interrupts and
- * set the flag to cause us to reset so we can solve
- * this issue.
- */
- /* MP_SET_FLAG( adapter,
- fMP_ADAPTER_HARDWARE_ERROR); */
+ /*
+ * If we are debugging, we want to see this error, otherwise we
+ * just want the device to be reset and continue
+ */
+ }
- dev_warn(&adapter->pdev->dev,
- "RXMAC interrupt, error 0x%08x. Requesting reset\n",
- readl(&iomem->rxmac.err_reg));
+ /* Handle RXMAC Interrupt */
+ if (status & ET_INTR_RXMAC) {
+ /*
+ * These interrupts are catastrophic to the device, what we need
+ * to do is disable the interrupts and set the flag to cause us
+ * to reset so we can solve this issue.
+ */
+ /* MP_SET_FLAG( adapter, fMP_ADAPTER_HARDWARE_ERROR); */
- dev_warn(&adapter->pdev->dev,
- "Enable 0x%08x, Diag 0x%08x\n",
- readl(&iomem->rxmac.ctrl),
- readl(&iomem->rxmac.rxq_diag));
+ dev_warn(&adapter->pdev->dev,
+ "RXMAC interrupt, error 0x%08x. Requesting reset\n",
+ readl(&iomem->rxmac.err_reg));
- /*
- * If we are debugging, we want to see this error,
- * otherwise we just want the device to be reset and
- * continue
- */
- }
+ dev_warn(&adapter->pdev->dev,
+ "Enable 0x%08x, Diag 0x%08x\n",
+ readl(&iomem->rxmac.ctrl),
+ readl(&iomem->rxmac.rxq_diag));
- /* Handle MAC_STAT Interrupt */
- if (status & ET_INTR_MAC_STAT) {
- /*
- * This means at least one of the un-masked counters
- * in the MAC_STAT block has rolled over. Use this
- * to maintain the top, software managed bits of the
- * counter(s).
- */
- et1310_handle_macstat_interrupt(adapter);
- }
+ /*
+ * If we are debugging, we want to see this error, otherwise we
+ * just want the device to be reset and continue
+ */
+ }
- /* Handle SLV Timeout Interrupt */
- if (status & ET_INTR_SLV_TIMEOUT) {
- /*
- * This means a timeout has occurred on a read or
- * write request to one of the JAGCore registers. The
- * Global Resources block has terminated the request
- * and on a read request, returned a "fake" value.
- * The most likely reasons are: Bad Address or the
- * addressed module is in a power-down state and
- * can't respond.
- */
- }
+ /* Handle MAC_STAT Interrupt */
+ if (status & ET_INTR_MAC_STAT) {
+ /*
+ * This means at least one of the un-masked counters in the
+ * MAC_STAT block has rolled over. Use this to maintain the top,
+ * software managed bits of the counter(s).
+ */
+ et1310_handle_macstat_interrupt(adapter);
}
+
+ /* Handle SLV Timeout Interrupt */
+ if (status & ET_INTR_SLV_TIMEOUT) {
+ /*
+ * This means a timeout has occurred on a read or write request
+ * to one of the JAGCore registers. The Global Resources block
+ * has terminated the request and on a read request, returned a
+ * "fake" value. The most likely reasons are: Bad Address or the
+ * addressed module is in a power-down state and can't respond.
+ */
+ }
+out:
et131x_enable_interrupts(adapter);
}
@@ -5221,7 +4840,7 @@ static const struct net_device_ops et131x_netdev_ops = {
* contained in the pci_device_id table. This routine is the equivalent to
* a device insertion routine.
*/
-static int __devinit et131x_pci_setup(struct pci_dev *pdev,
+static int et131x_pci_setup(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *netdev;
@@ -5423,7 +5042,7 @@ static struct pci_driver et131x_driver = {
.name = DRIVER_NAME,
.id_table = et131x_pci_table,
.probe = et131x_pci_setup,
- .remove = __devexit_p(et131x_pci_remove),
+ .remove = et131x_pci_remove,
.driver.pm = ET131X_PM_OPS,
};