author     Eric Dumazet <edumazet@google.com>      2012-10-05 06:23:55 +0000
committer  David S. Miller <davem@davemloft.net>   2012-10-07 00:40:54 -0400
commit     acb600def2110b1310466c0e485c0d26299898ae
tree       21036c7d0518601aba70dde0246ac229cd8dfc0c /drivers/net/ethernet/calxeda/xgmac.c
parent     infiniband: pass rdma_cm module to netlink_dump_start
net: remove skb recycling
Over time, the skb recycling infrastructure got little interest and many bugs. The generic rx path skb allocation now uses page fragments for efficient GRO / TCP coalescing, and recycling a tx skb for the rx path is not worth the pain.

The last identified bug is that fat skbs can be recycled, which can end up using high-order pages after a few iterations.

With help from Maxime Bizon, who pointed out that commit 87151b8689d (net: allow pskb_expand_head() to get maximum tailroom) introduced this regression for recycled skbs.

Instead of fixing this bug, let's remove skb recycling.

Drivers wanting really hot skbs should use build_skb() anyway, to allocate/populate the sk_buff right before netif_receive_skb().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Maxime Bizon <mbizon@freebox.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
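For illustration, here is a minimal sketch of the build_skb() receive pattern the message points to: the driver lets the NIC fill a pre-allocated page fragment, and only wraps that buffer in an sk_buff right before handing it to netif_receive_skb(). The helper name my_driver_rx_frame() and the surrounding buffer bookkeeping are hypothetical and not taken from xgmac.c.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Hypothetical receive helper: 'data' is a page fragment (e.g. from
 * netdev_alloc_frag()) that the hardware has already filled with a frame
 * of 'len' bytes, written starting at data + NET_SKB_PAD; 'frag_size' is
 * the size that was allocated for the fragment. */
static void my_driver_rx_frame(struct net_device *dev, void *data,
			       unsigned int frag_size, unsigned int len)
{
	struct sk_buff *skb;

	/* Wrap the already-populated buffer in an sk_buff without copying. */
	skb = build_skb(data, frag_size);
	if (unlikely(!skb)) {
		/* On failure the fragment still belongs to the driver. */
		return;
	}

	skb_reserve(skb, NET_SKB_PAD);	/* skip the headroom left for the stack */
	skb_put(skb, len);		/* frame length reported by the NIC */
	skb->protocol = eth_type_trans(skb, dev);

	netif_receive_skb(skb);		/* hand off to the network stack */
}

In this sketch the fragment would typically be sized as SKB_DATA_ALIGN(NET_SKB_PAD + buf_len) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) when allocated, so build_skb() has room for the skb_shared_info area at the end of the buffer.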
Diffstat (limited to 'drivers/net/ethernet/calxeda/xgmac.c')
-rw-r--r--	drivers/net/ethernet/calxeda/xgmac.c	19
1 file changed, 2 insertions(+), 17 deletions(-)
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 2b4b4f529ab4..16814b34d4b6 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -375,7 +375,6 @@ struct xgmac_priv {
unsigned int tx_tail;
void __iomem *base;
- struct sk_buff_head rx_recycle;
unsigned int dma_buf_sz;
dma_addr_t dma_rx_phy;
dma_addr_t dma_tx_phy;
@@ -672,9 +671,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
p = priv->dma_rx + entry;
if (priv->rx_skbuff[entry] == NULL) {
- skb = __skb_dequeue(&priv->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
if (unlikely(skb == NULL))
break;
@@ -887,17 +884,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
desc_get_buf_len(p), DMA_TO_DEVICE);
}
- /*
- * If there's room in the queue (limit it to size)
- * we add this skb back into the pool,
- * if it's the right size.
- */
- if ((skb_queue_len(&priv->rx_recycle) <
- DMA_RX_RING_SZ) &&
- skb_recycle_check(skb, priv->dma_buf_sz))
- __skb_queue_head(&priv->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
}
if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
@@ -1016,7 +1003,6 @@ static int xgmac_open(struct net_device *dev)
dev->dev_addr);
}
- skb_queue_head_init(&priv->rx_recycle);
memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
/* Initialize the XGMAC and descriptors */
@@ -1053,7 +1039,6 @@ static int xgmac_stop(struct net_device *dev)
napi_disable(&priv->napi);
writel(0, priv->base + XGMAC_DMA_INTR_ENA);
- skb_queue_purge(&priv->rx_recycle);
/* Disable the MAC core */
xgmac_mac_disable(priv->base);