author	françois romieu <romieu@fr.zoreil.com>	2015-05-01 22:14:39 +0200
committer	David S. Miller <davem@davemloft.net>	2015-05-04 00:18:26 -0400
commit	a21bb8bae1057ecd071f6d43b9ee3f97503ad904 (patch)
tree	57082c178a05d3f2e89153c9290e8d49690eeee7 /drivers/net/ethernet/via/via-rhine.c
parent	via-rhine: commit receive buffer address before descriptor status update. (diff)
via-rhine: allocate and map receive buffer in a single transaction
It's used to initialize the receive ring but it will actually shine when the receive poll code is reworked.

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/via/via-rhine.c')
-rw-r--r--	drivers/net/ethernet/via/via-rhine.c	57
1 file changed, 43 insertions(+), 14 deletions(-)
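For readers skimming the diff below: the patch folds the separate netdev_alloc_skb() and dma_map_single() calls in alloc_rbufs() into one helper, rhine_skb_dma_init(), which either hands back both the skb and its DMA mapping or fails cleanly. A minimal, self-contained sketch of that "single transaction" pattern follows; the demo_rx_buf / demo_rx_buf_get names are hypothetical and the error handling is simplified, so treat it as an illustration of the idea rather than the driver code itself.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical names (demo_rx_buf, demo_rx_buf_get): not part of the driver. */
	struct demo_rx_buf {
		struct sk_buff *skb;	/* host-side receive buffer */
		dma_addr_t dma;		/* device-visible address of skb->data */
	};

	static int demo_rx_buf_get(struct net_device *dev, struct device *hwdev,
				   unsigned int size, struct demo_rx_buf *buf)
	{
		buf->skb = netdev_alloc_skb(dev, size);
		if (!buf->skb)
			return -ENOMEM;

		buf->dma = dma_map_single(hwdev, buf->skb->data, size, DMA_FROM_DEVICE);
		if (dma_mapping_error(hwdev, buf->dma)) {
			/* Undo the allocation so the caller never sees a half-built buffer. */
			dev_kfree_skb_any(buf->skb);
			return -EIO;
		}

		/* Success: the caller owns both the skb and its DMA mapping. */
		return 0;
	}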
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 88b45c7a35e0..aa398ea5a609 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1213,12 +1213,47 @@ static void free_ring(struct net_device* dev)
}
-static void alloc_rbufs(struct net_device *dev)
+struct rhine_skb_dma {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+};
+
+static inline int rhine_skb_dma_init(struct net_device *dev,
+ struct rhine_skb_dma *sd)
{
struct rhine_private *rp = netdev_priv(dev);
struct device *hwdev = dev->dev.parent;
+ const int size = rp->rx_buf_sz;
+
+ sd->skb = netdev_alloc_skb(dev, size);
+ if (!sd->skb)
+ return -ENOMEM;
+
+ sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
+ netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
+ dev_kfree_skb_any(sd->skb);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
+ struct rhine_skb_dma *sd, int entry)
+{
+ rp->rx_skbuff_dma[entry] = sd->dma;
+ rp->rx_skbuff[entry] = sd->skb;
+
+ rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
+ dma_wmb();
+}
+
+static void alloc_rbufs(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
dma_addr_t next;
- int i;
+ int rc, i;
rp->dirty_rx = rp->cur_rx = 0;
@@ -1239,20 +1274,14 @@ static void alloc_rbufs(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
- rp->rx_skbuff[i] = skb;
- if (skb == NULL)
- break;
+ struct rhine_skb_dma sd;
- rp->rx_skbuff_dma[i] =
- dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
- rp->rx_skbuff_dma[i] = 0;
- dev_kfree_skb(skb);
+ rc = rhine_skb_dma_init(dev, &sd);
+ if (rc < 0)
break;
- }
- rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
+
+ rhine_skb_dma_nic_store(rp, &sd, i);
+
rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
}
rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
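A note on ordering, following from the parent commit ("via-rhine: commit receive buffer address before descriptor status update"): rhine_skb_dma_nic_store() writes the buffer address into the descriptor and issues dma_wmb() before the caller marks the descriptor with DescOwn. An illustrative summary of the per-entry sequence in the rewritten alloc_rbufs() loop (the comments are mine, not from the patch):

	/*
	 * rp->rx_ring[i].addr = cpu_to_le32(sd.dma);         1. publish the buffer address
	 * dma_wmb();                                          2. order it before the status write
	 * rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);    3. hand the descriptor to the NIC
	 *
	 * Without the barrier the device could observe DescOwn while the address
	 * write is still in flight and DMA into a stale or unset location.
	 */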