author     Christophe Leroy <christophe.leroy@c-s.fr>   2016-09-09 14:26:23 +0200
committer  David S. Miller <davem@davemloft.net>        2016-09-10 21:17:13 -0700
commit     070e1f01827c658b76bef6e3fa79046b4e4a7693 (patch)
tree       b4689b5004ea96ae0ca37b8ed7435508eb6b4be5 /drivers/net/ethernet/freescale/fs_enet
parent     net: fs_enet: merge NAPI RX and NAPI TX (diff)
net: fs_enet: don't unmap DMA when packet len is below copybreak
When the length of the packet is below the defined copybreak limit, the received packet is copied into a newly allocated skb in order to reuse the skb.

This is only worthwhile if it allows us to avoid a new DMA mapping. We should therefore not DMA unmap and remap skb->data. Instead, we invalidate the cache with dma_sync_single_for_cpu() once the received data has been copied into the new skb.

The following measurements were obtained on an MPC885 running at 132 MHz, using the timebase, with packets sent to the target with 'ping -s 1' (packet length is 60):

* Without this patch: 182 TB ticks
* With this patch: 143 TB ticks

As a comparison, setting the copybreak limit to 0 gives 148 TB ticks. This means that without this patch, the copybreak path (copying the received data into a new skb) is actually slower than simply allocating a new skb for the next packet to be received.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
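In short, the small-frame (copybreak) path now keeps the existing DMA mapping and only synchronizes the buffer for the CPU, while the large-frame path still unmaps the old buffer and maps a freshly allocated skb. The sketch below illustrates that split under some assumptions: the context structure (struct rx_slot_ctx), the helper name (rx_refill_slot()) and its fields are invented for illustration, and allocation-failure handling is simplified. The real logic operates directly on the fs_enet buffer descriptors, as shown in the diff further down.

/*
 * Minimal sketch of the receive path after this change. The names and
 * the context structure are made up for illustration only.
 */
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct rx_slot_ctx {                    /* hypothetical, for illustration */
	struct device *dev;             /* device used for DMA mapping */
	struct net_device *ndev;        /* net device owning the RX ring */
	dma_addr_t buf_dma;             /* DMA address held in the buffer descriptor */
	unsigned int buf_size;          /* full mapped size of the ring buffer */
	int rx_copybreak;               /* copy-vs-remap threshold */
};

/* On return, *rskb is the skb to hand to the stack; the return value re-arms the slot. */
static struct sk_buff *rx_refill_slot(struct rx_slot_ctx *ctx,
				      struct sk_buff **rskb, int pkt_len)
{
	struct sk_buff *skb = *rskb;
	struct sk_buff *skbn;

	if (pkt_len <= ctx->rx_copybreak) {
		/* Small frame: copy it out and keep the existing DMA mapping. */
		skbn = netdev_alloc_skb(ctx->ndev, pkt_len + 2);
		if (skbn) {
			skb_reserve(skbn, 2);   /* align the IP header */
			skb_copy_from_linear_data(skb, skbn->data, pkt_len);
			swap(skb, skbn);        /* hand the copy up, recycle the original */
			/*
			 * Invalidate the cached view of the DMA buffer now that
			 * the CPU has read it, instead of unmapping and remapping.
			 */
			dma_sync_single_for_cpu(ctx->dev, ctx->buf_dma,
						L1_CACHE_ALIGN(pkt_len),
						DMA_FROM_DEVICE);
		}
	} else {
		/* Large frame: pass the mapped skb up and map a fresh replacement. */
		/* (The driver also aligns the new buffer before mapping it.) */
		skbn = netdev_alloc_skb(ctx->ndev, ctx->buf_size);
		if (skbn) {
			dma_unmap_single(ctx->dev, ctx->buf_dma, ctx->buf_size,
					 DMA_FROM_DEVICE);
			ctx->buf_dma = dma_map_single(ctx->dev, skbn->data,
						      ctx->buf_size,
						      DMA_FROM_DEVICE);
		}
	}

	*rskb = skb;    /* frame delivered to the stack */
	return skbn;    /* skb that re-arms the ring slot (NULL if allocation failed) */
}

The saving comes from the small-frame branch: a dma_sync_single_for_cpu() over just L1_CACHE_ALIGN(pkt_len) bytes replaces a full unmap/remap of the PKT_MAXBUF_SIZE buffer, which is where the timebase difference above comes from.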
Diffstat (limited to 'drivers/net/ethernet/freescale/fs_enet')
-rw-r--r--   drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 36
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 37574a97a61c..f2a60cd1feed 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -226,21 +226,10 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
 			if (sc & BD_ENET_RX_OV)
 				fep->stats.rx_crc_errors++;
 
-			skb = fep->rx_skbuff[curidx];
-
-			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-				DMA_FROM_DEVICE);
-
-			skbn = skb;
-
+			skbn = fep->rx_skbuff[curidx];
 		} else {
 			skb = fep->rx_skbuff[curidx];
 
-			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-				DMA_FROM_DEVICE);
-
 			/*
 			 * Process the incoming frame.
 			 */
@@ -256,12 +245,30 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
 					skb_copy_from_linear_data(skb,
 						      skbn->data, pkt_len);
 					swap(skb, skbn);
+					dma_sync_single_for_cpu(fep->dev,
+						CBDR_BUFADDR(bdp),
+						L1_CACHE_ALIGN(pkt_len),
+						DMA_FROM_DEVICE);
 				}
 			} else {
 				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
 
-				if (skbn)
+				if (skbn) {
+					dma_addr_t dma;
+
 					skb_align(skbn, ENET_RX_ALIGN);
+
+					dma_unmap_single(fep->dev,
+						CBDR_BUFADDR(bdp),
+						L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+						DMA_FROM_DEVICE);
+
+					dma = dma_map_single(fep->dev,
+						skbn->data,
+						L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+						DMA_FROM_DEVICE);
+					CBDW_BUFADDR(bdp, dma);
+				}
 			}
 
 			if (skbn != NULL) {
@@ -276,9 +283,6 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
 		}
 
 		fep->rx_skbuff[curidx] = skbn;
-		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
-			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-			     DMA_FROM_DEVICE));
 		CBDW_DATLEN(bdp, 0);
 		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);