author    Shannon Nelson <snelson@pensando.io>    2021-07-27 10:43:29 -0700
committer David S. Miller <davem@davemloft.net>   2021-07-27 20:15:21 +0100
commit    e75ccac1d0644c9d7ad531cb9a81c499930c06da (patch)
tree      1a606d140d039fc1ff81e0962a3b26a0a28c014f /drivers/net/ethernet/pensando/ionic
parent    ionic: init reconfig err to 0 (diff)
ionic: use fewer inits on the buf_info struct
Based on Alex's review notes on [1], we don't need to write to the buf_info elements as often, and can tighten up how they are used. Also, use prefetchw() to warm up the page struct for a later get_page().

[1] https://lore.kernel.org/netdev/CAKgT0UfyjoAN7LTnq0NMZfXRv4v7iTCPyAb9pVr3qWMhop_BVw@mail.gmail.com/

Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
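For context on the prefetchw() part of the change: get_page() writes to the page refcount, so prefetching the struct page cacheline in exclusive (write) state avoids a second cacheline transition when the refcount is bumped. Below is a minimal sketch of that pattern, assuming a hypothetical helper (example_rx_add_frag is not a function in this driver; prefetchw(), get_page() and skb_add_rx_frag() are the real kernel APIs):

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

/* Hypothetical helper sketching the commit's pattern: warm the struct page
 * cacheline for write early, so the later get_page() refcount increment
 * does not stall on a shared-to-exclusive cacheline transition.
 */
static void example_rx_add_frag(struct sk_buff *skb, struct page *page,
				unsigned int offset, unsigned int len)
{
	prefetchw(page);	/* get_page() below will write to page->_refcount */

	/* ...other per-fragment work can overlap with the prefetch... */

	/* take an extra reference so the driver can keep (recycle) the page
	 * while the skb fragment also holds it
	 */
	get_page(page);
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			PAGE_SIZE);
}

The same idea appears in the diff below, where prefetch(buf_info->page) becomes prefetchw(buf_info->page) in ionic_rx_frags().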
Diffstat (limited to 'drivers/net/ethernet/pensando/ionic')
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 27
1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 08934888575c..2ba19246d763 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -32,19 +32,13 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
return netdev_get_tx_queue(q->lif->netdev, q->index);
}
-static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info)
-{
- buf_info->page = NULL;
- buf_info->page_offset = 0;
- buf_info->dma_addr = 0;
-}
-
static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_rx_stats *stats;
struct device *dev;
+ struct page *page;
dev = q->dev;
stats = q_to_rx_stats(q);
@@ -55,26 +49,27 @@ static int ionic_rx_page_alloc(struct ionic_queue *q,
return -EINVAL;
}
- buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
- if (unlikely(!buf_info->page)) {
+ page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
+ if (unlikely(!page)) {
net_err_ratelimited("%s: %s page alloc failed\n",
netdev->name, q->name);
stats->alloc_err++;
return -ENOMEM;
}
- buf_info->page_offset = 0;
- buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset,
+ buf_info->dma_addr = dma_map_page(dev, page, 0,
IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
- __free_pages(buf_info->page, 0);
- ionic_rx_buf_reset(buf_info);
+ __free_pages(page, 0);
net_err_ratelimited("%s: %s dma map failed\n",
netdev->name, q->name);
stats->dma_map_err++;
return -EIO;
}
+ buf_info->page = page;
+ buf_info->page_offset = 0;
+
return 0;
}
@@ -95,7 +90,7 @@ static void ionic_rx_page_free(struct ionic_queue *q,
dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
__free_pages(buf_info->page, 0);
- ionic_rx_buf_reset(buf_info);
+ buf_info->page = NULL;
}
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
@@ -139,7 +134,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
buf_info = &desc_info->bufs[0];
len = le16_to_cpu(comp->len);
- prefetch(buf_info->page);
+ prefetchw(buf_info->page);
skb = napi_get_frags(&q_to_qcq(q)->napi);
if (unlikely(!skb)) {
@@ -170,7 +165,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
dma_unmap_page(dev, buf_info->dma_addr,
IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- ionic_rx_buf_reset(buf_info);
+ buf_info->page = NULL;
}
buf_info++;