path: root/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
Diffstat (limited to 'drivers/net/ethernet/pensando/ionic/ionic_txrx.c')
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 68
1 file changed, 36 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 08934888575c..37c39581b659 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -32,19 +32,13 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
return netdev_get_tx_queue(q->lif->netdev, q->index);
}
-static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info)
-{
- buf_info->page = NULL;
- buf_info->page_offset = 0;
- buf_info->dma_addr = 0;
-}
-
static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_rx_stats *stats;
struct device *dev;
+ struct page *page;
dev = q->dev;
stats = q_to_rx_stats(q);
@@ -55,26 +49,27 @@ static int ionic_rx_page_alloc(struct ionic_queue *q,
return -EINVAL;
}
- buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
- if (unlikely(!buf_info->page)) {
+ page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
+ if (unlikely(!page)) {
net_err_ratelimited("%s: %s page alloc failed\n",
netdev->name, q->name);
stats->alloc_err++;
return -ENOMEM;
}
- buf_info->page_offset = 0;
- buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset,
+ buf_info->dma_addr = dma_map_page(dev, page, 0,
IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
- __free_pages(buf_info->page, 0);
- ionic_rx_buf_reset(buf_info);
+ __free_pages(page, 0);
net_err_ratelimited("%s: %s dma map failed\n",
netdev->name, q->name);
stats->dma_map_err++;
return -EIO;
}
+ buf_info->page = page;
+ buf_info->page_offset = 0;
+
return 0;
}
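
[Illustration, not part of the diff: a minimal user-space sketch of the ordering this hunk adopts in ionic_rx_page_alloc(). The buffer descriptor is written only after both the allocation and the fallible mapping step have succeeded, so the error paths no longer need a reset helper. The demo_buf_info type and fake_map_buffer() helper are hypothetical stand-ins for struct ionic_buf_info, alloc_pages() and dma_map_page(), not the driver's real code.]

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct ionic_buf_info. */
struct demo_buf_info {
	void *page;
	uint32_t page_offset;
	uintptr_t dma_addr;
};

/* Hypothetical stand-in for dma_map_page(); returns 0 on mapping failure. */
static uintptr_t fake_map_buffer(void *page)
{
	return (uintptr_t)page;		/* identity "mapping" for the demo */
}

static int demo_rx_page_alloc(struct demo_buf_info *buf_info)
{
	uintptr_t dma_addr;
	void *page;

	page = malloc(4096);		/* stand-in for alloc_pages() */
	if (!page)
		return -1;		/* buf_info untouched on failure */

	dma_addr = fake_map_buffer(page);
	if (!dma_addr) {
		free(page);		/* still nothing to reset in buf_info */
		return -1;
	}

	/* Publish into the descriptor only once every step has succeeded. */
	buf_info->dma_addr = dma_addr;
	buf_info->page = page;
	buf_info->page_offset = 0;

	return 0;
}

int main(void)
{
	struct demo_buf_info buf = { 0 };

	return demo_rx_page_alloc(&buf) ? EXIT_FAILURE : EXIT_SUCCESS;
}
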
@@ -95,7 +90,7 @@ static void ionic_rx_page_free(struct ionic_queue *q,
dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
__free_pages(buf_info->page, 0);
- ionic_rx_buf_reset(buf_info);
+ buf_info->page = NULL;
}
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
@@ -139,7 +134,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
buf_info = &desc_info->bufs[0];
len = le16_to_cpu(comp->len);
- prefetch(buf_info->page);
+ prefetchw(buf_info->page);
skb = napi_get_frags(&q_to_qcq(q)->napi);
if (unlikely(!skb)) {
@@ -170,7 +165,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
dma_unmap_page(dev, buf_info->dma_addr,
IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- ionic_rx_buf_reset(buf_info);
+ buf_info->page = NULL;
}
buf_info++;
@@ -274,12 +269,11 @@ static void ionic_rx_clean(struct ionic_queue *q,
}
}
- if (likely(netdev->features & NETIF_F_RXCSUM)) {
- if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
- stats->csum_complete++;
- }
+ if (likely(netdev->features & NETIF_F_RXCSUM) &&
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
+ stats->csum_complete++;
} else {
stats->csum_none++;
}
@@ -451,11 +445,12 @@ void ionic_rx_empty(struct ionic_queue *q)
q->tail_idx = 0;
}
-static void ionic_dim_update(struct ionic_qcq *qcq)
+static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
struct dim_sample dim_sample;
struct ionic_lif *lif;
unsigned int qi;
+ u64 pkts, bytes;
if (!qcq->intr.dim_coal_hw)
return;
@@ -463,14 +458,23 @@ static void ionic_dim_update(struct ionic_qcq *qcq)
lif = qcq->q.lif;
qi = qcq->cq.bound_q->index;
- ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->rxqcqs[qi]->intr.index,
- qcq->intr.dim_coal_hw);
+ switch (napi_mode) {
+ case IONIC_LIF_F_TX_DIM_INTR:
+ pkts = lif->txqstats[qi].pkts;
+ bytes = lif->txqstats[qi].bytes;
+ break;
+ case IONIC_LIF_F_RX_DIM_INTR:
+ pkts = lif->rxqstats[qi].pkts;
+ bytes = lif->rxqstats[qi].bytes;
+ break;
+ default:
+ pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
+ bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
+ break;
+ }
dim_update_sample(qcq->cq.bound_intr->rearm_count,
- lif->txqstats[qi].pkts,
- lif->txqstats[qi].bytes,
- &dim_sample);
+ pkts, bytes, &dim_sample);
net_dim(&qcq->dim, dim_sample);
}
@@ -491,7 +495,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
ionic_tx_service, NULL, NULL);
if (work_done < budget && napi_complete_done(napi, work_done)) {
- ionic_dim_update(qcq);
+ ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
cq->bound_intr->rearm_count++;
}
@@ -530,7 +534,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(cq->bound_q);
if (work_done < budget && napi_complete_done(napi, work_done)) {
- ionic_dim_update(qcq);
+ ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
cq->bound_intr->rearm_count++;
}
@@ -576,7 +580,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
- ionic_dim_update(qcq);
+ ionic_dim_update(qcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
rxcq->bound_intr->rearm_count++;
}
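
[Illustration, not part of the diff: a standalone sketch of the counter selection the reworked ionic_dim_update() performs. A TX-only interrupt feeds DIM the TX queue counters, an RX-only interrupt the RX counters, and a shared TX/RX interrupt the sum of both, which the driver then passes to dim_update_sample() and net_dim(). The enum values, struct names, and pick_dim_sample() below are illustrative stand-ins; the real code keys off IONIC_LIF_F_TX_DIM_INTR / IONIC_LIF_F_RX_DIM_INTR and reads lif->txqstats[] / lif->rxqstats[].]

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's napi-mode flags and stat structs. */
enum demo_napi_mode { DEMO_TX_ONLY, DEMO_RX_ONLY, DEMO_TX_RX };

struct demo_queue_stats {
	uint64_t pkts;
	uint64_t bytes;
};

struct demo_dim_sample {
	uint64_t pkts;
	uint64_t bytes;
};

static struct demo_dim_sample pick_dim_sample(enum demo_napi_mode mode,
					      const struct demo_queue_stats *tx,
					      const struct demo_queue_stats *rx)
{
	struct demo_dim_sample s;

	switch (mode) {
	case DEMO_TX_ONLY:	/* interrupt serves only the TX queue */
		s.pkts = tx->pkts;
		s.bytes = tx->bytes;
		break;
	case DEMO_RX_ONLY:	/* interrupt serves only the RX queue */
		s.pkts = rx->pkts;
		s.bytes = rx->bytes;
		break;
	default:		/* shared TX/RX interrupt: account for both */
		s.pkts = tx->pkts + rx->pkts;
		s.bytes = tx->bytes + rx->bytes;
		break;
	}

	return s;
}

int main(void)
{
	struct demo_queue_stats tx = { .pkts = 100, .bytes = 64000 };
	struct demo_queue_stats rx = { .pkts = 250, .bytes = 310000 };
	struct demo_dim_sample s = pick_dim_sample(DEMO_TX_RX, &tx, &rx);

	printf("pkts=%llu bytes=%llu\n",
	       (unsigned long long)s.pkts, (unsigned long long)s.bytes);
	return 0;
}
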