Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 43 +++++++++++++++++++++++++++----------------
1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 7af223b0a37f..e369baf11530 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -217,9 +217,6 @@ static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 				va - page_address(page) + off,
 				len - off, pfvf->rbsize);
-
-		otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
-				    pfvf->rbsize, DMA_FROM_DEVICE);
 		return true;
 	}
 
@@ -382,6 +379,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
 	if (pfvf->netdev->features & NETIF_F_RXCSUM)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+	skb_mark_for_recycle(skb);
+
 	napi_gro_frags(napi);
 }
 
@@ -464,12 +463,13 @@ process_cqe:
 			break;
 		}
 
-		if (cq->cq_type == CQ_XDP) {
+		qidx = cq->cq_idx - pfvf->hw.rx_queues;
+
+		if (cq->cq_type == CQ_XDP)
 			otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
-		} else {
-			otx2_snd_pkt_handler(pfvf, cq, sq, cqe, budget,
-					     &tx_pkts, &tx_bytes);
-		}
+		else
+			otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
+					     cqe, budget, &tx_pkts, &tx_bytes);
 
 		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
 		processed_cqe++;
@@ -486,7 +486,11 @@ process_cqe:
 	if (likely(tx_pkts)) {
 		struct netdev_queue *txq;
 
-		txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
+		qidx = cq->cq_idx - pfvf->hw.rx_queues;
+
+		if (qidx >= pfvf->hw.tx_queues)
+			qidx -= pfvf->hw.xdp_queues;
+		txq = netdev_get_tx_queue(pfvf->netdev, qidx);
 		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
 		/* Check if queue was stopped earlier due to ring full */
 		smp_mb();
@@ -734,7 +738,8 @@ static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
 		sqe_hdr->aura = sq->aura_id;
 		/* Post a CQE Tx after pkt transmission */
 		sqe_hdr->pnc = 1;
-		sqe_hdr->sq = qidx;
+		sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ?
+			       qidx + pfvf->hw.xdp_queues : qidx;
 	}
 	sqe_hdr->total = skb->len;
 	/* Set SQE identifier which will be used later for freeing SKB */
@@ -1178,11 +1183,13 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
 }
 EXPORT_SYMBOL(otx2_sq_append_skb);
 
-void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx)
 {
 	struct nix_cqe_rx_s *cqe;
+	struct otx2_pool *pool;
 	int processed_cqe = 0;
-	u64 iova, pa;
+	u16 pool_id;
+	u64 iova;
 
 	if (pfvf->xdp_prog)
 		xdp_rxq_info_unreg(&cq->xdp_rxq);
@@ -1190,6 +1197,9 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
 	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
 		return;
 
+	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+	pool = &pfvf->qset.pool[pool_id];
+
 	while (cq->pend_cqe) {
 		cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
 		processed_cqe++;
@@ -1202,9 +1212,8 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
 			continue;
 		}
 		iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
-		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
-		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
-		put_page(virt_to_page(phys_to_virt(pa)));
+
+		otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
 	}
 
 	/* Free CQEs to HW */
@@ -1219,8 +1228,10 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
 	struct nix_cqe_tx_s *cqe;
 	int processed_cqe = 0;
 	struct sg_list *sg;
+	int qidx;
 
-	sq = &pfvf->qset.sq[cq->cint_idx];
+	qidx = cq->cq_idx - pfvf->hw.rx_queues;
+	sq = &pfvf->qset.sq[qidx];
 
 	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
 		return;
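
Two of the hunks above implement buffer recycling: the per-fragment DMA unmap in otx2_skb_add_frag() disappears, and the RX path gains skb_mark_for_recycle(). A minimal sketch of that pattern, assuming page_pool-backed RX buffers; skb_add_rx_frag() and skb_mark_for_recycle() are the real kernel helpers, while the rx_frag_to_skb() wrapper is hypothetical, not a driver function:

#include <linux/skbuff.h>

static void rx_frag_to_skb(struct sk_buff *skb, struct page *page,
			   unsigned int off, unsigned int len,
			   unsigned int truesize)
{
	/* Attach the pool-backed page as an skb fragment. */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, len,
			truesize);

	/* Mark the skb so that, when it is freed, page_pool pages are
	 * recycled back to their pool instead of the driver unmapping
	 * and put_page()-ing each fragment itself; this is why the
	 * explicit otx2_dma_unmap_page() call can be dropped.
	 */
	skb_mark_for_recycle(skb);
}

The cleanup hunk follows the same reasoning: otx2_free_bufs() presumably returns stale RX buffers to their pool, replacing the manual iova-to-phys translation, unmap, and put_page() sequence.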
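The repeated qidx arithmetic follows from the queue layout the diff implies: completion queues are numbered with the RX queues first, and the hardware send-queue numbering includes the XDP queues while the netdev TX queue numbering does not. A standalone sketch of that mapping, with a hypothetical hw_counts struct standing in for the relevant pfvf->hw fields:

/* Sketch only: hw_counts is a hypothetical stand-in for the
 * pfvf->hw fields referenced in the diff.
 */
struct hw_counts {
	int rx_queues;	/* RX queues; their CQs are numbered first */
	int tx_queues;	/* regular TX queues */
	int xdp_queues;	/* XDP TX queues, slotted after the TX SQs */
};

/* TX completion queues follow the RX ones, so the send queue backing
 * a TX CQ is found by stripping off the RX block (the
 * "cq->cq_idx - pfvf->hw.rx_queues" expression in the diff).
 */
static int cq_to_sq_idx(const struct hw_counts *hw, int cq_idx)
{
	return cq_idx - hw->rx_queues;
}

/* The netdev TX queue numbering has no slots for the XDP queues, so a
 * send-queue index past the regular TX range skips back over the XDP
 * block to find its netdev_get_tx_queue() index.
 */
static int sq_to_txq_idx(const struct hw_counts *hw, int sq_idx)
{
	return sq_idx >= hw->tx_queues ? sq_idx - hw->xdp_queues : sq_idx;
}

otx2_sqe_add_hdr() applies the inverse of sq_to_txq_idx(): a queue index past the regular TX range is shifted up by xdp_queues so the SQE targets the correct hardware send queue.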