Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c')
-rw-r--r--	drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c	52
1 file changed, 22 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f1d2dea90a8c..5975521a4c86 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -379,40 +379,35 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
 		     (pfvf->hw.cq_ecount_wait - 1));
 }
 
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
-			   gfp_t gfp)
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
 {
 	dma_addr_t iova;
+	u8 *buf;
 
-	/* Check if request can be accommodated in previous allocated page */
-	if (pool->page && ((pool->page_offset + pool->rbsize) <=
-	    (PAGE_SIZE << pool->rbpage_order))) {
-		pool->pageref++;
-		goto ret;
-	}
-
-	otx2_get_page(pool);
-
-	/* Allocate a new page */
-	pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-				 pool->rbpage_order);
-	if (unlikely(!pool->page))
+	buf = napi_alloc_frag(pool->rbsize);
+	if (unlikely(!buf))
 		return -ENOMEM;
 
-	pool->page_offset = 0;
-ret:
-	iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
-				      pool->rbsize, DMA_FROM_DEVICE);
-	if (!iova) {
-		if (!pool->page_offset)
-			__free_pages(pool->page, pool->rbpage_order);
-		pool->page = NULL;
+	iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+				    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+		page_frag_free(buf);
 		return -ENOMEM;
 	}
-	pool->page_offset += pool->rbsize;
+
 	return iova;
 }
 
+static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+{
+	dma_addr_t addr;
+
+	local_bh_disable();
+	addr = __otx2_alloc_rbuf(pfvf, pool);
+	local_bh_enable();
+	return addr;
+}
+
 void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
 {
 	struct otx2_nic *pfvf = netdev_priv(netdev);
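napi_alloc_frag() carves buffers out of a per-CPU page_frag cache, which is only safe to touch while bottom halves are disabled; the new otx2_alloc_rbuf() wrapper above exists so process-context callers (pool setup, the refill workqueue) get that protection, while NAPI-context code can call __otx2_alloc_rbuf() directly. A minimal sketch of the pattern, with an illustrative helper name not taken from the driver:

#include <linux/bottom_half.h>
#include <linux/skbuff.h>

/* Sketch: make a softirq-only per-CPU frag allocator callable from
 * process context by keeping softirqs off this CPU during the access.
 */
static void *frag_alloc_any_context(unsigned int size)
{
	void *buf;

	local_bh_disable();
	buf = napi_alloc_frag(size);	/* per-CPU page_frag cache */
	local_bh_enable();
	return buf;
}

Note also that the mapping is done with DMA_ATTR_SKIP_CPU_SYNC, so under the DMA API contract the buffer must be explicitly synced (e.g. with dma_sync_single_for_cpu()) before the CPU reads the received data.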
@@ -805,7 +800,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
 
 	free_ptrs = cq->pool_ptrs;
 	while (cq->pool_ptrs) {
-		bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL);
+		bufptr = otx2_alloc_rbuf(pfvf, rbpool);
 		if (bufptr <= 0) {
 			/* Schedule a WQ if we fails to free atleast half of the
 			 * pointers else enable napi for this RQ.
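The bufptr <= 0 test relies on the caller holding the returned address in a signed 64-bit variable: on failure the allocator smuggles -ENOMEM back through its dma_addr_t return type, and only a signed holder makes the comparison catch it. A hedged sketch of the idiom (helper name is illustrative):

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: a negative errno folded into an address-sized value is only
 * detectable as "<= 0" once the value sits in a signed type (s64);
 * in an unsigned dma_addr_t it would look like a huge valid address.
 */
static bool rbuf_alloc_failed(s64 bufptr)
{
	return bufptr <= 0;	/* negative errno, or a zero IOVA */
}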
@@ -1064,7 +1059,6 @@ static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
 		return err;
 
 	pool->rbsize = buf_size;
-	pool->rbpage_order = get_order(buf_size);
 
 	/* Initialize this pool's context via AF */
 	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
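Dropping rbpage_order follows from the allocator change: the pool no longer reserves compound pages of get_order(buf_size) and carves them up by hand, since the frag cache packs rbsize-sized buffers into pages itself. A small sketch of the arithmetic the old scheme implied, for illustration only:

#include <linux/mm.h>

/* Sketch: the removed code subdivided a compound page of
 * get_order(rbsize) pages via page_offset, e.g. a 2048-byte rbsize on
 * a 4K-page system gives order 0 and two buffers per page.
 */
static unsigned int bufs_per_alloc_unit(unsigned int rbsize)
{
	return (PAGE_SIZE << get_order(rbsize)) / rbsize;
}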
@@ -1152,13 +1146,12 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 			return -ENOMEM;
 
 		for (ptr = 0; ptr < num_sqbs; ptr++) {
-			bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+			bufptr = otx2_alloc_rbuf(pfvf, pool);
 			if (bufptr <= 0)
 				return bufptr;
 			otx2_aura_freeptr(pfvf, pool_id, bufptr);
 			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
 		}
-		otx2_get_page(pool);
 	}
 
 	return 0;
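The otx2_get_page() calls removed here and below were the batching half of the old scheme: one deferred page_ref_add() covered every buffer carved from the current page. With napi_alloc_frag() each fragment already holds its own page reference, released one-for-one by page_frag_free(). A sketch of the matching per-buffer free path under that assumption (helper name illustrative; the unmap attrs mirror the mapping side):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch: per-buffer teardown for a frag-backed receive buffer that
 * was mapped with DMA_ATTR_SKIP_CPU_SYNC; no batched refcounting left.
 */
static void rbuf_free(struct device *dev, dma_addr_t iova, void *buf,
		      size_t rbsize)
{
	dma_unmap_single_attrs(dev, iova, rbsize, DMA_FROM_DEVICE,
			       DMA_ATTR_SKIP_CPU_SYNC);
	page_frag_free(buf);	/* drops this fragment's page reference */
}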
@@ -1204,13 +1197,12 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
 	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
 		pool = &pfvf->qset.pool[pool_id];
 		for (ptr = 0; ptr < num_ptrs; ptr++) {
-			bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+			bufptr = otx2_alloc_rbuf(pfvf, pool);
 			if (bufptr <= 0)
 				return bufptr;
 			otx2_aura_freeptr(pfvf, pool_id,
 					  bufptr + OTX2_HEAD_ROOM);
 		}
-		otx2_get_page(pool);
 	}
 
 	return 0;
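Unlike the SQB case above, RX pointers enter the aura offset by OTX2_HEAD_ROOM, so the hardware writes the frame past reserved headroom that the stack can later use. A sketch of the offset arithmetic; the constant's value here is an assumption for illustration, as the driver derives the real one from its own alignment define:

#include <linux/types.h>

#define EXAMPLE_HEAD_ROOM	128	/* assumed; stands in for OTX2_HEAD_ROOM */

/* Sketch: the IOVA handed to hardware starts after the headroom, so
 * received data lands with free space in front of it.
 */
static u64 rq_buf_ptr(dma_addr_t bufptr)
{
	return (u64)bufptr + EXAMPLE_HEAD_ROOM;
}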