author    | Maxim Mikityanskiy <maximmi@nvidia.com> | 2022-01-28 13:13:11 +0200
committer | Saeed Mahameed <saeedm@nvidia.com>      | 2022-03-18 13:51:13 -0700
commit    | ddc87e7d477552084fd185b315a39d5067b01773
tree      | ce5d6d5bd37dda43e09d4d6e17294726b66883d7 /drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
parent    | net/mlx5e: Add XDP multi buffer support to the non-linear legacy RQ
net/mlx5e: Store DMA address inside struct page
Use page_pool_set_dma_addr() to store the DMA address of a page inside
struct page, in order to avoid passing struct mlx5e_dma_info to XDP
handlers. Previously, struct mlx5e_dma_info was used to pass both the
DMA address and the page, and it worked well for the single-fragment
case.
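For context, a minimal sketch of the pattern the patch adopts: stash the DMA address in struct page right after mapping, and clear it when unmapping, so later code needs only the page pointer. The function names and the DMA direction here are illustrative, not part of the patch; the page_pool accessors are the ones the patch actually uses.

```c
#include <linux/dma-mapping.h>
#include <net/page_pool.h>

/* Map a page for DMA and stash the address in struct page, mirroring
 * what mlx5e_page_alloc_pool() does after this patch. */
static int example_map_and_stash(struct device *dev, struct page *page)
{
	dma_addr_t addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	page_pool_set_dma_addr(page, addr);	/* address now travels with the page */
	return 0;
}

/* Unmap using only the page pointer, mirroring mlx5e_page_dma_unmap(). */
static void example_unmap(struct device *dev, struct page *page)
{
	dma_unmap_page_attrs(dev, page_pool_get_dma_addr(page), PAGE_SIZE,
			     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	page_pool_set_dma_addr(page, 0);	/* don't leave a stale address behind */
}
```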
When XDP multi buffer is in use and a fragmented xdp_frame has to be
transmitted, the driver needs to know the DMA addresses of its
fragments; however, the array of fragments in struct skb_shared_info
doesn't contain them. In order to pass the DMA addresses, the driver puts them
into struct page itself, which is accessible from the array of fragments
in struct skb_shared_info. The existing XDP handlers are modified to
remove the dependency on struct mlx5e_dma_info.
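To illustrate the TX side this enables, here is a sketch (hypothetical function, assuming the generic XDP multi-buffer helpers this series builds on) of how a transmit path can recover each fragment's DMA address from struct page alone:

```c
#include <net/page_pool.h>
#include <net/xdp.h>

/* Walk the fragments of a multi-buffer xdp_frame. skb_shared_info only
 * carries {page, offset, size}, so the DMA address is read back from
 * struct page, where it was stored at allocation time. */
static void example_walk_frags(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (!xdp_frame_has_frags(xdpf))
		return;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];
		dma_addr_t addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
				  skb_frag_off(frag);

		/* addr and skb_frag_size(frag) would be programmed into a
		 * hardware TX descriptor here. */
	}
}
```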
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_rx.c')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 40
1 file changed, 21 insertions, 19 deletions
```diff
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e9ad5b3a30ed..56bb58704bf9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -222,8 +222,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
 }
 
-static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
-				      struct mlx5e_dma_info *dma_info)
+static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
 {
 	struct mlx5e_page_cache *cache = &rq->page_cache;
 	u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
@@ -234,12 +233,13 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
 		return false;
 	}
 
-	if (!dev_page_is_reusable(dma_info->page)) {
+	if (!dev_page_is_reusable(page)) {
 		stats->cache_waive++;
 		return false;
 	}
 
-	cache->page_cache[cache->tail] = *dma_info;
+	cache->page_cache[cache->tail].page = page;
+	cache->page_cache[cache->tail].addr = page_pool_get_dma_addr(page);
 	cache->tail = tail_next;
 	return true;
 }
@@ -287,6 +287,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
 		dma_info->page = NULL;
 		return -ENOMEM;
 	}
+	page_pool_set_dma_addr(dma_info->page, dma_info->addr);
 
 	return 0;
 }
@@ -300,26 +301,27 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
 		return mlx5e_page_alloc_pool(rq, dma_info);
 }
 
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
 {
-	dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
+	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
+	dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
 			     DMA_ATTR_SKIP_CPU_SYNC);
+	page_pool_set_dma_addr(page, 0);
 }
 
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
-				struct mlx5e_dma_info *dma_info,
-				bool recycle)
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
 {
 	if (likely(recycle)) {
-		if (mlx5e_rx_cache_put(rq, dma_info))
+		if (mlx5e_rx_cache_put(rq, page))
 			return;
 
-		mlx5e_page_dma_unmap(rq, dma_info);
-		page_pool_recycle_direct(rq->page_pool, dma_info->page);
+		mlx5e_page_dma_unmap(rq, page);
+		page_pool_recycle_direct(rq->page_pool, page);
 	} else {
-		mlx5e_page_dma_unmap(rq, dma_info);
-		page_pool_release_page(rq->page_pool, dma_info->page);
-		put_page(dma_info->page);
+		mlx5e_page_dma_unmap(rq, page);
+		page_pool_release_page(rq->page_pool, page);
+		put_page(page);
 	}
 }
 
@@ -334,7 +336,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
 		 */
 		xsk_buff_free(dma_info->xsk);
 	else
-		mlx5e_page_release_dynamic(rq, dma_info, recycle);
+		mlx5e_page_release_dynamic(rq, dma_info->page, recycle);
 }
 
 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
@@ -1544,7 +1546,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	net_prefetchw(va); /* xdp_frame data area */
 
 	mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
-	if (mlx5e_xdp_handle(rq, di, prog, &xdp))
+	if (mlx5e_xdp_handle(rq, di->page, prog, &xdp))
 		return NULL; /* page/packet was consumed by XDP */
 
 	rx_headroom = xdp.data - xdp.data_hard_start;
@@ -1632,7 +1634,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	di = head_wi->di;
 
 	prog = rcu_dereference(rq->xdp_prog);
-	if (prog && mlx5e_xdp_handle(rq, di, prog, &xdp)) {
+	if (prog && mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
 		if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
 			int i;
@@ -1934,7 +1936,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	net_prefetchw(va); /* xdp_frame data area */
 
 	mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
-	if (mlx5e_xdp_handle(rq, di, prog, &xdp)) {
+	if (mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
 			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
 		return NULL; /* page/packet was consumed by XDP */
```