path: root/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
author    Dragos Tatulea <dtatulea@nvidia.com>  2023-01-27 16:58:52 +0200
committer Saeed Mahameed <saeedm@nvidia.com>  2023-03-28 13:43:57 -0700
commit    8fb1814f58f691373c692df75b761668069e197a (patch)
tree      ac3b4e88d7a5dd3c19f665a6567cf3a2fe1d6fee /drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
parent    net/mlx5e: RX, Remove mlx5e_alloc_unit argument in page allocation (diff)
net/mlx5e: RX, Remove alloc unit layout constraint for legacy rq
The mlx5e_alloc_unit union is conveniently used to store arrays of pointers to struct page or struct xdp_buff (for xsk). The union is currently expected to have the size of a pointer for xsk batch allocations to work. This is convenient for the current state of the code, but it makes it impossible to add a structure of a different size to the alloc unit.

A further patch in the series will add the mlx5e_frag_page struct, for which the described size constraint will no longer hold.

This change removes the usage of the mlx5e_alloc_unit union for legacy rq:

- A union of arrays (mlx5e_alloc_units) is introduced to replace the array of unions, allowing structures of different sizes.
- Each fragment has a pointer to a unit in the mlx5e_alloc_units array.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
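To make the layout change concrete, here is a minimal C sketch of the before/after data structures. It is reconstructed from the commit message and the diff only, not the actual mlx5 driver headers: the names carrying a _sketch suffix and the placeholder array lengths are illustrative (the real kernel type uses flexible-array members).

/* Opaque here; the real definitions live elsewhere in the kernel. */
struct page;
struct xdp_buff;

/* Before: legacy rq kept an array of these unions. The xsk batch path
 * cast that whole array to (struct xdp_buff **), which only works while
 * every union member is exactly pointer-sized -- the constraint this
 * patch removes.
 */
union mlx5e_alloc_unit {
	struct page *page;
	struct xdp_buff *xsk;
};

/* After: a single union of arrays. Each member array has a uniform
 * element type, so xsk_buffs can be handed to the batch allocator
 * directly, and a differently sized struct (mlx5e_frag_page, added in
 * a later patch) can become another member without breaking the cast.
 * Member set and lengths here are assumptions for illustration.
 */
union mlx5e_alloc_units_sketch {
	struct page *pages[8];          /* placeholder length */
	struct xdp_buff *xsk_buffs[8];  /* fed to xsk_buff_alloc_batch() */
};

/* Each RX fragment now holds a pointer to its own slot in the array;
 * this is the frag->xskp dereferenced in the diff below.
 */
struct mlx5e_wqe_frag_info_sketch {
	struct xdp_buff **xskp;
};

Note how the BUILD_BUG_ON removed in the first hunk below was exactly the pointer-size guard that the old array-of-unions cast required; with a dedicated xsk_buffs array it becomes unnecessary.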
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 19
1 file changed, 8 insertions, 11 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index fab787600459..8a5ae80e6142 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -163,13 +163,10 @@ int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
u32 contig, alloc;
int i;
- /* mlx5e_init_frags_partition creates a 1:1 mapping between
- * rq->wqe.frags and rq->wqe.alloc_units, which allows us to
- * allocate XDP buffers straight into alloc_units.
+ /* Each rq->wqe.frags->xskp is 1:1 mapped to an element inside the
+ * rq->wqe.alloc_units->xsk_buffs array allocated here.
*/
- BUILD_BUG_ON(sizeof(rq->wqe.alloc_units[0]) !=
- sizeof(rq->wqe.alloc_units[0].xsk));
- buffs = (struct xdp_buff **)rq->wqe.alloc_units;
+ buffs = rq->wqe.alloc_units->xsk_buffs;
contig = mlx5_wq_cyc_get_size(wq) - ix;
if (wqe_bulk <= contig) {
alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk);
@@ -189,7 +186,7 @@ int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
/* Assumes log_num_frags == 0. */
frag = &rq->wqe.frags[j];
- addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk);
+ addr = xsk_buff_xdp_get_frame_dma(*frag->xskp);
wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
}
@@ -211,11 +208,11 @@ int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
/* Assumes log_num_frags == 0. */
frag = &rq->wqe.frags[j];
- frag->au->xsk = xsk_buff_alloc(rq->xsk_pool);
- if (unlikely(!frag->au->xsk))
+ *frag->xskp = xsk_buff_alloc(rq->xsk_pool);
+ if (unlikely(!*frag->xskp))
return i;
- addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk);
+ addr = xsk_buff_xdp_get_frame_dma(*frag->xskp);
wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
}
@@ -306,7 +303,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt)
{
- struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->au->xsk);
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(*wi->xskp);
struct bpf_prog *prog;
/* wi->offset is not used in this function, because xdp->data and the