author		Maxim Mikityanskiy <maximmi@nvidia.com>	2022-01-26 17:43:33 +0200
committer	Saeed Mahameed <saeedm@nvidia.com>	2022-03-18 13:51:11 -0700
commit		4e8231f1c22d36bbf1eed20b2a54609f2e4c49ed (patch)
tree		b8e00ed21e845eba7acc1598f548b8d1fa0aaf55 /drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
parent		af_vsock: SOCK_SEQPACKET broken buffer test (diff)
net/mlx5e: Prepare non-linear legacy RQ for XDP multi buffer support
mlx5e_skb_from_cqe_nonlinear creates an xdp_buff first, putting the first
fragment as the linear part, and the rest of the fragments as frags to
struct skb_shared_info in the tailroom. Then it creates an SKB in place,
based on the xdp_buff. The XDP program is not called in this commit yet.

This commit contains no functional change, except the SKB is built over
the whole frag_stride of the first fragment, instead of the minimal size
required (headroom, data and skb_shared_info).

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
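mlx5e_fill_xdp_buff(), which the new code path below calls to set up the xdp_buff over
the first fragment, is a pre-existing helper in en_rx.c and is not shown in this hunk.
A minimal sketch of what such an initializer does, assuming it wraps the generic
xdp_init_buff()/xdp_prepare_buff() helpers and the rq->xdp_rxq / rq->buff.frame0_sz
fields (illustrative only, not copied from the driver):

/* Sketch: initializer in the spirit of mlx5e_fill_xdp_buff(); the driver's
 * exact body may differ.
 */
static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
				u32 len, struct xdp_buff *xdp)
{
	/* frame0_sz covers the whole first fragment, headroom and tailroom included */
	xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
	/* data spans [va + headroom, va + headroom + len); data_meta starts at data */
	xdp_prepare_buff(xdp, va, headroom, len, true);
}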
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_rx.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rx.c	75
1 file changed, 61 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 4b8699f39200..dd8ff62e1693 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1567,45 +1567,92 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 			     struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
 {
 	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
+	struct mlx5e_wqe_frag_info *head_wi = wi;
 	u16 rx_headroom = rq->buff.headroom;
 	struct mlx5e_dma_info *di = wi->di;
+	struct skb_shared_info *sinfo;
 	u32 frag_consumed_bytes;
-	u32 first_frag_size;
+	struct xdp_buff xdp;
 	struct sk_buff *skb;
+	u32 truesize;
 	void *va;
 
 	va = page_address(di->page) + wi->offset;
 	frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
-	first_frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + frag_consumed_bytes);
 
 	dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
-				      first_frag_size, DMA_FROM_DEVICE);
+				      rq->buff.frame0_sz, DMA_FROM_DEVICE);
 	net_prefetch(va + rx_headroom);
 
-	/* XDP is not supported in this configuration, as incoming packets
-	 * might spread among multiple pages.
-	 */
-	skb = mlx5e_build_linear_skb(rq, va, first_frag_size, rx_headroom,
-				     frag_consumed_bytes, 0);
-	if (unlikely(!skb))
-		return NULL;
-
-	page_ref_inc(di->page);
+	mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
+	sinfo = xdp_get_shared_info_from_buff(&xdp);
+	truesize = 0;
 
 	cqe_bcnt -= frag_consumed_bytes;
 	frag_info++;
 	wi++;
 
 	while (cqe_bcnt) {
+		skb_frag_t *frag;
+
+		di = wi->di;
+
 		frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
 
-		mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset,
-				   frag_consumed_bytes, frag_info->frag_stride);
+		dma_sync_single_for_cpu(rq->pdev, di->addr + wi->offset,
+					frag_consumed_bytes, DMA_FROM_DEVICE);
+
+		if (!xdp_buff_has_frags(&xdp)) {
+			/* Init on the first fragment to avoid cold cache access
+			 * when possible.
+			 */
+			sinfo->nr_frags = 0;
+			sinfo->xdp_frags_size = 0;
+			xdp_buff_set_frags_flag(&xdp);
+		}
+
+		frag = &sinfo->frags[sinfo->nr_frags++];
+		__skb_frag_set_page(frag, di->page);
+		skb_frag_off_set(frag, wi->offset);
+		skb_frag_size_set(frag, frag_consumed_bytes);
+
+		if (page_is_pfmemalloc(di->page))
+			xdp_buff_set_frag_pfmemalloc(&xdp);
+
+		sinfo->xdp_frags_size += frag_consumed_bytes;
+		truesize += frag_info->frag_stride;
+
 		cqe_bcnt -= frag_consumed_bytes;
 		frag_info++;
 		wi++;
 	}
 
+	di = head_wi->di;
+
+	skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
+				     xdp.data - xdp.data_hard_start,
+				     xdp.data_end - xdp.data,
+				     xdp.data - xdp.data_meta);
+	if (unlikely(!skb))
+		return NULL;
+
+	page_ref_inc(di->page);
+
+	if (unlikely(xdp_buff_has_frags(&xdp))) {
+		int i;
+
+		/* sinfo->nr_frags is reset by build_skb, calculate again. */
+		xdp_update_skb_shared_info(skb, wi - head_wi - 1,
+					   sinfo->xdp_frags_size, truesize,
+					   xdp_buff_is_frag_pfmemalloc(&xdp));
+
+		for (i = 0; i < sinfo->nr_frags; i++) {
+			skb_frag_t *frag = &sinfo->frags[i];
+
+			page_ref_inc(skb_frag_page(frag));
+		}
+	}
+
 	return skb;
 }
 
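The frag count is recomputed as wi - head_wi - 1 because build_skb() (via
mlx5e_build_linear_skb) zeroes the shared info, including nr_frags, while wi was
advanced once past the fragment backing the linear part and once per tail
fragment. xdp_update_skb_shared_info() then re-attaches that accounting to the
new SKB. A hedged sketch of the generic helper's effect, assuming its definition
in include/net/xdp.h (the exact body may differ between kernel versions):

/* Sketch: re-attach the frag accounting that build_skb() cleared.
 * Illustrative only; see include/net/xdp.h for the real helper.
 */
static inline void
xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
			   unsigned int size, unsigned int truesize,
			   bool pfmemalloc)
{
	skb_shinfo(skb)->nr_frags = nr_frags;

	skb->len += size;		/* tail fragments add to the total length */
	skb->data_len += size;		/* ... and to the non-linear length */
	skb->truesize += truesize;	/* here: one frag_stride per tail fragment */
	skb->pfmemalloc |= pfmemalloc;
}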