author    Maxim Mikityanskiy <maximmi@nvidia.com>	2022-01-27 17:01:05 +0200
committer Saeed Mahameed <saeedm@nvidia.com>	2022-03-18 13:51:12 -0700
commit    ea5d49bdae8b4c9dcdac574eef96b1bd47000c2a (patch)
tree      3ce3ffde92a4ce8296f1f46497cd88e5035d8334 /drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
parent    net/mlx5e: Use page-sized fragments with XDP multi buffer (diff)
net/mlx5e: Add XDP multi buffer support to the non-linear legacy RQ
This commit adds XDP multi buffer support to the RX path in the
non-linear legacy RQ mode. mlx5e_xdp_handle is called from
mlx5e_skb_from_cqe_nonlinear.

XDP_TX action for fragmented XDP frames is not yet supported and
blocked.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
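As background (a minimal sketch, not the mlx5e code from this patch): in the
kernel's XDP multi buffer scheme, fragments beyond the linear part are
described by the skb_shared_info that sits at the tail of the first buffer.
The sketch below assumes the generic helpers introduced by the multi buffer
series (xdp_get_shared_info_from_buff(), xdp_buff_has_frags(),
xdp_buff_set_frags_flag()); the function example_add_frag() itself is
hypothetical.

#include <linux/skbuff.h>
#include <net/xdp.h>

/* Hypothetical helper, for illustration only: attach one page fragment
 * to an xdp_buff.  The shared info at the tail of the first buffer
 * carries the fragment array and the total fragment size.
 */
static void example_add_frag(struct xdp_buff *xdp, struct page *page,
			     u32 frag_offset, u32 len)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	skb_frag_t *frag;

	if (!xdp_buff_has_frags(xdp)) {
		/* First fragment: initialize the shared info area. */
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(xdp);
	}

	frag = &sinfo->frags[sinfo->nr_frags++];
	__skb_frag_set_page(frag, page);
	skb_frag_off_set(frag, frag_offset);
	skb_frag_size_set(frag, len);
	sinfo->xdp_frags_size += len;
}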
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_rx.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 13 +++++++++++++
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index dd8ff62e1693..e9ad5b3a30ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1572,6 +1572,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	struct mlx5e_dma_info *di = wi->di;
 	struct skb_shared_info *sinfo;
 	u32 frag_consumed_bytes;
+	struct bpf_prog *prog;
 	struct xdp_buff xdp;
 	struct sk_buff *skb;
 	u32 truesize;
@@ -1582,6 +1583,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
 				      rq->buff.frame0_sz, DMA_FROM_DEVICE);
+	net_prefetchw(va); /* xdp_frame data area */
 	net_prefetch(va + rx_headroom);
 
 	mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
@@ -1629,6 +1631,17 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	di = head_wi->di;
 
+	prog = rcu_dereference(rq->xdp_prog);
+	if (prog && mlx5e_xdp_handle(rq, di, prog, &xdp)) {
+		if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+			int i;
+
+			for (i = wi - head_wi; i < rq->wqe.info.num_frags; i++)
+				mlx5e_put_rx_frag(rq, &head_wi[i], true);
+		}
+		return NULL; /* page/packet was consumed by XDP */
+	}
+
 	skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
 				     xdp.data - xdp.data_hard_start,
 				     xdp.data_end - xdp.data,
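Because this patch blocks XDP_TX for fragmented frames, a frags-aware program
should avoid returning XDP_TX for them. A minimal illustrative sketch of such
a program (not part of this patch; the program and function names are made
up), using libbpf's SEC("xdp.frags") convention and the
bpf_xdp_get_buff_len() helper from the same multi buffer series:

// SPDX-License-Identifier: GPL-2.0
/* Illustration only: ctx->data..ctx->data_end covers just the linear
 * part of the frame; bpf_xdp_get_buff_len() returns the total length
 * including all fragments held in the shared info area.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int xdp_mb_example(struct xdp_md *ctx)
{
	__u64 linear = ctx->data_end - ctx->data;	/* linear part only */
	__u64 total = bpf_xdp_get_buff_len(ctx);	/* whole frame */

	/* XDP_TX on a fragmented frame is blocked by this driver, so
	 * pass multi buffer frames up the stack instead.
	 */
	if (total > linear)
		return XDP_PASS;

	return XDP_TX;
}

char _license[] SEC("license") = "GPL";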