author | Toke Høiland-Jørgensen <toke@redhat.com> | 2023-01-19 14:15:35 -0800
---|---|---
committer | Martin KaFai Lau <martin.lau@kernel.org> | 2023-01-23 09:58:23 -0800
commit | bc8d405b1ba92b9a71f9a94f3c08906e9d8c452c |
tree | d30b2426574d7400125583cf0dff86a471a1795b |
parent | net/mlx5e: Introduce wrapper for xdp_buff |
net/mlx5e: Support RX XDP metadata
Support the RX hash and timestamp metadata kfuncs. To make this work, we
need to pass the cqe pointer into the mlx5e_skb_from* functions so that the
kfuncs can later retrieve it from the XDP ctx (sketches of the ctx wrapper
and the kfuncs follow the tags below).
Cc: Tariq Toukan <tariqt@nvidia.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Willem de Bruijn <willemb@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Anatoly Burakov <anatoly.burakov@intel.com>
Cc: Alexander Lobakin <alexandr.lobakin@intel.com>
Cc: Magnus Karlsson <magnus.karlsson@gmail.com>
Cc: Maryam Tahhan <mtahhan@redhat.com>
Cc: xdp-hints@xdp-project.net
Cc: netdev@vger.kernel.org
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/r/20230119221536.3349901-17-sdf@google.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
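For context on the diff below: it builds on the mlx5e_xdp_buff wrapper introduced by the parent commit ("net/mlx5e: Introduce wrapper for xdp_buff"). The following is a rough sketch of the driver-internal pieces as this series shapes them, reconstructed rather than quoted verbatim, so treat the exact field comments and helper placement as assumptions:

```c
/* Driver-private wrapper around xdp_buff. This patch adds the cqe and rq
 * backpointers so the RX metadata kfuncs can reach per-packet (cqe) and
 * per-queue (rq) state starting from the XDP ctx.
 */
struct mlx5e_xdp_buff {
	struct xdp_buff xdp;	/* must stay first: ctx pointers are cast back and forth */
	struct mlx5_cqe64 *cqe;	/* per-packet completion entry, set in mlx5e_skb_from* */
	struct mlx5e_rq *rq;	/* per-queue state, set once when buffers are allocated */
};

/* Because xdp is the first member, an xdp_buff handed out by the XSK pool
 * can be safely converted back into the wrapper.
 */
static struct mlx5e_xdp_buff *xsk_buff_to_mxbuf(struct xdp_buff *xdp)
{
	return (struct mlx5e_xdp_buff *)xdp;
}
```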
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 14 |
1 file changed, 14 insertions, 0 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 08d4e5c30b40..b7c84ebe8418 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -52,25 +52,30 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 
 	if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) {
 		for (i = 0; i < batch; i++) {
+			struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
 			dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
 
 			umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
 				.ptag = cpu_to_be64(addr | MLX5_EN_WR),
 			};
+			mxbuf->rq = rq;
 		}
 	} else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) {
 		for (i = 0; i < batch; i++) {
+			struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
 			dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
 
 			umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
 				.key = rq->mkey_be,
 				.va = cpu_to_be64(addr),
 			};
+			mxbuf->rq = rq;
 		}
 	} else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) {
 		u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2);
 
 		for (i = 0; i < batch; i++) {
+			struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
 			dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
 
 			umr_wqe->inline_ksms[i << 2] = (struct mlx5_ksm) {
@@ -89,6 +94,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 				.key = rq->mkey_be,
 				.va = cpu_to_be64(rq->wqe_overflow.addr),
 			};
+			mxbuf->rq = rq;
 		}
 	} else {
 		__be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) -
@@ -96,6 +102,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 		__be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size);
 
 		for (i = 0; i < batch; i++) {
+			struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
 			dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
 
 			umr_wqe->inline_klms[i << 1] = (struct mlx5_klm) {
@@ -108,6 +115,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 				.va = cpu_to_be64(rq->wqe_overflow.addr),
 				.bcount = pad_size,
 			};
+			mxbuf->rq = rq;
 		}
 	}
 
@@ -238,6 +246,7 @@ static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_b
 
 struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 						    struct mlx5e_mpw_info *wi,
+						    struct mlx5_cqe64 *cqe,
 						    u16 cqe_bcnt,
 						    u32 head_offset,
 						    u32 page_idx)
@@ -258,6 +267,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	 */
 	WARN_ON_ONCE(head_offset);
 
+	/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
+	mxbuf->cqe = cqe;
 	xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
 	xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
 	net_prefetch(mxbuf->xdp.data);
@@ -292,6 +303,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 
 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 					      struct mlx5e_wqe_frag_info *wi,
+					      struct mlx5_cqe64 *cqe,
 					      u32 cqe_bcnt)
 {
 	struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->au->xsk);
@@ -304,6 +316,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 	 */
 	WARN_ON_ONCE(wi->offset);
 
+	/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
+	mxbuf->cqe = cqe;
 	xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
 	xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
 	net_prefetch(mxbuf->xdp.data);
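The rq and cqe pointers stashed above are what the RX metadata kfuncs this commit adds in en/xdp.c (outside the rx.c-only view shown here) read back out of the ctx. Roughly, and as a sketch of the mechanism rather than a verbatim quote of that file:

```c
/* Approximate shape of the mlx5e RX metadata kfunc implementations: cast
 * the xdp_md ctx back to the driver wrapper, then pull the values out of
 * the stashed rq/cqe pointers.
 */
static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	const struct mlx5e_xdp_buff *_ctx = (void *)ctx;

	if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
		return -EOPNOTSUPP;

	*timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
					_ctx->rq->clock, get_cqe_ts(_ctx->cqe));
	return 0;
}

static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
{
	const struct mlx5e_xdp_buff *_ctx = (void *)ctx;

	if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
		return -EOPNOTSUPP;

	*hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
	return 0;
}
```

On the BPF side, a program consumes this through the generic kfuncs added earlier in the series. A minimal consumer might look like the following; the kfunc names match this series, but treat the exact declarations and includes as assumptions:

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Kfuncs resolved against the running kernel; each returns 0 on success
 * or -EOPNOTSUPP when the device/queue cannot supply that metadata.
 */
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
				    __u32 *hash) __ksym;

SEC("xdp")
int rx_meta(struct xdp_md *ctx)
{
	__u64 ts = 0;
	__u32 hash = 0;

	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("rx timestamp: %llu", ts);
	if (!bpf_xdp_metadata_rx_hash(ctx, &hash))
		bpf_printk("rx hash: 0x%x", hash);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```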