author		Maxim Mikityanskiy <maximmi@nvidia.com>	2022-01-31 19:43:53 +0200
committer	Saeed Mahameed <saeedm@nvidia.com>	2022-03-18 13:51:15 -0700
commit		a48ad58cec18702249a228267dec29d105bbe5a5 (patch)
tree		988712a57fbe994468857db79a1f267fdb3c0531 /drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
parent		net/mlx5e: Unindent the else-block in mlx5e_xmit_xdp_buff (diff)
net/mlx5e: Support multi buffer XDP_TX
This commit enables passing multi buffer XDP frames to the TX handlers
on XDP_TX. Fragments are DMA-synced to the device and queued to the
xdpi_fifo for subsequent unmapping.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
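For readers new to multi buffer XDP, the pattern the patch relies on is sketched below. This is a minimal, illustrative sketch, not code from this driver: the kernel APIs used (xdp_frame_has_frags(), xdp_get_shared_info_from_frame(), page_pool_get_dma_addr(), dma_sync_single_for_device() and the skb_frag_* accessors) are real, but the helper name xdp_sync_frame_for_device() is hypothetical, and the sketch assumes every page backing the frame comes from a DMA-mapped page_pool.

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>
#include <net/xdp.h>

/* Hypothetical helper: sync the linear part and every fragment of a
 * (possibly multi buffer) xdp_frame for device access before TX.
 * Assumes all pages are page_pool pages with a live DMA mapping.
 */
static void xdp_sync_frame_for_device(struct device *dev,
				      struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	dma_addr_t dma_addr;
	int i;

	/* Linear part: the payload sits inside an already-mapped page;
	 * recover the mapping and add the payload's in-page offset.
	 */
	dma_addr = page_pool_get_dma_addr(virt_to_page(xdpf->data)) +
		   offset_in_page(xdpf->data);
	dma_sync_single_for_device(dev, dma_addr, xdpf->len, DMA_TO_DEVICE);

	if (!xdp_frame_has_frags(xdpf))
		return;

	/* Fragments are described by a skb_shared_info stored with the
	 * frame, exactly like skb frags.
	 */
	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];

		dma_addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
			   skb_frag_off(frag);
		dma_sync_single_for_device(dev, dma_addr,
					   skb_frag_size(frag),
					   DMA_TO_DEVICE);
	}
}

Syncing rather than remapping is what makes XDP_TX cheap here: the pages were already DMA-mapped by the page_pool when they were posted to the RQ, so the TX path only hands ownership of the existing mapping back to the device.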
Diffstat (limited to '')
 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 39 +++++++++++++++++++++++++++++++--------
 1 file changed, 31 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 368e54949614..f35b62ce4c07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -59,20 +59,17 @@ static inline bool
 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 		    struct page *page, struct xdp_buff *xdp)
 {
+	struct skb_shared_info *sinfo = NULL;
 	struct mlx5e_xmit_data xdptxd;
 	struct mlx5e_xdp_info xdpi;
 	struct xdp_frame *xdpf;
 	dma_addr_t dma_addr;
+	int i;
 
 	xdpf = xdp_convert_buff_to_frame(xdp);
 	if (unlikely(!xdpf))
 		return false;
 
-	if (unlikely(xdp_frame_has_frags(xdpf))) {
-		xdp_return_frame(xdpf);
-		return false;
-	}
-
 	xdptxd.data = xdpf->data;
 	xdptxd.len  = xdpf->len;
 
@@ -117,19 +114,45 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 	 */
 	xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+	xdpi.page.rq = rq;
 
 	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
 	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
 
+	if (unlikely(xdp_frame_has_frags(xdpf))) {
+		sinfo = xdp_get_shared_info_from_frame(xdpf);
+
+		for (i = 0; i < sinfo->nr_frags; i++) {
+			skb_frag_t *frag = &sinfo->frags[i];
+			dma_addr_t addr;
+			u32 len;
+
+			addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+				skb_frag_off(frag);
+			len = skb_frag_size(frag);
+			dma_sync_single_for_device(sq->pdev, addr, len,
+						   DMA_TO_DEVICE);
+		}
+	}
+
 	xdptxd.dma_addr = dma_addr;
-	xdpi.page.rq = rq;
-	xdpi.page.page = page;
 
 	if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-				      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
+				      mlx5e_xmit_xdp_frame, sq, &xdptxd, sinfo, 0)))
 		return false;
 
+	xdpi.page.page = page;
 	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+
+	if (unlikely(xdp_frame_has_frags(xdpf))) {
+		for (i = 0; i < sinfo->nr_frags; i++) {
+			skb_frag_t *frag = &sinfo->frags[i];
+
+			xdpi.page.page = skb_frag_page(frag);
+			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+		}
+	}
+
 	return true;
 }
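One consequence of the hunk above is worth spelling out: the TX path now pushes one MLX5E_XDP_XMIT_MODE_PAGE entry into the xdpi_fifo per page (the linear page first, then one per fragment), each tagged with the originating rq, so the completion path must pop exactly as many entries per frame and release each page. Below is a minimal sketch of that consumer side, assuming the mlx5e definitions from en/xdp.h: mlx5e_xdpi_fifo_pop() and struct mlx5e_xdp_info are real, but example_xdpsq_release_pages() is a hypothetical name, and the real driver does this work in its CQE-completion handlers.

/* Hypothetical sketch of the pop side: num_pages must equal the number
 * of entries the TX path pushed for this frame (1 + sinfo->nr_frags).
 */
static void example_xdpsq_release_pages(struct mlx5e_xdp_info_fifo *fifo,
					int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(fifo);

		/* Return the page to the page_pool it was allocated from;
		 * allow_direct is false because completions may run outside
		 * the RQ's NAPI context.
		 */
		page_pool_put_full_page(xdpi.page.rq->page_pool,
					xdpi.page.page, false);
	}
}

Recording the rq in every fifo entry is what keeps recycling correct when one SQ carries frames from more than one RQ: each page returns to the page_pool it was allocated from.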