author		2025-03-19 16:02:59 +0200
committer	2025-03-25 07:29:47 -0700
commit		16ad8394bf310791d2ad171e266b639b6b265fc5 (patch)
tree		b19d63acd42dfbcb531d69e4b344f4e2eb23dbcf
parent		net/mlx5e: TX, Utilize WQ fragments edge for multi-packet WQEs (diff)
net/mlx5: Lag, use port selection tables when available
As queue affinity is being deprecated and will no longer be supported
in the future, always check for the presence of the port selection
namespace. When available, leverage it to distribute traffic across
the physical ports via steering, ensuring compatibility with future
NICs.
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/1742392983-153050-2-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
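In practice the patch folds the former RoCE and offloads variants into a
single helper that decides the port selection mode for all LAG flavors.
Below is a condensed sketch of the unified helper's decision flow,
assembled from the hunks further down; it is illustrative only, since the
capability-present branch is partly outside the hunk context and is
reconstructed here rather than quoted verbatim:

	/* Condensed sketch, assembled from the diff below; illustrative,
	 * not verbatim kernel code. All identifiers are the driver's own.
	 */
	static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
					      enum mlx5_lag_mode mode,
					      unsigned long *flags)
	{
		int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
		struct mlx5_core_dev *dev0;

		if (first_idx < 0)
			return -EINVAL;

		/* MPESW and multipath select ports by their own mechanisms. */
		if (mode == MLX5_LAG_MODE_MPESW || mode == MLX5_LAG_MODE_MULTIPATH)
			return 0;

		dev0 = ldev->pf[first_idx].dev;

		/* No port selection flow table: legacy queue affinity,
		 * which cannot span more than two ports.
		 */
		if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table))
			return ldev->ports > 2 ? -EINVAL : 0;

		/* Steering-based (hash) distribution across physical ports. */
		if (ldev->ports > 2)
			ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
		set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
		return 0;
	}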
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c	| 38
1 file changed, 9 insertions(+), 29 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index ba41dd149f53..7db5ca95d322 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -583,8 +583,9 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
 	}
 }
 
-static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
-					   unsigned long *flags)
+static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+				      enum mlx5_lag_mode mode,
+				      unsigned long *flags)
 {
 	int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
 	struct mlx5_core_dev *dev0;
@@ -592,7 +593,12 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
 	if (first_idx < 0)
 		return -EINVAL;
 
+	if (mode == MLX5_LAG_MODE_MPESW ||
+	    mode == MLX5_LAG_MODE_MULTIPATH)
+		return 0;
+
 	dev0 = ldev->pf[first_idx].dev;
+
 	if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
 		if (ldev->ports > 2)
 			return -EINVAL;
@@ -607,32 +613,10 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
 	return 0;
 }
 
-static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
-						struct lag_tracker *tracker,
-						enum mlx5_lag_mode mode,
-						unsigned long *flags)
-{
-	int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
-	struct lag_func *dev0;
-
-	if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW)
-		return;
-
-	dev0 = &ldev->pf[first_idx];
-	if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
-	    tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
-		if (ldev->ports > 2)
-			ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
-		set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
-	}
-}
-
 static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
 			      struct lag_tracker *tracker, bool shared_fdb,
 			      unsigned long *flags)
 {
-	bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
-
 	*flags = 0;
 	if (shared_fdb) {
 		set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
@@ -642,11 +626,7 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
 	if (mode == MLX5_LAG_MODE_MPESW)
 		set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
 
-	if (roce_lag)
-		return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
-
-	mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
-	return 0;
+	return mlx5_lag_set_port_sel_mode(ldev, mode, flags);
 }
 
 char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
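One behavioral consequence visible above: the removed offloads path set
MLX5_LAG_MODE_FLAG_HASH_BASED only when tracker->tx_type ==
NETDEV_LAG_TX_TYPE_HASH, while the unified helper sets it whenever the
port_select_flow_table capability is present (MPESW and multipath aside),
so steering becomes the default as queue affinity is phased out.
Downstream, that bit is what flips the mode reported by
mlx5_get_str_port_sel_mode() in the last context line above. A minimal
sketch of such flag consumption follows; the helper name and the
MLX5_LAG_PORT_SELECT_MODE_* values are assumptions for illustration and
are not taken from this diff:

	/* Sketch only: how the HASH_BASED bit would typically select the
	 * reported mode. get_port_sel_mode() and the enum values are
	 * assumed for illustration, not shown in this diff.
	 */
	static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
	{
		if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
			return MLX5_LAG_PORT_SELECT_MODE_HASH;		/* steering */
		if (mode == MLX5_LAG_MODE_MPESW)
			return MLX5_LAG_PORT_SELECT_MODE_MULTIPORT_ESW;
		return MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY;	/* legacy */
	}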