author     Maxim Mikityanskiy <maximmi@mellanox.com>  2019-08-07 17:46:15 +0300
committer  Saeed Mahameed <saeedm@mellanox.com>       2019-08-28 11:49:03 -0700
commit     45f171b1182b9c4ab6d854d6f7fd7dd771fed591 (patch)
tree       f43c3c30cd36dd10537fe3b29ae9c30d8cbb6008 /drivers/net/ethernet/mellanox/mlx5/core/en_main.c
parent     net/mlx5e: Expose new function for TIS destroy loop (diff)
net/mlx5e: Support LAG TX port affinity distribution
When the VF LAG is in use, round-robin the TX affinity of channels among the different ports, if supported by the firmware. Create a set of TISes per port, while doing round-robin of the channels over the different sets. Let all SQs of a channel share the same set of TISes.

If the lag_tx_port_affinity HCA capability bit is supported, num_lag_ports > 1 and we aren't the LACP owner (the PF in the regular use case), assign the affinities; otherwise use tx_affinity == 0 in the TIS context to let the firmware assign the affinities itself. The TISes of the LACP owner are mapped only to the native physical port.

For VFs, the starting port for the round-robin is determined by the vhca_id, because a VF may have only one channel if it is attached to a single-core VM.

Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Mark Bloch <markb@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
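Editor's note: the channel-to-port mapping described above is a plain modulo over the number of LAG ports, with a per-function bias taken from vhca_id for VFs. The following stand-alone sketch models only that arithmetic; enumerate_lag_port, its parameters and the sample values here are illustrative stand-ins, while the real driver reads num_lag_ports, vhca_id and the PF/VF distinction from firmware capabilities (see mlx5e_enumerate_lag_port in the diff below).

#include <stdbool.h>
#include <stdio.h>

/* Stand-alone model of the channel -> LAG port mapping added by this patch.
 * is_pf, vhca_id and num_lag_ports are hypothetical inputs here; the driver
 * obtains them from device/firmware capabilities instead.
 */
static unsigned int enumerate_lag_port(bool is_pf, unsigned int vhca_id,
                                       unsigned int num_lag_ports,
                                       unsigned int channel_ix)
{
        /* The PF (LACP owner) starts at port 0; a VF is biased by its
         * vhca_id so that single-channel VFs spread across the ports.
         */
        unsigned int bias = is_pf ? 0 : vhca_id;

        return (channel_ix + bias) % num_lag_ports;
}

int main(void)
{
        unsigned int ix;

        /* Example: a 2-port LAG, a VF with vhca_id = 3, four channels. */
        for (ix = 0; ix < 4; ix++)
                printf("channel %u -> lag port %u\n",
                       ix, enumerate_lag_port(false, 3, 2, ix));
        return 0;
}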
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_main.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 54
1 file changed, 38 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 390e614ac46e..2786f5b8057d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1442,7 +1442,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
                 return err;
 
         csp.tis_lst_sz = 1;
-        csp.tisn = c->priv->tisn[0]; /* tc = 0 */
+        csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
         csp.cqn = sq->cq.mcq.cqn;
         csp.wq_ctrl = &sq->wq_ctrl;
         csp.min_inline_mode = sq->min_inline_mode;
@@ -1692,7 +1692,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
         for (tc = 0; tc < params->num_tc; tc++) {
                 int txq_ix = c->ix + tc * priv->max_nch;
 
-                err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
+                err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
                                        params, &cparam->sq, &c->sq[tc], tc);
                 if (err)
                         goto err_close_sqs;
@@ -1926,6 +1926,13 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
         mlx5e_close_cq(&c->icosq.cq);
 }
 
+static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
+{
+        u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
+
+        return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
+}
+
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                               struct mlx5e_params *params,
                               struct mlx5e_channel_param *cparam,
@@ -1960,6 +1967,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
         c->xdp = !!params->xdp_prog;
         c->stats = &priv->channel_stats[ix].ch;
         c->irq_desc = irq_to_desc(irq);
+        c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
 
         err = mlx5e_alloc_xps_cpumask(c, params);
         if (err)
@@ -3181,35 +3189,49 @@ void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
 
 void mlx5e_destroy_tises(struct mlx5e_priv *priv)
 {
-        int tc;
+        int tc, i;
 
-        for (tc = 0; tc < priv->profile->max_tc; tc++)
-                mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+        for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
+                for (tc = 0; tc < priv->profile->max_tc; tc++)
+                        mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
+}
+
+static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
+{
+        return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
 }
 
 int mlx5e_create_tises(struct mlx5e_priv *priv)
 {
+        int tc, i;
         int err;
-        int tc;
 
-        for (tc = 0; tc < priv->profile->max_tc; tc++) {
-                u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
-                void *tisc;
+        for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
+                for (tc = 0; tc < priv->profile->max_tc; tc++) {
+                        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+                        void *tisc;
 
-                tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+                        tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
 
-                MLX5_SET(tisc, tisc, prio, tc << 1);
+                        MLX5_SET(tisc, tisc, prio, tc << 1);
 
-                err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[tc]);
-                if (err)
-                        goto err_close_tises;
+                        if (mlx5e_lag_should_assign_affinity(priv->mdev))
+                                MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
+
+                        err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
+                        if (err)
+                                goto err_close_tises;
+                }
         }
 
         return 0;
 
 err_close_tises:
-        for (tc--; tc >= 0; tc--)
-                mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+        for (; i >= 0; i--) {
+                for (tc--; tc >= 0; tc--)
+                        mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
+                tc = priv->profile->max_tc;
+        }
 
         return err;
 }
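Editor's note: one subtle point in the new mlx5e_create_tises() error path above is the unwind shape: on failure it first destroys the TISes already created for the current port (tc-1 down to 0), then resets tc and walks every previously completed port. Below is a small user-space model of that pattern only; NUM_PORTS, MAX_TC and the fail_at injection point are arbitrary illustrations standing in for the real firmware values and a real mlx5e_create_tis() failure.

#include <stdio.h>

#define NUM_PORTS 2     /* arbitrary stand-in for mlx5e_get_num_lag_ports() */
#define MAX_TC    4     /* arbitrary stand-in for priv->profile->max_tc */

/* Model of the nested create loop and its unwind: on error, destroy
 * tc-1..0 for the current port i, then all TCs of every earlier port.
 */
static int create_all(int fail_at)
{
        int i, tc, n = 0;

        for (i = 0; i < NUM_PORTS; i++) {
                for (tc = 0; tc < MAX_TC; tc++) {
                        if (n == fail_at)
                                goto err_unwind;
                        printf("created   port %d tc %d\n", i, tc);
                        n++;
                }
        }
        return 0;

err_unwind:
        for (; i >= 0; i--) {
                for (tc--; tc >= 0; tc--)
                        printf("destroyed port %d tc %d\n", i, tc);
                tc = MAX_TC;
        }
        return -1;
}

int main(void)
{
        create_all(5);  /* fail on the 6th create: port 1, tc 1 */
        return 0;
}

Running the model shows that exactly the five entries that were created are destroyed, in reverse order, which is the property the two-dimensional priv->tisn[][] cleanup relies on.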