Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_main.c')
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 176 ++++++++++++++------
 1 file changed, 143 insertions(+), 33 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index f33c38629886..b9a175982801 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -65,6 +65,7 @@
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "en/ptp.h"
+#include "qos.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
@@ -1143,7 +1144,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
@@ -1233,6 +1233,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p)
{
+ u64 bitmask = 0;
void *in;
void *sqc;
int inlen;
@@ -1248,9 +1249,14 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
MLX5_SET(sqc, sqc, state, p->next_state);
if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
- MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
- MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
+ bitmask |= 1;
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
}
+ if (p->qos_update && p->next_state == MLX5_SQC_STATE_RDY) {
+ bitmask |= 1 << 2;
+ MLX5_SET(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id);
+ }
+ MLX5_SET64(modify_sq_in, in, modify_bitmask, bitmask);
err = mlx5_core_modify_sq(mdev, sqn, in);
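
Note: mlx5e_modify_sq() now collects the MODIFY_SQ selector bits in a local
u64 and writes them once via MLX5_SET64(): bit 0 selects the packet pacing
rate limit index, bit 2 the QoS queue group ID. A minimal sketch of named
constants for these bits; the enum and its names are hypothetical, not part
of this patch:

	/* Hypothetical names for the modify_bitmask bits used above. */
	enum {
		MLX5_MODIFY_SQ_BITMASK_PP_RATE_LIMIT_INDEX = 1ULL << 0,
		MLX5_MODIFY_SQ_BITMASK_QOS_QUEUE_GROUP_ID  = 1ULL << 2,
	};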
@@ -1267,6 +1273,7 @@ static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp,
+ u16 qos_queue_group_id,
u32 *sqn)
{
struct mlx5e_modify_sq_param msp = {0};
@@ -1278,6 +1285,10 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
msp.curr_state = MLX5_SQC_STATE_RST;
msp.next_state = MLX5_SQC_STATE_RDY;
+ if (qos_queue_group_id) {
+ msp.qos_update = true;
+ msp.qos_queue_group_id = qos_queue_group_id;
+ }
err = mlx5e_modify_sq(mdev, *sqn, &msp);
if (err)
mlx5e_destroy_sq(mdev, *sqn);
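
Note: mlx5e_create_sq_rdy() treats a zero qos_queue_group_id as "no QoS
group": only a nonzero ID sets qos_update, attaching the SQ to that group
during the RST-to-RDY transition. A sketch of the two call patterns; the
QoS-side variable name is illustrative, not taken from this patch:

	/* Regular SQ (also ICOSQ/XDPSQ): no QoS queue group. */
	err = mlx5e_create_sq_rdy(mdev, param, &csp, 0, &sq->sqn);
	/* HTB leaf SQ: attach to its hardware QoS queue group. */
	err = mlx5e_create_sq_rdy(mdev, param, &csp, node_hw_id, &sq->sqn);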
@@ -1288,13 +1299,9 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
static int mlx5e_set_sq_maxrate(struct net_device *dev,
struct mlx5e_txqsq *sq, u32 rate);
-static int mlx5e_open_txqsq(struct mlx5e_channel *c,
- u32 tisn,
- int txq_ix,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq,
- int tc)
+int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
+ struct mlx5e_params *params, struct mlx5e_sq_param *param,
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
{
struct mlx5e_create_sq_param csp = {};
u32 tx_rate;
@@ -1304,12 +1311,17 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
if (err)
return err;
+ if (qos_queue_group_id)
+ sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
+ else
+ sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+
csp.tisn = tisn;
csp.tis_lst_sz = 1;
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
- err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
if (err)
goto err_free_txqsq;
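
Note: sq->stats is now chosen at open time rather than in mlx5e_alloc_txqsq()
(hence the deletion in the first hunk): an HTB leaf SQ indexes the
dynamically sized priv->htb.qos_sq_stats array by qos_qid, while a regular
SQ keeps using the per-channel, per-TC stats array.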
@@ -1366,7 +1378,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
}
}
-static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
+void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5_core_dev *mdev = sq->mdev;
struct mlx5_rate_limit rl = {0};
@@ -1403,7 +1415,7 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = params->tx_min_inline_mode;
- err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_icosq;
@@ -1452,7 +1464,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
@@ -1703,7 +1715,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
int txq_ix = c->ix + tc * params->num_channels;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- params, &cparam->txq_sq, &c->sq[tc], tc);
+ params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
if (err)
goto err_close_sqs;
}
@@ -2044,6 +2056,8 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->sq[tc]);
+
+ mlx5e_qos_deactivate_queues(c);
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
@@ -2051,6 +2065,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_close_xsk(c);
mlx5e_close_queues(c);
+ mlx5e_qos_close_queues(c);
netif_napi_del(&c->napi);
kvfree(c);
@@ -2198,9 +2213,8 @@ void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
}
-static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param)
+void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -2379,10 +2393,18 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
+ err = mlx5e_qos_open_queues(priv, chs);
+ if (err)
+ goto err_close_ptp;
+
mlx5e_health_channels_update(priv);
kvfree(cparam);
return 0;
+err_close_ptp:
+ if (chs->port_ptp)
+ mlx5e_port_ptp_close(chs->port_ptp);
+
err_close_channels:
for (i--; i >= 0; i--)
mlx5e_close_channel(chs->c[i]);
@@ -2915,11 +2937,31 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
netdev_set_tc_queue(netdev, tc, nch, 0);
}
+int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
+{
+ int qos_queues, nch, ntc, num_txqs, err;
+
+ qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
+
+ nch = priv->channels.params.num_channels;
+ ntc = priv->channels.params.num_tc;
+ num_txqs = nch * ntc + qos_queues;
+ if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
+ num_txqs += ntc;
+
+ mlx5e_dbg(DRV, priv, "Setting num_txqs %d\n", num_txqs);
+ err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
+ if (err)
+ netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+
+ return err;
+}
+
static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
- int num_txqs, num_rxqs, nch, ntc;
int old_num_txqs, old_ntc;
+ int num_rxqs, nch, ntc;
int err;
old_num_txqs = netdev->real_num_tx_queues;
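
Note: mlx5e_update_tx_netdev_queues() folds the currently allocated HTB leaf
queues into the real TX queue count. A worked example with values chosen
purely for illustration, 8 channels, 2 TCs, 4 active HTB leaves and the TX
port timestamping pflag set:

	num_txqs = nch * ntc + qos_queues + ntc;	/* 8 * 2 + 4 + 2 = 22 */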
@@ -2927,18 +2969,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
- num_txqs = nch * ntc;
- if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
- num_txqs += ntc;
num_rxqs = nch * priv->profile->rq_groups;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
- err = netif_set_real_num_tx_queues(netdev, num_txqs);
- if (err) {
- netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+ err = mlx5e_update_tx_netdev_queues(priv);
+ if (err)
goto err_tcs;
- }
err = netif_set_real_num_rx_queues(netdev, num_rxqs);
if (err) {
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
@@ -3042,6 +3079,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_update_num_tc_x_num_ch(priv);
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
+ mlx5e_qos_activate_queues(priv);
mlx5e_xdp_tx_enable(priv);
netif_tx_start_all_queues(priv->netdev);
@@ -3608,6 +3646,14 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
+ /* MQPRIO is another toplevel qdisc that can't be attached
+ * simultaneously with the offloaded HTB.
+ */
+ if (WARN_ON(priv->htb.maj_id)) {
+ err = -EINVAL;
+ goto out;
+ }
+
new_channels.params = priv->channels.params;
new_channels.params.num_tc = tc ? tc : 1;
@@ -3628,12 +3674,55 @@ out:
return err;
}
+static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
+{
+ int res;
+
+ switch (htb->command) {
+ case TC_HTB_CREATE:
+ return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
+ htb->extack);
+ case TC_HTB_DESTROY:
+ return mlx5e_htb_root_del(priv);
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
+ htb->rate, htb->ceil, htb->extack);
+ if (res < 0)
+ return res;
+ htb->qid = res;
+ return 0;
+ case TC_HTB_LEAF_TO_INNER:
+ return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
+ htb->rate, htb->ceil, htb->extack);
+ case TC_HTB_LEAF_DEL:
+ return mlx5e_htb_leaf_del(priv, htb->classid, &htb->moved_qid, &htb->qid,
+ htb->extack);
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return mlx5e_htb_leaf_del_last(priv, htb->classid,
+ htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
+ htb->extack);
+ case TC_HTB_NODE_MODIFY:
+ return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil,
+ htb->extack);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ res = mlx5e_get_txq_by_classid(priv, htb->classid);
+ if (res < 0)
+ return res;
+ htb->qid = res;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static LIST_HEAD(mlx5e_block_cb_list);
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
switch (type) {
case TC_SETUP_BLOCK: {
@@ -3647,6 +3736,11 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
case TC_SETUP_QDISC_MQPRIO:
return mlx5e_setup_tc_mqprio(priv, type_data);
+ case TC_SETUP_QDISC_HTB:
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_setup_tc_htb(priv, type_data);
+ mutex_unlock(&priv->state_lock);
+ return err;
default:
return -EOPNOTSUPP;
}
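
Note: with TC_SETUP_QDISC_HTB dispatched under priv->state_lock, the offload
can be exercised from userspace tc. Assuming an iproute2 version that
understands the htb "offload" parameter (device name illustrative):

	tc qdisc replace dev eth0 root handle 1: htb offload
	# TC_HTB_LEAF_ALLOC_QUEUE
	tc class add dev eth0 parent 1: classid 1:1 htb rate 100mbit ceil 200mbit
	# TC_HTB_NODE_MODIFY
	tc class change dev eth0 parent 1: classid 1:1 htb rate 150mbit ceil 200mbit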
@@ -3811,20 +3905,25 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
return 0;
}
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+static int set_feature_hw_tc(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
+#endif
+
+ if (!enable && priv->htb.maj_id) {
+ netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
+ return -EINVAL;
+ }
return 0;
}
-#endif
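
Note: set_feature_hw_tc() replaces the CLS_ACT-only
set_feature_tc_num_filters(), so clearing NETIF_F_HW_TC is now also refused
while an offloaded HTB root (priv->htb.maj_id) exists:

	ethtool -K eth0 hw-tc-offload off	# -EINVAL while HTB offload is active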
static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
@@ -3922,9 +4021,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
-#endif
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -5028,6 +5125,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
}
+ if (mlx5_qos_is_supported(mdev))
+ netdev->features |= NETIF_F_HW_TC;
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
@@ -5333,6 +5432,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
return -ENOMEM;
mutex_init(&priv->state_lock);
+ hash_init(priv->htb.qos_tc2node);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
@@ -5355,8 +5455,14 @@ err_free_cpumask:
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
{
+ int i;
+
destroy_workqueue(priv->wq);
free_cpumask_var(priv->scratchpad.cpumask);
+
+ for (i = 0; i < priv->htb.max_qos_sqs; i++)
+ kfree(priv->htb.qos_sq_stats[i]);
+ kvfree(priv->htb.qos_sq_stats);
}
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
@@ -5366,13 +5472,17 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
{
struct net_device *netdev;
unsigned int ptp_txqs = 0;
+ int qos_sqs = 0;
int err;
if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
ptp_txqs = profile->max_tc;
+ if (mlx5_qos_is_supported(mdev))
+ qos_sqs = mlx5e_qos_max_leaf_nodes(mdev);
+
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
- nch * profile->max_tc + ptp_txqs,
+ nch * profile->max_tc + ptp_txqs + qos_sqs,
nch * profile->rq_groups);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
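
Note: the TX queue count passed to alloc_etherdev_mqs() is the worst case the
netdev may ever expose, so it uses mlx5e_qos_max_leaf_nodes(), whereas the
runtime count set through netif_set_real_num_tx_queues() uses
mlx5e_qos_cur_leaf_nodes(). Illustrative sizing, assuming 8 channels,
max_tc = 8, PTP TX queues available and a hypothetical 256-leaf QoS cap:

	txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;	/* 8 * 8 + 8 + 256 = 328 */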