Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5')
 drivers/net/ethernet/mellanox/mlx5/core/cmd.c               |  6
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c            |  9
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  | 18
 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c  | 15
 4 files changed, 30 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 34cba97f7bf4..cede5bdfd598 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work)
}
cmd->ent_arr[ent->idx] = ent;
- set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
@@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work)
if (ent->callback)
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+ set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
/* Skip sending command to fw if internal error */
if (pci_channel_offline(dev->pdev) ||
@@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work)
MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+ /* no doorbell, no need to keep the entry */
+ free_ent(cmd, ent->idx);
+ if (ent->callback)
+ free_cmd(ent);
return;
}
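
The cmd.c hunks defer setting MLX5_CMD_ENT_STATE_PENDING_COMP until the entry is really about to be handed to the firmware, and on the internal-error early return (no doorbell is rung) they complete the entry with a driver-generated syndrome and release it on the spot. A minimal userspace-C sketch of that ordering, using hypothetical helpers rather than the driver's API:

#include <stdbool.h>
#include <stdlib.h>

struct cmd_entry {
	int idx;
	bool pending_comp;           /* stands in for MLX5_CMD_ENT_STATE_PENDING_COMP */
	void (*callback)(void *ctx); /* non-NULL means asynchronous completion */
};

/* Hypothetical stand-ins for the driver's completion machinery. */
static void complete_with_error(struct cmd_entry *ent) { ent->pending_comp = false; }
static void release_slot(int idx)                      { (void)idx; }
static void ring_doorbell(struct cmd_entry *ent)       { (void)ent; }

static void post_cmd(struct cmd_entry *ent, bool device_in_error)
{
	/* Mark the entry completable only once it is actually about to be posted. */
	ent->pending_comp = true;

	if (device_in_error) {
		/* Fake the completion and drop the slot right away: no doorbell
		 * was rung, so hardware will never return this entry to us.
		 */
		complete_with_error(ent);
		release_slot(ent->idx);
		if (ent->callback)
			free(ent); /* async path: no waiter left to free it */
		return;
	}

	ring_doorbell(ent);
}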
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2de54d865dc8..1eac7a53d56f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1773,19 +1773,14 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
- int err = mlx5e_init_rep_rx(priv);
-
- if (err)
- return err;
-
mlx5e_create_q_counters(priv);
- return 0;
+ return mlx5e_init_rep_rx(priv);
}
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
- mlx5e_destroy_q_counters(priv);
mlx5e_cleanup_rep_rx(priv);
+ mlx5e_destroy_q_counters(priv);
}
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
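
The en_rep.c hunk has mlx5e_init_ul_rep_rx() create the queue counters before running the common rep RX init, and has the cleanup undo the two steps in reverse, presumably so the counters already exist for whatever mlx5e_init_rep_rx() sets up and outlive it on teardown. The mirror-ordering pattern in a small sketch (hypothetical names, not the mlx5e API):

/* Step A cannot fail and must surround step B on both init and cleanup. */
static void setup_counters(void)    { }           /* step A */
static int  setup_rx(void)          { return 0; } /* step B, may fail */
static void teardown_rx(void)       { }
static void teardown_counters(void) { }

static int init_path(void)
{
	setup_counters();   /* A first, so B can rely on the counters existing */
	return setup_rx();  /* B last: its error code goes straight to the caller */
}

static void cleanup_path(void)
{
	teardown_rx();       /* undo B first, while the counters still exist */
	teardown_counters(); /* undo A last */
}

Keeping cleanup as the exact reverse of init is what lets each teardown step assume that everything initialized before it is still alive.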
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 781c1d184b60..57ac2ef52e80 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1550,9 +1550,9 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
MLX5_FLOW_NAMESPACE_KERNEL, 1,
modact);
if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
esw_warn(dev, "Failed to create restore mod header, err: %d\n",
err);
- err = PTR_ERR(mod_hdr);
goto err_mod_hdr;
}
@@ -2219,10 +2219,12 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
+ mutex_init(&esw->fdb_table.offloads.vports.lock);
+ hash_init(esw->fdb_table.offloads.vports.table);
err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
- return err;
+ goto create_acl_err;
err = esw_create_offloads_table(esw, total_vports);
if (err)
@@ -2240,9 +2242,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err)
goto create_fg_err;
- mutex_init(&esw->fdb_table.offloads.vports.lock);
- hash_init(esw->fdb_table.offloads.vports.table);
-
return 0;
create_fg_err:
@@ -2253,18 +2252,19 @@ create_restore_err:
esw_destroy_offloads_table(esw);
create_offloads_err:
esw_destroy_uplink_offloads_acl_tables(esw);
-
+create_acl_err:
+ mutex_destroy(&esw->fdb_table.offloads.vports.lock);
return err;
}
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
- mutex_destroy(&esw->fdb_table.offloads.vports.lock);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_fdb_tables(esw);
esw_destroy_restore_table(esw);
esw_destroy_offloads_table(esw);
esw_destroy_uplink_offloads_acl_tables(esw);
+ mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
static void
@@ -2377,9 +2377,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
err_vports:
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
- esw_set_passing_vport_metadata(esw, false);
-err_steering_init:
esw_offloads_steering_cleanup(esw);
+err_steering_init:
+ esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
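
The eswitch_offloads.c hunks assign err = PTR_ERR(mod_hdr) before the warning that prints it (so the log no longer shows a stale value), move the vports lock/hash initialization to the top of esw_offloads_steering_init() with a matching create_acl_err unwind label, destroy the lock last in the cleanup path, and make the esw_offloads_enable() error unwind mirror the enable order. A compact sketch of that init/unwind discipline, using pthreads as a stand-in for the kernel mutex and hypothetical create/destroy helpers:

#include <pthread.h>

struct ctx {
	pthread_mutex_t lock; /* stands in for fdb_table.offloads.vports.lock */
};

/* Hypothetical create/destroy steps standing in for the ACL/offloads tables. */
static int  create_acl(struct ctx *c)     { (void)c; return 0; }
static int  create_tables(struct ctx *c)  { (void)c; return 0; }
static void destroy_tables(struct ctx *c) { (void)c; }
static void destroy_acl(struct ctx *c)    { (void)c; }

/* Initialize the lock before anything that might use it, and route every
 * failure through a label that destroys it, mirroring the new create_acl_err
 * label in esw_offloads_steering_init().
 */
static int steering_init(struct ctx *c)
{
	int err;

	pthread_mutex_init(&c->lock, NULL);

	err = create_acl(c);
	if (err)
		goto create_acl_err;

	err = create_tables(c);
	if (err)
		goto create_tables_err;

	return 0;

create_tables_err:
	destroy_acl(c);
create_acl_err:
	pthread_mutex_destroy(&c->lock);
	return err;
}

static void steering_cleanup(struct ctx *c)
{
	destroy_tables(c);
	destroy_acl(c);
	pthread_mutex_destroy(&c->lock); /* last, the exact reverse of init */
}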
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index c4ed25bb9ac8..b8d97d44be7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -693,6 +693,12 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
return 0;
}
+static void dr_cq_complete(struct mlx5_core_cq *mcq,
+ struct mlx5_eqe *eqe)
+{
+ pr_err("CQ completion CQ: #%u\n", mcq->cqn);
+}
+
static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
struct mlx5_uars_page *uar,
size_t ncqe)
@@ -753,6 +759,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
+ cq->mcq.comp = dr_cq_complete;
+
err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
kvfree(in);
@@ -763,7 +771,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
*cq->mcq.set_ci_db = 0;
- *cq->mcq.arm_db = 0;
+
+ /* Set a non-zero value to keep HW doorbell recovery from triggering on a
+ * CQ that is only used in polling mode.
+ */
+ *cq->mcq.arm_db = cpu_to_be32(2 << 28);
+
cq->mcq.vector = 0;
cq->mcq.irqn = irqn;
cq->mcq.uar = uar;
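
The dr_send.c hunks register dr_cq_complete() as the CQ's completion callback before mlx5_core_create_cq() and write a non-zero value into the arm doorbell; presumably the former keeps an unexpected completion event from being dispatched to a NULL handler, and the latter keeps HW doorbell recovery from treating the never-armed, polling-mode CQ as stalled. A userspace-flavoured sketch of the same defensive setup, with hypothetical names and htonl() standing in for cpu_to_be32():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h> /* htonl() as a stand-in for cpu_to_be32() */

struct polled_cq {
	void (*comp)(struct polled_cq *cq); /* completion callback, never left NULL */
	uint32_t *set_ci_db;
	uint32_t *arm_db;
};

/* Guard handler: this CQ is only ever polled, so a completion event is
 * unexpected; log it instead of jumping through a NULL pointer.
 */
static void polled_cq_complete(struct polled_cq *cq)
{
	fprintf(stderr, "unexpected completion on polled CQ %p\n", (void *)cq);
}

static void init_polled_cq(struct polled_cq *cq, uint32_t *db_page)
{
	cq->comp = polled_cq_complete; /* set before the CQ goes live */

	cq->set_ci_db = db_page;
	cq->arm_db = db_page + 1;
	*cq->set_ci_db = 0;

	/* Non-zero arm value so a doorbell-recovery check does not mistake the
	 * never-armed CQ for a stalled one (mirrors the 2 << 28 write above).
	 */
	*cq->arm_db = htonl(2u << 28);
}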