author      Bodong Wang <bodong@mellanox.com>        2019-02-12 22:55:36 -0800
committer   Saeed Mahameed <saeedm@mellanox.com>     2019-02-14 12:14:41 -0800
commit      22e939a91dcb9bd4b773f2a0c0cb4eb016679b49 (patch)
tree        77467ed5835670366f4ff3e111f1c9ac99032aa9 /drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
parent      net/mlx5: Introduce Mellanox SmartNIC and modify page management logic (diff)
net/mlx5: Update enable HCA dependency
With the introduction of ECPF, we require that the ECPF driver will always call enable/disable HCA for that PF in the same way a PF does this for its VFs. The PF is still responsible for calling enable and disable HCA for its VFs.

To distinguish between the ECPF executing enable/disable HCA for itself or for the peer PF, it sets the embedded CPU function bit in the input params struct of these commands. When the bit is cleared and the function ID is zero, the command refers to the peer PF.

Signed-off-by: Bodong Wang <bodong@mellanox.com>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
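To make that convention concrete, here is a minimal sketch (not part of this patch; the helper name and the bool parameter are hypothetical) showing how the ENABLE_HCA input would be filled for the two cases, using the same ifc fields that appear in the diff below:

static int example_enable_hca(struct mlx5_core_dev *dev, bool ec_function)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	/* function_id 0 with embedded_cpu_function cleared refers to the peer PF;
	 * setting embedded_cpu_function means the ECPF acts on its own function.
	 */
	MLX5_SET(enable_hca_in, in, function_id, 0);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function, ec_function ? 1 : 0);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}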
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/ecpf.c')
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/ecpf.c   76
1 file changed, 76 insertions, 0 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index 28b8c5c5c8c7..1bcf8b8f9713 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -7,3 +7,79 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1;
}
+
+static int mlx5_peer_pf_enable_hca(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
+
+ MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
+ MLX5_SET(enable_hca_in, in, function_id, 0);
+ MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
+ return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+}
+
+static int mlx5_peer_pf_disable_hca(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
+
+ MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
+ MLX5_SET(disable_hca_in, in, function_id, 0);
+ MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = mlx5_peer_pf_enable_hca(dev);
+ if (err)
+ mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n",
+ err);
+
+ return err;
+}
+
+static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = mlx5_peer_pf_disable_hca(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n",
+ err);
+ return;
+ }
+
+ err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages);
+ if (err)
+ mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n",
+ err);
+}
+
+int mlx5_ec_init(struct mlx5_core_dev *dev)
+{
+ int err = 0;
+
+ if (!mlx5_core_is_ecpf(dev))
+ return 0;
+
+ /* ECPF shall enable HCA for peer PF in the same way a PF
+ * does this for its VFs.
+ */
+ err = mlx5_peer_pf_init(dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_is_ecpf(dev))
+ return;
+
+ mlx5_peer_pf_cleanup(dev);
+}
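For context, a rough sketch of how these entry points are meant to be used from the device load/unload path; the real call sites live in main.c and are not shown in this ecpf.c-only diffstat, so the function names and ordering below are assumptions for illustration only:

/* Sketch only: illustrative placement, not the actual main.c code. */
static int example_load(struct mlx5_core_dev *dev)
{
	int err;

	/* No-op on non-ECPF devices; on the ECPF it enables the peer PF HCA. */
	err = mlx5_ec_init(dev);
	if (err)
		return err;

	/* ... remaining load steps ... */
	return 0;
}

static void example_unload(struct mlx5_core_dev *dev)
{
	/* ... earlier teardown steps ... */

	/* On the ECPF: disables the peer PF HCA and waits for its pages to be reclaimed. */
	mlx5_ec_cleanup(dev);
}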