path: root/drivers/infiniband/hw/mlx5/qp.c
author    Mark Bloch <markb@mellanox.com>    2018-09-17 13:30:49 +0300
committer Doug Ledford <dledford@redhat.com> 2018-09-21 20:20:59 -0400
commit    0042f9e458a560e13c1da2211cf6429e0c7dd812
tree      897ace30aed825ae85fe80034c4f27dc7e889d4c /drivers/infiniband/hw/mlx5/qp.c
parent    RDMA/mlx5: Allow creating RAW ethernet QP with loopback support
RDMA/mlx5: Enable vport loopback when user context or QP mandate
A user can create a QP which can accept loopback traffic, but that's not enough: loopback must also be enabled on the vport. Currently, vport loopback is enabled only when more than one user is using the IB device. Update the logic to also consider whether a QP which supports loopback was created; if so, enable vport loopback even if there is only a single user.

Signed-off-by: Mark Bloch <markb@mellanox.com>
Reviewed-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
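To illustrate the logic the commit message describes, here is a minimal sketch of a refcounted enable/disable scheme for vport loopback, in the spirit of mlx5_ib_enable_lb()/mlx5_ib_disable_lb(): loopback stays on while more than one user context, or at least one loopback-capable QP, needs it. This is not the mlx5 driver's actual implementation; struct lb_state, lb_enable(), lb_disable() and hw_set_vport_loopback() are hypothetical names used for illustration only.

#include <linux/mutex.h>
#include <linux/types.h>

struct lb_state {
        struct mutex mutex;     /* serializes enable/disable */
        u32 user_tds;           /* user contexts that may need loopback */
        u32 qps;                /* QPs created with a self-loopback flag */
        bool enabled;           /* cached vport loopback state */
};

/* Hypothetical hardware hook; the real driver programs the vport. */
static int hw_set_vport_loopback(bool enable)
{
        return 0;
}

/* Recompute the desired state and push it to hardware if it changed. */
static int lb_update(struct lb_state *lb)
{
        bool want = lb->user_tds > 1 || lb->qps > 0;
        int err = 0;

        if (want != lb->enabled) {
                err = hw_set_vport_loopback(want);
                if (!err)
                        lb->enabled = want;
        }
        return err;
}

static int lb_enable(struct lb_state *lb, bool td, bool qp)
{
        int err;

        mutex_lock(&lb->mutex);
        lb->user_tds += td ? 1 : 0;
        lb->qps += qp ? 1 : 0;
        err = lb_update(lb);
        if (err) {              /* roll the counters back on failure */
                lb->user_tds -= td ? 1 : 0;
                lb->qps -= qp ? 1 : 0;
        }
        mutex_unlock(&lb->mutex);
        return err;
}

static void lb_disable(struct lb_state *lb, bool td, bool qp)
{
        mutex_lock(&lb->mutex);
        lb->user_tds -= td ? 1 : 0;
        lb->qps -= qp ? 1 : 0;
        lb_update(lb);          /* teardown path ignores errors */
        mutex_unlock(&lb->mutex);
}

Under this shape, the create-TIR hunks below correspond to lb_enable(lb, false, true) when a TIR is created with self_lb_block set, and the destroy paths correspond to lb_disable(lb, false, true).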
Diffstat (limited to 'drivers/infiniband/hw/mlx5/qp.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 34 +++++++++++++++++++++++++++++++++-------
1 file changed, 27 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index f29ae401b232..fcaa5c4d6feb 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1256,6 +1256,16 @@ static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
 		MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
 }
 
+static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
+				      struct mlx5_ib_rq *rq,
+				      u32 qp_flags_en)
+{
+	if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+		mlx5_ib_disable_lb(dev, false, true);
+	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
+}
+
 static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 				    struct mlx5_ib_rq *rq, u32 tdn,
 				    u32 *qp_flags_en)
@@ -1293,17 +1303,17 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 
 	err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
 
+	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+		err = mlx5_ib_enable_lb(dev, false, true);
+
+		if (err)
+			destroy_raw_packet_qp_tir(dev, rq, 0);
+	}
 	kvfree(in);
 
 	return err;
 }
 
-static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
-				      struct mlx5_ib_rq *rq)
-{
-	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
-}
-
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 				u32 *in, size_t inlen,
 				struct ib_pd *pd)
@@ -1372,7 +1382,7 @@ static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
 
 	if (qp->rq.wqe_cnt) {
-		destroy_raw_packet_qp_tir(dev, rq);
+		destroy_raw_packet_qp_tir(dev, rq, qp->flags_en);
 		destroy_raw_packet_qp_rq(dev, rq);
 	}
 
@@ -1396,6 +1406,9 @@ static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
 
 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
+	if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+		mlx5_ib_disable_lb(dev, false, true);
 	mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
 }
@@ -1606,6 +1619,13 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 create_tir:
 	err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
 
+	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+		err = mlx5_ib_enable_lb(dev, false, true);
+
+		if (err)
+			mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
+	}
+
 	if (err)
 		goto err;
 