author     Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>  2021-07-29 14:26:22 -0400
committer  Jason Gunthorpe <jgg@nvidia.com>  2021-08-02 12:45:22 -0300
commit     db4657afd10e45855ac1d8437fcc9a86bd3d741d
tree       116e50fdcc3af066f6954a3ca15dc960d754271a /drivers/infiniband/core
parent     RDMA/mlx5: Delay emptying a cache entry when a new MR is added to it recently
RDMA/cma: Revert INIT-INIT patch
The net/sunrpc/xprtrdma module creates its QP using rdma_create_qp() and immediately posts receives, implicitly assuming the QP is in the INIT state and thus valid for ib_post_recv().

The patch noted in Fixes: removed the RESET->INIT modify from rdma_create_qp(), breaking NFS/RDMA for verbs providers that fail ib_post_recv() on a QP in the wrong state. This was proven using kprobes in rvt_post_recv() and rvt_modify_qp(): the traces showed that rvt_post_recv() failed before ANY modify QP had been issued and that the current state was RESET.

Fix by reverting the patch below.

Fixes: dc70f7c3ed34 ("RDMA/cma: Remove unnecessary INIT->INIT transition")
Link: https://lore.kernel.org/r/1627583182-81330-1-git-send-email-mike.marciniszyn@cornelisnetworks.com
Cc: Haakon Bugge <haakon.bugge@oracle.com>
Cc: Chuck Lever III <chuck.lever@oracle.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
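For context, the consumer pattern that regressed looks roughly like the sketch below. Only rdma_create_qp() and ib_post_recv() are the real core calls; the surrounding helper and the empty work request are illustrative stand-ins for what xprtrdma does, not code lifted from that module.

    #include <rdma/rdma_cm.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical helper mirroring the xprtrdma pattern: create the QP
     * through the CM and post a receive before connecting.
     */
    static int create_qp_and_post_recv(struct rdma_cm_id *id, struct ib_pd *pd,
                                       struct ib_qp_init_attr *qp_init_attr)
    {
            struct ib_recv_wr wr = { .num_sge = 0 };  /* zero-length recv */
            const struct ib_recv_wr *bad_wr;
            int ret;

            ret = rdma_create_qp(id, pd, qp_init_attr);
            if (ret)
                    return ret;

            /* With dc70f7c3ed34 applied, the QP is still in RESET here, so
             * providers that validate QP state (rvt_post_recv() in the
             * report) fail this post. With the transition restored, the QP
             * is in INIT, where posting receives is legal.
             */
            return ib_post_recv(id->qp, &wr, &bad_wr);
    }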
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/cma.c | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 515a7e95a421..5d3b8b8d163d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -926,12 +926,25 @@ static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
return ret;
}
+static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+ int qp_attr_mask, ret;
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
+ if (ret)
+ return ret;
+
+ return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+}
+
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr)
{
struct rdma_id_private *id_priv;
struct ib_qp *qp;
- int ret = 0;
+ int ret;
id_priv = container_of(id, struct rdma_id_private, id);
if (id->device != pd->device) {
@@ -948,6 +961,8 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
if (id->qp_type == IB_QPT_UD)
ret = cma_init_ud_qp(id_priv, qp);
+ else
+ ret = cma_init_conn_qp(id_priv, qp);
if (ret)
goto out_destroy;
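For reference, the restored cma_init_conn_qp() drives the standard RESET->INIT transition. For a connected QP, rdma_init_qp_attr() fills in the pkey index, port number, and access flags, so the helper is roughly equivalent to the hand-rolled modify sketched below (the pkey_index and port_num values are placeholders; cma derives the real ones from the device the id is bound to).

    struct ib_qp_attr qp_attr = {
            .qp_state        = IB_QPS_INIT,
            .pkey_index      = 0,   /* placeholder; cma looks up the real index */
            .port_num        = 1,   /* placeholder; cma uses the bound port */
            .qp_access_flags = 0,
    };

    ret = ib_modify_qp(qp, &qp_attr,
                       IB_QP_STATE | IB_QP_PKEY_INDEX |
                       IB_QP_PORT | IB_QP_ACCESS_FLAGS);

Without this transition inside rdma_create_qp(), every ULP that posts receives before rdma_connect() would have to perform a modify like this itself, which is exactly the implicit contract xprtrdma relies on.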