author    Jason Gunthorpe <jgg@nvidia.com>    2020-10-26 11:25:49 -0300
committer Jason Gunthorpe <jgg@nvidia.com>    2020-10-28 09:14:49 -0300
commit    071ba4cc559de47160761b9500b72e8fa09d923d (patch)
tree      0433f309f2aae8e868f09260cd1647671236af85 /drivers/infiniband/core/cma.c
parent    RDMA/uverbs: Fix false error in query gid IOCTL (diff)
RDMA: Add rdma_connect_locked()
There are two flows for handling RDMA_CM_EVENT_ROUTE_RESOLVED: either the
handler triggers a completion and another thread does rdma_connect(), or
the handler directly calls rdma_connect(). In all cases rdma_connect()
needs to hold the handler_mutex, but when handlers are invoked this is
already held by the core code. This causes ULPs using the second method
to deadlock.

Provide rdma_connect_locked() and have all ULPs call it from their
handlers.

Link: https://lore.kernel.org/r/0-v2-53c22d5c1405+33-rdma_connect_locking_jgg@nvidia.com
Reported-and-tested-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
Fixes: 2a7cec538169 ("RDMA/cma: Fix locking for the RDMA_CM_CONNECT state")
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Acked-by: Jack Wang <jinpu.wang@cloud.ionos.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
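As a minimal sketch of the second flow after this patch (the my_ulp_*
names and the retry_count value are illustrative, not part of the
patch): the ULP's event handler runs with handler_mutex already held by
the cm core, so it must call the new rdma_connect_locked() rather than
rdma_connect():

	#include <rdma/rdma_cm.h>

	static int my_ulp_cm_handler(struct rdma_cm_id *id,
				     struct rdma_cm_event *event)
	{
		struct rdma_conn_param param = {};

		switch (event->event) {
		case RDMA_CM_EVENT_ROUTE_RESOLVED:
			/*
			 * The cm core holds handler_mutex while this
			 * callback runs, so rdma_connect() would
			 * deadlock here; use the _locked variant this
			 * patch adds.
			 */
			param.retry_count = 7;	/* illustrative value */
			return rdma_connect_locked(id, &param);
		default:
			return 0;
		}
	}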
Diffstat (limited to 'drivers/infiniband/core/cma.c')
-rw-r--r--  drivers/infiniband/core/cma.c | 48
1 file changed, 38 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 7c2ab1f2fbea..a77750b8954d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -405,10 +405,10 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
 	/*
 	 * The FSM uses a funny double locking where state is protected by both
 	 * the handler_mutex and the spinlock. State is not allowed to change
-	 * away from a handler_mutex protected value without also holding
+	 * to/from a handler_mutex protected value without also holding
 	 * handler_mutex.
 	 */
-	if (comp == RDMA_CM_CONNECT)
+	if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
 		lockdep_assert_held(&id_priv->handler_mutex);
 
 	spin_lock_irqsave(&id_priv->lock, flags);
@@ -4038,17 +4038,23 @@ out:
 	return ret;
 }
 
-int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+/**
+ * rdma_connect_locked - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Same as rdma_connect() but can only be called from the
+ * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
+ */
+int rdma_connect_locked(struct rdma_cm_id *id,
+			struct rdma_conn_param *conn_param)
 {
 	struct rdma_id_private *id_priv =
 		container_of(id, struct rdma_id_private, id);
 	int ret;
 
-	mutex_lock(&id_priv->handler_mutex);
-	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) {
-		ret = -EINVAL;
-		goto err_unlock;
-	}
+	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
+		return -EINVAL;
 
 	if (!id->qp) {
 		id_priv->qp_num = conn_param->qp_num;
@@ -4066,11 +4072,33 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 		ret = -ENOSYS;
 	if (ret)
 		goto err_state;
-	mutex_unlock(&id_priv->handler_mutex);
 	return 0;
 err_state:
 	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
-err_unlock:
+	return ret;
+}
+EXPORT_SYMBOL(rdma_connect_locked);
+
+/**
+ * rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Users must have resolved a route for the rdma_cm_id to connect with by having
+ * called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP information
+ * for unconnected rdma_cm_id's. The actual operation is based on the
+ * rdma_cm_id's port space.
+ */
+int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+{
+	struct rdma_id_private *id_priv =
+		container_of(id, struct rdma_id_private, id);
+	int ret;
+
+	mutex_lock(&id_priv->handler_mutex);
+	ret = rdma_connect_locked(id, conn_param);
 	mutex_unlock(&id_priv->handler_mutex);
 	return ret;
 }
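
For contrast, a sketch of the first flow the commit message mentions,
again with illustrative my_ulp_* names and a hypothetical file-scope
completion standing in for real per-connection state: the handler only
signals a completion, and a separate thread, which does not hold
handler_mutex, calls plain rdma_connect(), which after this patch just
takes handler_mutex and forwards to rdma_connect_locked():

	#include <linux/completion.h>
	#include <rdma/rdma_cm.h>

	/* Hypothetical: real ULPs keep this in their connection struct. */
	static DECLARE_COMPLETION(route_done);

	static int my_ulp_cm_handler(struct rdma_cm_id *id,
				     struct rdma_cm_event *event)
	{
		if (event->event == RDMA_CM_EVENT_ROUTE_RESOLVED)
			complete(&route_done);
		return 0;
	}

	static int my_ulp_connect(struct rdma_cm_id *id)
	{
		struct rdma_conn_param param = {};

		wait_for_completion(&route_done);
		/* Process context, handler_mutex not held: rdma_connect() is safe. */
		return rdma_connect(id, &param);
	}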