about summary refs log tree commit diff stats
path: root/drivers/infiniband/core
diff options
context:
space:
mode:
authorSean Hefty <sean.hefty@intel.com>2011-05-26 23:06:44 -0700
committerRoland Dreier <roland@purestorage.com>2011-10-13 09:20:27 -0700
commitd3d72d909e783d048ee39046aa7b4fa798a4dda8 (patch)
treeb773014148c09565b02ad542295b5c6caa7fd498 /drivers/infiniband/core
parentRDMA/core: Add XRC QPs (diff)
downloadlinux-dev-d3d72d909e783d048ee39046aa7b4fa798a4dda8.tar.xz
linux-dev-d3d72d909e783d048ee39046aa7b4fa798a4dda8.zip
RDMA/verbs: Cleanup XRC TGT QPs when destroying XRCD
XRC TGT QPs are intended to be shared among multiple users and processes. Allow the destruction of an XRC TGT QP to be done explicitly through ib_destroy_qp() or when the XRCD is destroyed.

To support destroying an XRC TGT QP, we need to track TGT QPs with the XRCD. When the XRCD is destroyed, all tracked XRC TGT QPs are also cleaned up.

To avoid stale reference issues, if a user is holding a reference on a TGT QP, we increment a reference count on the QP. The user releases the reference by calling ib_release_qp. This releases any access to the QP from a user above verbs, but allows the QP to continue to exist until destroyed by the XRCD.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--drivers/infiniband/core/verbs.c47
1 file changed, 47 insertions, 0 deletions
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 89277e5129be..8c6da5bda4c6 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -316,6 +316,20 @@ EXPORT_SYMBOL(ib_destroy_srq);
/* Queue pairs */
+/* Track an XRC TGT QP on its owning XRCD so it can be found and cleaned
+ * up when the XRCD is deallocated.  tgt_qp_mutex serializes list updates
+ * against concurrent create/destroy of TGT QPs sharing this XRCD.
+ */
+static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
+{
+ mutex_lock(&xrcd->tgt_qp_mutex);
+ list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
+ mutex_unlock(&xrcd->tgt_qp_mutex);
+}
+
+/* Stop tracking an XRC TGT QP on its XRCD (the counterpart of
+ * __ib_insert_xrcd_qp).  Called under no lock by the caller; list removal
+ * itself is protected by tgt_qp_mutex.
+ */
+static void __ib_remove_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
+{
+ mutex_lock(&xrcd->tgt_qp_mutex);
+ list_del(&qp->xrcd_list);
+ mutex_unlock(&xrcd->tgt_qp_mutex);
+}
+
struct ib_qp *ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr)
{
@@ -334,6 +348,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->srq = NULL;
qp->xrcd = qp_init_attr->xrcd;
atomic_inc(&qp_init_attr->xrcd->usecnt);
+ __ib_insert_xrcd_qp(qp_init_attr->xrcd, qp);
} else {
if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
qp->recv_cq = NULL;
@@ -730,6 +745,8 @@ int ib_destroy_qp(struct ib_qp *qp)
rcq = qp->recv_cq;
srq = qp->srq;
xrcd = qp->xrcd;
+ if (xrcd)
+ __ib_remove_xrcd_qp(xrcd, qp);
ret = qp->device->destroy_qp(qp);
if (!ret) {
@@ -743,12 +760,30 @@ int ib_destroy_qp(struct ib_qp *qp)
atomic_dec(&srq->usecnt);
if (xrcd)
atomic_dec(&xrcd->usecnt);
+ } else if (xrcd) {
+ __ib_insert_xrcd_qp(xrcd, qp);
}
return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
+/* Release a user's reference on an XRC TGT QP without destroying it.
+ * Only valid for IB_QPT_XRC_TGT QPs; returns -EINVAL for any other type.
+ *
+ * The user's event handler is cleared under event_handler_lock so no
+ * further asynchronous events are delivered above verbs, and the user's
+ * hold on the XRCD (usecnt) is dropped.  The QP itself lives on until it
+ * is destroyed explicitly or when the XRCD is deallocated.
+ */
+int ib_release_qp(struct ib_qp *qp)
+{
+ unsigned long flags;
+
+ if (qp->qp_type != IB_QPT_XRC_TGT)
+ return -EINVAL;
+
+ /* Detach the caller's event handler atomically w.r.t. event dispatch. */
+ spin_lock_irqsave(&qp->device->event_handler_lock, flags);
+ qp->event_handler = NULL;
+ spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
+
+ atomic_dec(&qp->xrcd->usecnt);
+ return 0;
+}
+EXPORT_SYMBOL(ib_release_qp);
+
/* Completion queues */
struct ib_cq *ib_create_cq(struct ib_device *device,
@@ -1062,6 +1097,8 @@ struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
if (!IS_ERR(xrcd)) {
xrcd->device = device;
atomic_set(&xrcd->usecnt, 0);
+ mutex_init(&xrcd->tgt_qp_mutex);
+ INIT_LIST_HEAD(&xrcd->tgt_qp_list);
}
return xrcd;
@@ -1070,9 +1107,19 @@ EXPORT_SYMBOL(ib_alloc_xrcd);
/* Deallocate an XRCD.  Fails with -EBUSY while usecnt shows outstanding
 * references.  Any XRC TGT QPs still tracked on tgt_qp_list are destroyed
 * here before the device's dealloc_xrcd is invoked.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
+ struct ib_qp *qp;
+ int ret;
+
if (atomic_read(&xrcd->usecnt))
return -EBUSY;
+ /* Destroy remaining TGT QPs; ib_destroy_qp() unlinks each entry from
+ * tgt_qp_list via __ib_remove_xrcd_qp(), so the loop terminates.  On a
+ * destroy failure, bail out and leave the XRCD intact. */
+ while (!list_empty(&xrcd->tgt_qp_list)) {
+ qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
+ ret = ib_destroy_qp(qp);
+ if (ret)
+ return ret;
+ }
+
return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);