author		Leon Romanovsky <leonro@mellanox.com>	2020-09-07 15:09:18 +0300
committer	Jason Gunthorpe <jgg@nvidia.com>	2020-09-09 14:14:29 -0300
commit		43d781b9fa562f0c6e50f62c870fbfeb9dc85213 (patch)
tree		6ee87ba81d8f7a5fde19e0f0665f905384a967c5 /drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
parent		RDMA/core: Delete function indirection for alloc/free kernel CQ (diff)
RDMA: Allow fail of destroy CQ
Like any other verbs object, a CQ shouldn't fail during destroy, but mlx5_ib didn't follow this contract when IB verbs objects are mixed with DEVX objects. Such a mix leads to a situation where firmware and kernel become fully interdependent on each other's reference counting. Kernel verbs, and drivers that don't have DEVX flows, still shouldn't fail the destroy.

Fixes: e39afe3d6dbd ("RDMA: Convert CQ allocations to be under core responsibility")
Link: https://lore.kernel.org/r/20200907120921.476363-7-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
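For background on how the new return value is meant to be consumed: ib_destroy_cq_user() is the core entry point that invokes the driver's destroy_cq callback, and after this series it can propagate a driver failure instead of unconditionally freeing the CQ. The following is a minimal sketch of that caller-side contract; the body is simplified and the real core-side handling lives in other patches of the series, so treat it as illustrative rather than the upstream implementation.

/* Illustrative sketch of the caller-side contract after this series:
 * if the driver's destroy_cq callback fails, the core keeps the CQ
 * alive and returns the error instead of freeing it.  Simplified;
 * not the verbatim upstream ib_destroy_cq_user(). */
int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
	int ret;

	if (atomic_read(&cq->usecnt))
		return -EBUSY;	/* still attached to QPs/SRQs */

	ret = cq->device->ops.destroy_cq(cq, udata);
	if (ret)
		return ret;	/* e.g. a DEVX-backed CQ still referenced by FW */

	rdma_restrack_del(&cq->res);
	kfree(cq);
	return 0;
}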
Diffstat (limited to 'drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c')
-rw-r--r--	drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c	3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 01cd122a8b69..32aede5a3381 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -235,7 +235,7 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
  * @cq: the completion queue to destroy.
  * @udata: user data or null for kernel object
  */
-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	struct pvrdma_cq *vcq = to_vcq(cq);
 	union pvrdma_cmd_req req;
@@ -261,6 +261,7 @@ void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 
 	pvrdma_free_cq(dev, vcq);
 	atomic_dec(&dev->num_cqs);
+	return 0;
 }
 
 static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
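On the driver side, a provider without DEVX-style firmware references (such as pvrdma above) simply returns 0, while a driver whose CQ may still be pinned by a firmware object can now refuse the teardown. The sketch below is hypothetical: all example_* names and the fw_refcount field are invented for illustration and are not taken from any in-tree driver.

/* Hypothetical driver-side sketch: a CQ still referenced by a firmware
 * object refuses to be destroyed; otherwise the driver releases its own
 * resources and reports success so the core may free the ib_cq. */
static int example_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct example_cq *cq = container_of(ibcq, struct example_cq, ibcq);

	if (atomic_read(&cq->fw_refcount))
		return -EBUSY;		/* firmware still holds a reference */

	example_free_cq_ring(cq);	/* release driver-owned resources */
	return 0;
}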