author	Leon Romanovsky <leonro@mellanox.com>	2020-09-07 15:09:18 +0300
committer	Jason Gunthorpe <jgg@nvidia.com>	2020-09-09 14:14:29 -0300
commit	43d781b9fa562f0c6e50f62c870fbfeb9dc85213 (patch)
tree	6ee87ba81d8f7a5fde19e0f0665f905384a967c5 /drivers/infiniband/hw/efa/efa_verbs.c
parent	RDMA/core: Delete function indirection for alloc/free kernel CQ (diff)
RDMA: Allow fail of destroy CQ
Like any other verbs object, a CQ shouldn't fail during destroy, but mlx5_ib didn't follow this contract when it mixed IB verbs objects with DEVX. Such a mix leads to a situation where the FW and the kernel are fully interdependent on each other's reference counting. Kernel verbs and drivers that don't have DEVX flows shouldn't fail.

Fixes: e39afe3d6dbd ("RDMA: Convert CQ allocations to be under core responsibility")
Link: https://lore.kernel.org/r/20200907120921.476363-7-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
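For drivers without DEVX-style firmware entanglement, the contract change is mechanical: the destroy_cq handler changes its return type from void to int and reports success unconditionally. A minimal sketch of that pattern (hypothetical "foo" driver names, for illustration only, not part of this patch):

/* Hypothetical driver sketch: a kernel-owned CQ whose teardown cannot
 * fail simply releases its resources and returns 0 under the new
 * int-returning contract. */
static int foo_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct foo_cq *cq = to_foo_cq(ibcq);

	foo_hw_destroy_cq(cq->dev, cq->cq_idx);	/* tell HW to drop the CQ */
	foo_free_cq_buf(cq);			/* release host memory */
	return 0;				/* destroy never fails here */
}

Only drivers whose objects carry firmware-held references (the mlx5 DEVX case) would return a non-zero error to keep the kernel object alive.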
Diffstat (limited to 'drivers/infiniband/hw/efa/efa_verbs.c')
-rw-r--r--	drivers/infiniband/hw/efa/efa_verbs.c	3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index a03e3514bd8a..57910bcfc572 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -973,7 +973,7 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
 	return efa_com_destroy_cq(&dev->edev, &params);
 }
 
-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct efa_dev *dev = to_edev(ibcq->device);
 	struct efa_cq *cq = to_ecq(ibcq);
@@ -986,6 +986,7 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	efa_destroy_cq_idx(dev, cq->cq_idx);
 	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
 			DMA_FROM_DEVICE);
+	return 0;
 }
 
 static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
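On the core side, a non-zero return from the driver lets the kernel keep the ib_cq object alive instead of freeing memory the firmware may still reference. A simplified sketch of how a caller can honor that (assumed shape only; the real RDMA core path does additional restrack and uobject bookkeeping):

/* Simplified sketch, not the exact core code: propagate the driver's
 * verdict and free the CQ only when destroy actually succeeded. */
static int example_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	int ret = cq->device->ops.destroy_cq(cq, udata);

	if (ret)
		return ret;	/* e.g. -EBUSY: FW still holds a reference */

	kfree(cq);
	return 0;
}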