path: root/drivers/infiniband/hw/mlx4/main.c
author	Leon Romanovsky <leonro@mellanox.com>	2019-02-03 14:55:51 +0200
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-02-08 16:51:04 -0700
commit	21a428a019c9a6d133e745b529b9bf18c1187e70 (patch)
tree	78f13d8cfff04995d1381ac7d19ceb8aab23f6ab /drivers/infiniband/hw/mlx4/main.c
parent	RDMA/core: Share driver structure size with core (diff)
RDMA: Handle PD allocations by IB/core
Allocating the PD in IB/core allows us to simplify drivers and the error flows in their .alloc_pd() paths. The changes in .alloc_pd() go hand in hand with the corresponding updates in .dealloc_pd(). We use this opportunity to convert .dealloc_pd() so that it cannot fail, as was suggested a long time ago; such failures do not happen in practice, as we have never seen the WARN_ON print.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
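For context, the core-side pattern this conversion relies on looks roughly like the sketch below. It is illustrative only: ib_core_alloc_pd() is a made-up name, and the size_ib_pd field follows the companion "RDMA/core: Share driver structure size with core" patch. The driver registers the size of its PD-embedding structure with INIT_RDMA_OBJ_SIZE(), the core allocates and frees that structure itself, and the driver callback merely initializes hardware state and returns an errno instead of an ERR_PTR.

#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative sketch only -- ib_core_alloc_pd() is not an upstream
 * helper.  The driver registers sizeof(struct mlx4_ib_pd) through
 * INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd), so the core can
 * allocate the whole driver structure and the driver callback only
 * has to initialize it.
 */
static struct ib_pd *ib_core_alloc_pd(struct ib_device *dev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct ib_pd *pd;
	int ret;

	/* Size registered by the driver's INIT_RDMA_OBJ_SIZE() entry. */
	pd = kzalloc(dev->ops.size_ib_pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->device = dev;

	/* The driver fills in HW-specific state (e.g. pd->pdn in mlx4). */
	ret = dev->ops.alloc_pd(pd, context, udata);
	if (ret) {
		kfree(pd);
		return ERR_PTR(ret);
	}
	return pd;
}

With the core owning the kzalloc()/kfree() pair, the mlx4 callbacks in the diff below shrink to pure hardware setup and teardown, which is also why .dealloc_pd() can become void.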
Diffstat (limited to 'drivers/infiniband/hw/mlx4/main.c')
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	36
1 file changed, 13 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index d66002a31000..c0f6aea7ed7c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1186,38 +1186,27 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	}
 }
 
-static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
-				       struct ib_ucontext *context,
-				       struct ib_udata *udata)
+static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
+			    struct ib_udata *udata)
 {
-	struct mlx4_ib_pd *pd;
+	struct mlx4_ib_pd *pd = to_mpd(ibpd);
+	struct ib_device *ibdev = ibpd->device;
 	int err;
 
-	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-	if (!pd)
-		return ERR_PTR(-ENOMEM);
-
 	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
-	if (err) {
-		kfree(pd);
-		return ERR_PTR(err);
-	}
+	if (err)
+		return err;
 
-	if (context)
-		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
-			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
-			kfree(pd);
-			return ERR_PTR(-EFAULT);
-		}
-	return &pd->ibpd;
+	if (context && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+		mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
+		return -EFAULT;
+	}
+	return 0;
 }
 
-static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
+static void mlx4_ib_dealloc_pd(struct ib_pd *pd)
 {
 	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
-	kfree(pd);
-
-	return 0;
 }
 
 static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
@@ -2580,6 +2569,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
 	.req_notify_cq = mlx4_ib_arm_cq,
 	.rereg_user_mr = mlx4_ib_rereg_user_mr,
 	.resize_cq = mlx4_ib_resize_cq,
+	INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
 };
 
 static const struct ib_device_ops mlx4_ib_dev_wq_ops = {