path: root/drivers/infiniband/hw/mlx4/qp.c
author		Leon Romanovsky <leonro@mellanox.com>	2020-09-02 11:16:23 +0300
committer	Jason Gunthorpe <jgg@nvidia.com>	2020-09-17 14:04:33 -0300
commit		c0a6b5ecc5b7dd028c2921415ea036074a8f8b00 (patch)
tree		097b09fba3a07538745abb63d145e9c7d87f8599 /drivers/infiniband/hw/mlx4/qp.c
parent		RDMA: Clean MW allocation and free flows (diff)
RDMA: Convert RWQ table logic to ib_core allocation scheme
Move struct ib_rwq_ind_table allocation to ib_core.

Link: https://lore.kernel.org/r/20200902081623.746359-3-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
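The scheme this series converts to: ib_core allocates the ib_rwq_ind_table from a size the driver declares, calls the driver's create callback (which now returns an int and initializes a pre-allocated object instead of returning a pointer), and frees the object itself on error or destroy. A minimal core-side sketch of that flow, assuming a hypothetical helper name core_create_rwq_ind_table; the real code in drivers/infiniband/core differs in detail:

	#include <rdma/ib_verbs.h>

	/*
	 * Illustrative sketch only, not the verbatim ib_core code.
	 * The core allocates the object using the size the driver
	 * declared with INIT_RDMA_OBJ_SIZE(), then hands it to the
	 * driver's create callback for initialization.
	 */
	static struct ib_rwq_ind_table *
	core_create_rwq_ind_table(struct ib_device *device,
				  struct ib_rwq_ind_table_init_attr *init_attr,
				  struct ib_udata *udata)
	{
		struct ib_rwq_ind_table *rwq_ind_table;
		int ret;

		/* The core, not the driver, owns the allocation now. */
		rwq_ind_table = rdma_zalloc_drv_obj(device, ib_rwq_ind_table);
		if (!rwq_ind_table)
			return ERR_PTR(-ENOMEM);

		rwq_ind_table->device = device;
		ret = device->ops.create_rwq_ind_table(rwq_ind_table,
						       init_attr, udata);
		if (ret) {
			/* On failure the core frees; the driver never kfree()s. */
			kfree(rwq_ind_table);
			return ERR_PTR(ret);
		}
		return rwq_ind_table;
	}

This is why the mlx4 hunk below can drop its kzalloc()/kfree() pair, its err: unwind label, and the whole mlx4_ib_destroy_rwq_ind_table() body.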
Diffstat (limited to 'drivers/infiniband/hw/mlx4/qp.c')
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c	|	40
1 file changed, 11 insertions(+), 29 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 1c94a2c4463a..0cb1a6e2fed4 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -4339,34 +4339,32 @@ int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 	return 0;
 }
 
-struct ib_rwq_ind_table
-*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
-			      struct ib_rwq_ind_table_init_attr *init_attr,
-			      struct ib_udata *udata)
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
+				 struct ib_rwq_ind_table_init_attr *init_attr,
+				 struct ib_udata *udata)
 {
-	struct ib_rwq_ind_table *rwq_ind_table;
 	struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
 	unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
+	struct ib_device *device = rwq_ind_table->device;
 	unsigned int base_wqn;
 	size_t min_resp_len;
-	int i;
-	int err;
+	int i, err = 0;
 
 	if (udata->inlen > 0 &&
 	    !ib_is_udata_cleared(udata, 0,
 				 udata->inlen))
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 
 	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
 	if (udata->outlen && udata->outlen < min_resp_len)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (ind_tbl_size >
 	    device->attrs.rss_caps.max_rwq_indirection_table_size) {
 		pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
 			 ind_tbl_size,
 			 device->attrs.rss_caps.max_rwq_indirection_table_size);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	base_wqn = init_attr->ind_tbl[0]->wq_num;
@@ -4374,39 +4372,23 @@ struct ib_rwq_ind_table
 	if (base_wqn % ind_tbl_size) {
 		pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
 			 base_wqn);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	for (i = 1; i < ind_tbl_size; i++) {
 		if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
 			pr_debug("indirection table's WQNs aren't consecutive\n");
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 		}
 	}
 
-	rwq_ind_table = kzalloc(sizeof(*rwq_ind_table), GFP_KERNEL);
-	if (!rwq_ind_table)
-		return ERR_PTR(-ENOMEM);
-
 	if (udata->outlen) {
 		resp.response_length = offsetof(typeof(resp), response_length) +
 			sizeof(resp.response_length);
 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
-		if (err)
-			goto err;
 	}
 
-	return rwq_ind_table;
-
-err:
-	kfree(rwq_ind_table);
-	return ERR_PTR(err);
-}
-
-int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
-{
-	kfree(ib_rwq_ind_tbl);
-	return 0;
+	return err;
 }
 
 struct mlx4_ib_drain_cqe {
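For completeness: the allocation size ib_core uses comes from the driver's ops table, wired up in hunks outside this file's diffstat (mlx4/main.c and mlx4_ib.h). A hedged sketch of that wiring; the wrapper struct, its member name, and the ops-table fragment here are assumptions for illustration, and the destroy callback shrinks to a trivial stub since the kfree() moved into ib_core:

	#include <rdma/ib_verbs.h>

	/* Assumed trivial wrapper: mlx4 keeps no private RWQ-table
	 * state, so the driver struct just embeds the ib_core object. */
	struct mlx4_ib_rwq_ind_table {
		struct ib_rwq_ind_table ib_rwq_ind_tbl;
	};

	/* Illustrative ops fragment, not the verbatim mlx4 table. */
	static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
		.create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
		.destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,

		/* Tells rdma_zalloc_drv_obj() in ib_core how much to
		 * allocate for this driver's indirection table object. */
		INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
				   ib_rwq_ind_tbl),
	};

The INIT_RDMA_OBJ_SIZE() declaration is the piece that lets the core allocate a driver-sized object while the callback signature stays driver-agnostic, which is the point of the whole conversion series.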