author    Wenpeng Liang <liangwenpeng@huawei.com>    2022-03-02 14:48:30 +0800
committer Jason Gunthorpe <jgg@nvidia.com>           2022-03-04 17:36:32 -0400
commit    73f7e05609ece4030f2745c4c0c01e0be6889590 (patch)
tree      29691c157282e53d5dc43a08878839b76a10ec1b /drivers/infiniband/hw/hns
parent    RDMA/hns: Refactor the alloc_srqc() (diff)
RDMA/hns: Refactor the alloc_cqc()
Abstract alloc_cqc() into several parts and move the steps that are
unrelated to allocating the CQC out of it.

Link: https://lore.kernel.org/r/20220302064830.61706-10-liangwenpeng@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
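For orientation, here is roughly how alloc_cqc() reads once the hunks below
are applied. This is a sketch reconstructed from this diff only; steps the
patch does not touch, and strings not visible in the hunks, are elided as
comments:

    static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
    {
    	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
    	struct ib_device *ibdev = &hr_dev->ib_dev;
    	u64 mtts[MTT_MIN_COUNT] = {};
    	dma_addr_t dma_handle;
    	int ret;

    	/* ... find the CQ buffer address (mtts/dma_handle) and get the
    	 * CQC table entry; a failure here now returns ret directly
    	 * instead of jumping to the removed err_out label ...
    	 */

    	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
    	if (ret) {
    		/* ... ibdev_err() message unchanged by this patch ... */
    		goto err_put;
    	}

    	/* Mailbox allocation, write_cqc() and the CREATE_CQC command
    	 * are now isolated in the new hns_roce_create_cqc() helper.
    	 */
    	ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
    	if (ret)
    		goto err_xa;

    	return 0;

    err_xa:
    	xa_erase(&cq_table->array, hr_cq->cqn);
    err_put:
    	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

    	return ret;
    }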
Diffstat (limited to 'drivers/infiniband/hw/hns')
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 65
1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 3d10300cab85..8acd599ffac1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -100,12 +100,39 @@ static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
mutex_unlock(&cq_table->bank_mutex);
}
+static int hns_roce_create_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq,
+ u64 *mtts, dma_addr_t dma_handle)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox)) {
+ ibdev_err(ibdev, "failed to alloc mailbox for CQC.\n");
+ return PTR_ERR(mailbox);
+ }
+
+ hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
+
+ ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC,
+ hr_cq->cqn);
+ if (ret)
+ ibdev_err(ibdev,
+ "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
+ hr_cq->cqn, ret);
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_cmd_mailbox *mailbox;
- u64 mtts[MTT_MIN_COUNT] = { 0 };
+ u64 mtts[MTT_MIN_COUNT] = {};
dma_addr_t dma_handle;
int ret;
@@ -121,7 +148,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
if (ret) {
ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
hr_cq->cqn, ret);
- goto err_out;
+ return ret;
}
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
@@ -130,40 +157,17 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
goto err_put;
}
- /* Allocate mailbox memory */
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox)) {
- ret = PTR_ERR(mailbox);
- goto err_xa;
- }
-
- hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
-
- ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC,
- hr_cq->cqn);
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- if (ret) {
- ibdev_err(ibdev,
- "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
- hr_cq->cqn, ret);
+ ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
+ if (ret)
goto err_xa;
- }
-
- hr_cq->cons_index = 0;
- hr_cq->arm_sn = 1;
-
- refcount_set(&hr_cq->refcount, 1);
- init_completion(&hr_cq->free);
return 0;
err_xa:
xa_erase(&cq_table->array, hr_cq->cqn);
-
err_put:
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
-err_out:
return ret;
}
@@ -411,6 +415,11 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
goto err_cqc;
}
+ hr_cq->cons_index = 0;
+ hr_cq->arm_sn = 1;
+ refcount_set(&hr_cq->refcount, 1);
+ init_completion(&hr_cq->free);
+
return 0;
err_cqc:
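The last hunk above is the other half of the refactor: the software-state
initialization (cons_index, arm_sn, the refcount and the free completion)
moves out of alloc_cqc() into hns_roce_create_cq(), where it runs only after
the CQC has been created in hardware. This keeps alloc_cqc() focused purely
on CQC allocation, as the commit message describes.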