author    Wenpeng Liang <liangwenpeng@huawei.com>  2020-01-06 20:21:15 +0800
committer Jason Gunthorpe <jgg@mellanox.com>       2020-01-07 16:26:33 -0400
commit    d800c93bacc7a9c381b5fa9c333fb821ba9df9df (patch)
tree      34ebbcf283ac9693f68ed074843fe35637b43dfe /drivers/infiniband/hw
parent    RDMA/hns: Remove redundant print information (diff)
RDMA/hns: Replace custom macros HNS_ROCE_ALIGN_UP
HNS_ROCE_ALIGN_UP can be replaced by round_up() which is defined in kernel.h.

Link: https://lore.kernel.org/r/1578313276-29080-7-git-send-email-liweihang@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
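For reference, the open-coded macro and the kernel helper compute the same result whenever the alignment is a power of two, which holds for the page-sized alignments used in hns_roce_qp.c. The following standalone userspace sketch (not part of the patch; the local round_up() is a copy modeled on the kernel.h definition, and the loop bounds are arbitrary) illustrates the equivalence:

/*
 * Standalone userspace sketch, not part of the patch: compares the removed
 * driver macro with a local copy of the kernel-style round_up().  round_up()
 * is only valid for power-of-two alignments, which is the case for the
 * page-sized values passed in hns_roce_qp.c.
 */
#include <assert.h>
#include <stdio.h>

#define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))

/* Local copies of the kernel.h helpers for this userspace test. */
#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
#define round_up(x, y)     ((((x) - 1) | __round_mask(x, y)) + 1)

int main(void)
{
	const unsigned long page_size = 4096;	/* stand-in for PAGE_SIZE */
	unsigned long len;

	for (len = 1; len < 8 * page_size; len += 123) {
		/* Both forms agree for power-of-two alignments. */
		assert(HNS_ROCE_ALIGN_UP(len, page_size) ==
		       round_up(len, page_size));
	}
	printf("round_up() matches HNS_ROCE_ALIGN_UP for page-sized alignment\n");
	return 0;
}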
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h  |  2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c      | 44
2 files changed, 20 insertions, 26 deletions
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 21751e43aab3..93c2210fa60e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -45,8 +45,6 @@
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
-#define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
-
#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
#define HNS_ROCE_BA_SIZE (32 * 4096)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index a6565b674801..c5b01ec8ca6a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -393,40 +393,38 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
/* Get buf size, SQ and RQ are aligned to page_size */
if (hr_dev->caps.max_sq_sg <= 2) {
- hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
- HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
+ round_up((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
hr_qp->sq.offset = 0;
- hr_qp->rq.offset = HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
} else {
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sge.sge_cnt = ex_sge_num ?
max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
- hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), page_size) +
- HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
+ round_up((hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift), page_size) +
- HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
+ round_up((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), page_size);
hr_qp->sq.offset = 0;
if (ex_sge_num) {
- hr_qp->sge.offset = HNS_ROCE_ALIGN_UP(
- (hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift),
- page_size);
+ hr_qp->sge.offset = round_up((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift),
+ page_size);
hr_qp->rq.offset = hr_qp->sge.offset +
- HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
- hr_qp->sge.sge_shift),
- page_size);
+ round_up((hr_qp->sge.sge_cnt <<
+ hr_qp->sge.sge_shift),
+ page_size);
} else {
- hr_qp->rq.offset = HNS_ROCE_ALIGN_UP(
- (hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift),
- page_size);
+ hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift),
+ page_size);
}
}
@@ -593,20 +591,18 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
- size = HNS_ROCE_ALIGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
- page_size);
+ size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
- (u32)hr_qp->sge.sge_cnt);
+ (u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size;
- size += HNS_ROCE_ALIGN_UP(hr_qp->sge.sge_cnt <<
- hr_qp->sge.sge_shift, page_size);
+ size += round_up(hr_qp->sge.sge_cnt << hr_qp->sge.sge_shift,
+ page_size);
}
hr_qp->rq.offset = size;
- size += HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
- page_size);
+ size += round_up((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), page_size);
hr_qp->buff_size = size;
/* Get wr and sge number which send */
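To make the layout computed above easier to follow, the sketch below (standalone, with hypothetical queue depths and strides rather than real hardware caps; the round_up() copy again mirrors the kernel.h definition) walks the same SQ, extended-SGE, RQ offset arithmetic as the converted hns_roce_set_kernel_sq_size():

/*
 * Standalone sketch with hypothetical values, not driver code: reproduces the
 * post-patch offset/size arithmetic, placing SQ, then extended SGE, then RQ,
 * with each region rounded up to the buffer page size.
 */
#include <stdio.h>

#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
#define round_up(x, y)     ((((x) - 1) | __round_mask(x, y)) + 1)

int main(void)
{
	/* Hypothetical example values: mtt_buf_pg_sz == 0, PAGE_SHIFT == 12. */
	unsigned int page_size = 1u << 12;
	unsigned int sq_wqe_cnt = 128, sq_wqe_shift = 6;
	unsigned int sge_cnt = 64, sge_shift = 4;
	unsigned int rq_wqe_cnt = 128, rq_wqe_shift = 4;
	unsigned int sq_offset, sge_offset, rq_offset, size;

	sq_offset = 0;
	size = round_up(sq_wqe_cnt << sq_wqe_shift, page_size);

	sge_offset = size;
	size += round_up(sge_cnt << sge_shift, page_size);

	rq_offset = size;
	size += round_up(rq_wqe_cnt << rq_wqe_shift, page_size);

	printf("sq@0x%x sge@0x%x rq@0x%x buff_size=0x%x\n",
	       sq_offset, sge_offset, rq_offset, size);
	return 0;
}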