Commit d800c93b authored by Wenpeng Liang, committed by Jason Gunthorpe
Browse files

RDMA/hns: Replace custom macros HNS_ROCE_ALIGN_UP

HNS_ROCE_ALIGN_UP can be replaced by round_up() which is defined in
kernel.h.

Link: https://lore.kernel.org/r/1578313276-29080-7-git-send-email-liweihang@huawei.com


Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 0c53426c
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -45,8 +45,6 @@

#define HNS_ROCE_MAX_MSG_LEN			0x80000000

#define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))

#define HNS_ROCE_IB_MIN_SQ_STRIDE		6

#define HNS_ROCE_BA_SIZE			(32 * 4096)
+20 −24
Original line number Diff line number Diff line
@@ -393,38 +393,36 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,

	/* Get buf size, SQ and RQ  are aligned to page_szie */
	if (hr_dev->caps.max_sq_sg <= 2) {
		hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
		hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
				   HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
				   round_up((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);

		hr_qp->sq.offset = 0;
		hr_qp->rq.offset = HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
		hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);
	} else {
		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
		hr_qp->sge.sge_cnt = ex_sge_num ?
		   max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
		hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
		hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), page_size) +
				   HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
				   round_up((hr_qp->sge.sge_cnt <<
					     hr_qp->sge.sge_shift), page_size) +
				   HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
				   round_up((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), page_size);

		hr_qp->sq.offset = 0;
		if (ex_sge_num) {
			hr_qp->sge.offset = HNS_ROCE_ALIGN_UP(
							(hr_qp->sq.wqe_cnt <<
			hr_qp->sge.offset = round_up((hr_qp->sq.wqe_cnt <<
						      hr_qp->sq.wqe_shift),
						     page_size);
			hr_qp->rq.offset = hr_qp->sge.offset +
					HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
					   round_up((hr_qp->sge.sge_cnt <<
						     hr_qp->sge.sge_shift),
						    page_size);
		} else {
			hr_qp->rq.offset = HNS_ROCE_ALIGN_UP(
							(hr_qp->sq.wqe_cnt <<
			hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
						     hr_qp->sq.wqe_shift),
						    page_size);
		}
@@ -593,20 +591,18 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
	/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	hr_qp->sq.offset = 0;
	size = HNS_ROCE_ALIGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
				 page_size);
	size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);

	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
		hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
					 (u32)hr_qp->sge.sge_cnt);
		hr_qp->sge.offset = size;
		size += HNS_ROCE_ALIGN_UP(hr_qp->sge.sge_cnt <<
					  hr_qp->sge.sge_shift, page_size);
		size += round_up(hr_qp->sge.sge_cnt << hr_qp->sge.sge_shift,
				 page_size);
	}

	hr_qp->rq.offset = size;
	size += HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
				  page_size);
	size += round_up((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), page_size);
	hr_qp->buff_size = size;

	/* Get wr and sge number which send */