Commit 9581a356 authored by Xi Wang, committed by Jason Gunthorpe
Browse files

RDMA/hns: Rename macro for defining hns hardware page size

Rename the PAGE_ADDR_SHIFT as HNS_HW_PAGE_SHIFT to make code more
readable.

Link: https://lore.kernel.org/r/1588931159-56875-9-git-send-email-liweihang@huawei.com


Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 252067e9
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -189,8 +189,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
	u32 page_size;
	int i;

	/* The minimum shift of the page accessed by hw is PAGE_ADDR_SHIFT */
	buf->page_shift = max_t(int, PAGE_ADDR_SHIFT, page_shift);
	/* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */
	buf->page_shift = max_t(int, HNS_HW_PAGE_SHIFT, page_shift);

	page_size = 1 << buf->page_shift;
	buf->npages = DIV_ROUND_UP(size, page_size);
@@ -261,7 +261,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
	int idx = 0;
	u64 addr;

	if (page_shift < PAGE_ADDR_SHIFT) {
	if (page_shift < HNS_HW_PAGE_SHIFT) {
		dev_err(hr_dev->dev, "Failed to check umem page shift %d!\n",
			page_shift);
		return -EINVAL;
+2 −2
Original line number Diff line number Diff line
@@ -149,14 +149,14 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_ADDR_SHIFT;
	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
				  hr_dev->caps.cqe_ba_pg_sz + PAGE_ADDR_SHIFT,
				  hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, addr);
	if (err)
		ibdev_err(ibdev, "Failed to alloc CQ mtr, err %d\n", err);
+6 −4
Original line number Diff line number Diff line
@@ -262,7 +262,9 @@ enum {
#define HNS_ROCE_PORT_DOWN			0
#define HNS_ROCE_PORT_UP			1

#define PAGE_ADDR_SHIFT				12
/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT			12
#define HNS_HW_PAGE_SIZE			(1 << HNS_HW_PAGE_SHIFT)

/* The minimum page count for hardware access page directly. */
#define HNS_HW_DIRECT_PAGE_COUNT 2
@@ -1080,16 +1082,16 @@ static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, int idx)
		return buf->page_list[idx].map;
}

#define hr_hw_page_align(x)		ALIGN(x, 1 << PAGE_ADDR_SHIFT)
#define hr_hw_page_align(x)		ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)

static inline u64 to_hr_hw_page_addr(u64 addr)
{
	return addr >> PAGE_ADDR_SHIFT;
	return addr >> HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
	return page_shift - PAGE_ADDR_SHIFT;
	return page_shift - HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
+2 −2
Original line number Diff line number Diff line
@@ -5606,7 +5606,7 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
	else
		eq->hop_num = hr_dev->caps.eqe_hop_num;

	buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_ADDR_SHIFT;
	buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = eq->entries * eq->eqe_size;
	buf_attr.region[0].hopnum = eq->hop_num;
	buf_attr.region_count = 1;
@@ -5614,7 +5614,7 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)

	err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
				  hr_dev->caps.eqe_ba_pg_sz +
				  PAGE_ADDR_SHIFT, NULL, 0);
				  HNS_HW_PAGE_SHIFT, NULL, 0);
	if (err)
		dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);

+3 −3
Original line number Diff line number Diff line
@@ -120,7 +120,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,

	mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
	buf_attr.page_shift = is_fast ? PAGE_SHIFT :
			      hr_dev->caps.pbl_buf_pg_sz + PAGE_ADDR_SHIFT;
			      hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = length;
	buf_attr.region[0].hopnum = mr->pbl_hop_num;
	buf_attr.region_count = 1;
@@ -130,7 +130,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
	buf_attr.mtt_only = is_fast;

	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
				  hr_dev->caps.pbl_ba_pg_sz + PAGE_ADDR_SHIFT,
				  hr_dev->caps.pbl_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, start);
	if (err)
		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
@@ -819,7 +819,7 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
	}

	/* must bigger than minimum hardware page shift */
	if (best_pg_shift < PAGE_ADDR_SHIFT || all_pg_count < 1) {
	if (best_pg_shift < HNS_HW_PAGE_SHIFT || all_pg_count < 1) {
		ret = -EINVAL;
		ibdev_err(ibdev, "Failed to check mtr page shift %d count %d\n",
			  best_pg_shift, all_pg_count);
Loading