Commit 8e029d38 authored by Xi Wang, committed by Jason Gunthorpe

RDMA/hns: Optimize the usage of MTR

Currently, the MTR region is configured before hns_roce_mtr_map() is
invoked, but in some scenarios the region is configured at MTR creation
time, so the caller needs to store this configuration and call
hns_roce_mtr_map() later. So optimize the usage by wrapping the MTR
region configuration into the MTR itself.
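
To illustrate the new calling convention, below is a minimal user-space
sketch (the types and names, e.g. struct mtr_model and mtr_map_model(),
are simplified stand-ins invented for this sketch and are not the
driver's structures): the caller fills the region array and region_count
inside the MTR's hem_cfg, then calls the map routine without passing a
region list, mirroring the hns_roce_map_mr_sg() hunk below.

/*
 * Simplified model of the pattern after this patch.  The region layout
 * lives inside the MTR (hem_cfg) instead of being passed to the map call.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_BT_REGION 4

struct buf_region {
	int offset;	/* first page index covered by this region */
	int count;	/* number of pages in this region */
	int hopnum;	/* addressing hop number for this region */
};

struct mtr_model {
	struct {
		struct buf_region region[MAX_BT_REGION];
		unsigned int region_count;
	} hem_cfg;
};

/* Mirrors the check in hns_roce_mtr_map(): every configured region must
 * fit inside the page array supplied by the caller. */
static int mtr_map_model(struct mtr_model *mtr, const uint64_t *pages,
			 int page_cnt)
{
	unsigned int i;

	for (i = 0; i < mtr->hem_cfg.region_count; i++) {
		const struct buf_region *r = &mtr->hem_cfg.region[i];

		if (r->offset + r->count > page_cnt)
			return -1;	/* -EINVAL in the driver */
		printf("map region %u: first page 0x%llx, %d pages, hopnum %d\n",
		       i, (unsigned long long)pages[r->offset], r->count,
		       r->hopnum);
	}
	return 0;
}

int main(void)
{
	uint64_t pages[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	struct mtr_model mtr = { .hem_cfg = { .region_count = 0 } };

	/* Configure the region in the MTR itself ... */
	mtr.hem_cfg.region[0].offset = 0;
	mtr.hem_cfg.region[0].count = 4;
	mtr.hem_cfg.region[0].hopnum = 2;
	mtr.hem_cfg.region_count = 1;

	/* ... then map without passing a region array. */
	return mtr_map_model(&mtr, pages, 4);
}

In the driver this corresponds to filling mtr->hem_cfg.region[0] and
mtr->hem_cfg.region_count and then calling
hns_roce_mtr_map(hr_dev, mtr, pages, npages), as the hunks below show.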

Link: https://lore.kernel.org/r/1589982799-28728-10-git-send-email-liweihang@huawei.com


Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 494c3b31
drivers/infiniband/hw/hns/hns_roce_device.h +2 −1
@@ -359,6 +359,8 @@ struct hns_roce_mtr {
 		unsigned int	ba_pg_shift; /* BA table page shift */
 		unsigned int	buf_pg_shift; /* buffer page shift */
 		int		buf_pg_count;  /* buffer page count */
+		struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
+		unsigned int	region_count;
 	} hem_cfg; /* config for hardware addressing */
 };

@@ -1145,7 +1147,6 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_mtr *mtr);
 int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-		     struct hns_roce_buf_region *regions, int region_cnt,
 		     dma_addr_t *pages, int page_cnt);
 
 int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
drivers/infiniband/hw/hns/hns_roce_mr.c +28 −26
@@ -483,7 +483,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
-	struct hns_roce_buf_region region = {};
+	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
 	int ret = 0;
 
 	mr->npages = 0;
@@ -499,11 +499,11 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 		goto err_page_list;
 	}
 
-	region.offset = 0;
-	region.count = mr->npages;
-	region.hopnum = mr->pbl_hop_num;
-	ret = hns_roce_mtr_map(hr_dev, &mr->pbl_mtr, &region, 1, mr->page_list,
-			       mr->npages);
+	mtr->hem_cfg.region[0].offset = 0;
+	mtr->hem_cfg.region[0].count = mr->npages;
+	mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
+	mtr->hem_cfg.region_count = 1;
+	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
 	if (ret) {
 		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
 		ret = 0;
@@ -863,7 +863,6 @@ static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 }
 
 int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-		     struct hns_roce_buf_region *regions, int region_cnt,
 		     dma_addr_t *pages, int page_cnt)
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
@@ -871,8 +870,8 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	int err;
 	int i;
 
-	for (i = 0; i < region_cnt; i++) {
-		r = &regions[i];
+	for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+		r = &mtr->hem_cfg.region[i];
 		if (r->offset + r->count > page_cnt) {
 			err = -EINVAL;
 			ibdev_err(ibdev,
@@ -945,15 +944,16 @@ done:
 }
 
 /* convert buffer size to page index and page count */
-static int mtr_init_region(struct hns_roce_buf_attr *attr, int page_cnt,
-			   struct hns_roce_buf_region *regions, int region_cnt,
-			   unsigned int page_shift)
+static unsigned int mtr_init_region(struct hns_roce_buf_attr *attr,
+				    int page_cnt,
+				    struct hns_roce_buf_region *regions,
+				    int region_cnt, unsigned int page_shift)
 {
 	unsigned int page_size = 1 << page_shift;
 	int max_region = attr->region_count;
 	struct hns_roce_buf_region *r;
+	unsigned int i = 0;
 	int page_idx = 0;
-	int i = 0;
 
 	for (; i < region_cnt && i < max_region && page_idx < page_cnt; i++) {
 		r = &regions[i];
@@ -982,7 +982,6 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 			unsigned int page_shift, struct ib_udata *udata,
 			unsigned long user_addr)
 {
-	struct hns_roce_buf_region regions[HNS_ROCE_MAX_BT_REGION] = {};
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	dma_addr_t *pages = NULL;
 	int region_cnt = 0;
@@ -1014,18 +1013,22 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	hns_roce_hem_list_init(&mtr->hem_list);
 	mtr->hem_cfg.is_direct = !has_mtt;
 	mtr->hem_cfg.ba_pg_shift = page_shift;
-	if (has_mtt) {
+	mtr->hem_cfg.region_count = 0;
 	region_cnt = mtr_init_region(buf_attr, all_pg_cnt,
-					     regions, ARRAY_SIZE(regions),
+				     mtr->hem_cfg.region,
+				     ARRAY_SIZE(mtr->hem_cfg.region),
 				     mtr->hem_cfg.buf_pg_shift);
 	if (region_cnt < 1) {
 		err = -ENOBUFS;
-			ibdev_err(ibdev, "Failed to init mtr region %d\n",
-				  region_cnt);
+		ibdev_err(ibdev, "failed to init mtr region %d\n", region_cnt);
 		goto err_alloc_bufs;
 	}
 
+	mtr->hem_cfg.region_count = region_cnt;
+
 	if (has_mtt) {
 		err = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
-						regions, region_cnt,
+						mtr->hem_cfg.region, region_cnt,
 						page_shift);
 		if (err) {
 			ibdev_err(ibdev, "Failed to request mtr hem, err %d\n",
@@ -1061,8 +1064,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		mtr->hem_cfg.root_ba = pages[0];
 	} else {
 		/* write buffer's dma address to BA table */
-		err = hns_roce_mtr_map(hr_dev, mtr, regions, region_cnt, pages,
-				       all_pg_cnt);
+		err = hns_roce_mtr_map(hr_dev, mtr, pages, all_pg_cnt);
 		if (err) {
 			ibdev_err(ibdev, "Failed to map mtr pages, err %d\n",
 				  err);