Commit 2a2f1887 authored by Lijun Ou's avatar Lijun Ou Committed by Jason Gunthorpe
Browse files

RDMA/hns: Refactor the code of creating srq

Move the code that creates user-space and kernel-space SRQs into two
independent functions, remove some unused code, and simplify the
remaining logic.

Link: https://lore.kernel.org/r/1562593285-8037-3-git-send-email-oulijun@huawei.com


Signed-off-by: default avatarLijun Ou <oulijun@huawei.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@mellanox.com>
parent 4f8f0d5e
Loading
Loading
Loading
Loading
+183 −127
Original line number Diff line number Diff line
@@ -175,71 +175,19 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}

static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
				   u32 page_shift)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
	if (!idx_que->bitmap)
		return -ENOMEM;

	idx_que->buf_size = srq->idx_que.buf_size;

	if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
			       &idx_que->idx_buf, page_shift)) {
		bitmap_free(idx_que->bitmap);
		return -ENOMEM;
	}

	return 0;
}

int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata)
static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
			   int srq_buf_size)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	int srq_desc_size;
	int srq_buf_size;
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct hns_roce_ib_create_srq  ucmd;
	u32 page_shift;
	int ret = 0;
	u32 npages;
	u32 cqn;

	/* Check the actual SRQ wqe and SRQ sge num */
	if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
	    srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
	srq->max_gs = srq_init_attr->attr.max_sge;

	srq_desc_size = max(16, 16 * srq->max_gs);

	srq->wqe_shift = ilog2(srq_desc_size);

	srq_buf_size = srq->max * srq_desc_size;

	srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
	srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
	srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
	srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;

	if (udata) {
		struct hns_roce_ib_create_srq  ucmd;
	int ret;

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
		return -EFAULT;

		srq->umem =
			ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
	srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
	if (IS_ERR(srq->umem))
		return PTR_ERR(srq->umem);

@@ -248,28 +196,24 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
			 (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
			 (1 << hr_dev->caps.srqwqe_buf_pg_sz);
		page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
						page_shift,
						&srq->mtt);
		ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
	} else
			ret = hns_roce_mtt_init(hr_dev,
						ib_umem_page_count(srq->umem),
		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->umem),
					PAGE_SHIFT, &srq->mtt);
	if (ret)
			goto err_buf;
		goto err_user_buf;

	ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
	if (ret)
			goto err_srq_mtt;
		goto err_user_srq_mtt;

	/* config index queue BA */
	srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
					srq->idx_que.buf_size, 0, 0);
	if (IS_ERR(srq->idx_que.umem)) {
			dev_err(hr_dev->dev,
				"ib_umem_get error for index queue\n");
		dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
		ret = PTR_ERR(srq->idx_que.umem);
			goto err_srq_mtt;
		goto err_user_srq_mtt;
	}

	if (hr_dev->caps.idx_buf_pg_sz) {
@@ -277,18 +221,18 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
			 (1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
			 (1 << hr_dev->caps.idx_buf_pg_sz);
		page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
						page_shift, &srq->idx_que.mtt);
		ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
					&srq->idx_que.mtt);
	} else {
			ret = hns_roce_mtt_init(
				hr_dev, ib_umem_page_count(srq->idx_que.umem),
				PAGE_SHIFT, &srq->idx_que.mtt);
		ret = hns_roce_mtt_init(hr_dev,
					ib_umem_page_count(srq->idx_que.umem),
					PAGE_SHIFT,
					&srq->idx_que.mtt);
	}

	if (ret) {
			dev_err(hr_dev->dev,
				"hns_roce_mtt_init error for idx que\n");
			goto err_idx_mtt;
		dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
		goto err_user_idx_mtt;
	}

	ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
@@ -296,33 +240,74 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
	if (ret) {
		dev_err(hr_dev->dev,
			"hns_roce_ib_umem_write_mtt error for idx que\n");
			goto err_idx_buf;
		goto err_user_idx_buf;
	}
	} else {
		page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
				       (1 << page_shift) * 2, &srq->buf,
				       page_shift))

	return 0;

err_user_idx_buf:
	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_user_idx_mtt:
	ib_umem_release(srq->idx_que.umem);

err_user_srq_mtt:
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_user_buf:
	ib_umem_release(srq->umem);

	return ret;
}

/*
 * Allocate the index-queue resources for a kernel-space SRQ: a bitmap
 * tracking in-use index entries and the DMA buffer holding the index
 * queue itself.
 *
 * @pd:         protection domain; only used to reach the hns_roce device.
 * @srq:        SRQ being created; srq->max and srq->idx_que.buf_size must
 *              already be set by the caller.
 * @page_shift: page shift used for the index-queue buffer allocation.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.  On failure no
 * resources are left allocated.
 */
static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
				   u32 page_shift)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	/* One bit per SRQ WQE index slot. */
	idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
	if (!idx_que->bitmap)
		return -ENOMEM;

	/*
	 * Note: the original code had "idx_que->buf_size =
	 * srq->idx_que.buf_size;" here, a self-assignment (idx_que is
	 * &srq->idx_que) — removed as a no-op.  buf_size is set by the
	 * caller before this function runs.
	 */
	if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
			       &idx_que->idx_buf, page_shift)) {
		bitmap_free(idx_que->bitmap);
		return -ENOMEM;
	}

	return 0;
}

static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
	int ret;

	if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
			       &srq->buf, page_shift))
		return -ENOMEM;

	srq->head = 0;
	srq->tail = srq->max - 1;

		ret = hns_roce_mtt_init(hr_dev, srq->buf.npages,
					srq->buf.page_shift, &srq->mtt);
	ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
				&srq->mtt);
	if (ret)
			goto err_buf;
		goto err_kernel_buf;

	ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
	if (ret)
			goto err_srq_mtt;
		goto err_kernel_srq_mtt;

	page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
		ret = hns_roce_create_idx_que(ib_srq->pd, srq, page_shift);
	ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift);
	if (ret) {
			dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n",
				ret);
			goto err_srq_mtt;
		dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret);
		goto err_kernel_srq_mtt;
	}

	/* Init mtt table for idx_que */
@@ -330,18 +315,104 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
				srq->idx_que.idx_buf.page_shift,
				&srq->idx_que.mtt);
	if (ret)
			goto err_create_idx;
		goto err_kernel_create_idx;

	/* Write buffer address into the mtt table */
	ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
				     &srq->idx_que.idx_buf);
	if (ret)
			goto err_idx_buf;
		goto err_kernel_idx_buf;

	srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		ret = -ENOMEM;
			goto err_idx_buf;
		goto err_kernel_idx_buf;
	}

	return 0;

err_kernel_idx_buf:
	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_kernel_create_idx:
	hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
			  &srq->idx_que.idx_buf);
	kfree(srq->idx_que.bitmap);

err_kernel_srq_mtt:
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_kernel_buf:
	hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);

	return ret;
}

/*
 * Tear down the resources of a user-space SRQ created by
 * create_user_srq(): the MTTs for the index queue and the WQE buffer,
 * and the user memory regions pinned for both.  Order mirrors the
 * reverse of the creation path.
 */
static void destroy_user_srq(struct hns_roce_dev *hr_dev,
			     struct hns_roce_srq *srq)
{
	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
	ib_umem_release(srq->idx_que.umem);
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
	ib_umem_release(srq->umem);
}

/*
 * Tear down the resources of a kernel-space SRQ created by
 * create_kernel_srq(): the wrid array, the index-queue MTT/buffer/bitmap,
 * and the WQE MTT/buffer.  Order mirrors the reverse of the creation
 * path.
 *
 * @srq_buf_size: size of the SRQ WQE buffer, as passed to
 *                create_kernel_srq().
 */
static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
			       struct hns_roce_srq *srq, int srq_buf_size)
{
	kvfree(srq->wrid);
	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
	hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
	/*
	 * The bitmap was allocated with bitmap_zalloc() in
	 * hns_roce_create_idx_que(), so release it with the matching
	 * bitmap_free() rather than bare kfree().
	 */
	bitmap_free(srq->idx_que.bitmap);
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
	hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
}

int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	int srq_desc_size;
	int srq_buf_size;
	int ret = 0;
	u32 cqn;

	/* Check the actual SRQ wqe and SRQ sge num */
	if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
	    srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
	srq->max_gs = srq_init_attr->attr.max_sge;

	srq_desc_size = max(16, 16 * srq->max_gs);

	srq->wqe_shift = ilog2(srq_desc_size);

	srq_buf_size = srq->max * srq_desc_size;

	srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
	srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
	srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
	srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;

	if (udata) {
		ret = create_user_srq(srq, udata, srq_buf_size);
		if (ret) {
			dev_err(hr_dev->dev, "Create user srq failed\n");
			goto err_srq;
		}
	} else {
		ret = create_kernel_srq(srq, srq_buf_size);
		if (ret) {
			dev_err(hr_dev->dev, "Create kernel srq failed\n");
			goto err_srq;
		}
	}

@@ -373,27 +444,12 @@ err_srqc_alloc:
	hns_roce_srq_free(hr_dev, srq);

err_wrid:
	kvfree(srq->wrid);

err_idx_buf:
	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_idx_mtt:
	ib_umem_release(srq->idx_que.umem);

err_create_idx:
	hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
			  &srq->idx_que.idx_buf);
	bitmap_free(srq->idx_que.bitmap);

err_srq_mtt:
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_buf:
	ib_umem_release(srq->umem);
	if (!udata)
		hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
	if (udata)
		destroy_user_srq(hr_dev, srq);
	else
		destroy_kernel_srq(hr_dev, srq, srq_buf_size);

err_srq:
	return ret;
}