Commit b08fe048 authored by Devesh Sharma, committed by Jason Gunthorpe

RDMA/bnxt_re: Refactor net ring allocation function

Introduce a new attribute structure to reduce the long list of arguments
passed to the bnxt_re_net_ring_alloc() function.

The caller of bnxt_re_net_ring_alloc() should fill in the attributes in a
bnxt_re_ring_attr structure and then pass a pointer to it to the function.
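
For reference, a minimal sketch of the new calling convention, lightly
condensed from the NQ allocation path in the diff below (the "nq" pointer
and loop index "i" come from that loop):

	struct bnxt_re_ring_attr rattr = {};

	rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
	rattr.pages = nq->hwq.pbl[nq->hwq.level].pg_count;
	rattr.type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
	rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);

This replaces the earlier seven-argument call, in which the DMA array,
page count, ring type, ring mask, and map index were all passed
positionally.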

Link: https://lore.kernel.org/r/1581786665-23705-5-git-send-email-devesh.sharma@broadcom.com


Signed-off-by: Naresh Kumar PBS <nareshkumar.pbs@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 0c4dcd60
drivers/infiniband/hw/bnxt_re/bnxt_re.h +9 −0
@@ -89,6 +89,15 @@
 
 #define BNXT_RE_DEFAULT_ACK_DELAY	16
 
+struct bnxt_re_ring_attr {
+	dma_addr_t	*dma_arr;
+	int		pages;
+	int		type;
+	u32		depth;
+	u32		lrid; /* Logical ring id */
+	u8		mode;
+};
+
 struct bnxt_re_work {
 	struct work_struct	work;
 	unsigned long		event;
drivers/infiniband/hw/bnxt_re/main.c +35 −29
@@ -427,9 +427,9 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
 	return rc;
 }
 
-static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
-				  int pages, int type, u32 ring_mask,
-				  u32 map_index, u16 *fw_ring_id)
+static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
+				  struct bnxt_re_ring_attr *ring_attr,
+				  u16 *fw_ring_id)
 {
 	struct bnxt_en_dev *en_dev = rdev->en_dev;
 	struct hwrm_ring_alloc_input req = {0};
@@ -443,18 +443,18 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
 	memset(&fw_msg, 0, sizeof(fw_msg));
 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
 	req.enables = 0;
-	req.page_tbl_addr =  cpu_to_le64(dma_arr[0]);
-	if (pages > 1) {
+	req.page_tbl_addr =  cpu_to_le64(ring_attr->dma_arr[0]);
+	if (ring_attr->pages > 1) {
 		/* Page size is in log2 units */
 		req.page_size = BNXT_PAGE_SHIFT;
 		req.page_tbl_depth = 1;
 	}
 	req.fbo = 0;
 	/* Association of ring index with doorbell index and MSIX number */
-	req.logical_id = cpu_to_le16(map_index);
-	req.length = cpu_to_le32(ring_mask + 1);
-	req.ring_type = type;
-	req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+	req.logical_id = cpu_to_le16(ring_attr->lrid);
+	req.length = cpu_to_le32(ring_attr->depth + 1);
+	req.ring_type = ring_attr->type;
+	req.int_mode = ring_attr->mode;
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
 			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
 	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
@@ -1006,10 +1006,10 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
 
 static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
 {
+	struct bnxt_re_ring_attr rattr = {};
+	struct bnxt_qplib_ctx *qplib_ctx;
 	int num_vec_created = 0;
-	dma_addr_t *pg_map;
 	int rc = 0, i;
-	int pages;
 	u8 type;
 
 	/* Configure and allocate resources for qplib */
@@ -1030,10 +1030,13 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
 	if (rc)
 		goto dealloc_res;
 
+	qplib_ctx = &rdev->qplib_ctx;
 	for (i = 0; i < rdev->num_msix - 1; i++) {
-		rdev->nq[i].res = &rdev->qplib_res;
-		rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
-			BNXT_RE_MAX_SRQC_COUNT + 2;
+		struct bnxt_qplib_nq *nq;
+
+		nq = &rdev->nq[i];
+		nq->hwq.max_elements = (qplib_ctx->cq_count +
+					qplib_ctx->srqc_count + 2);
 		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
 		if (rc) {
 			dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
@@ -1041,12 +1044,13 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
 			goto free_nq;
 		}
 		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
-		pg_map = rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr;
-		pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count;
-		rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
-					    BNXT_QPLIB_NQE_MAX_CNT - 1,
-					    rdev->msix_entries[i + 1].ring_idx,
-					    &rdev->nq[i].ring_id);
+		rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
+		rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
+		rattr.type = type;
+		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
+		rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
+		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
 		if (rc) {
 			dev_err(rdev_to_dev(rdev),
 				"Failed to allocate NQ fw id with rc = 0x%x",
@@ -1371,10 +1375,10 @@ static void bnxt_re_worker(struct work_struct *work)
 
 static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 {
-	dma_addr_t *pg_map;
-	u32 db_offt, ridx;
-	int pages, vid;
+	struct bnxt_re_ring_attr rattr;
+	u32 db_offt;
 	bool locked;
+	int vid;
 	u8 type;
 	int rc;
 
@@ -1383,6 +1387,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 	locked = true;
 
 	/* Registered a new RoCE device instance to netdev */
+	memset(&rattr, 0, sizeof(rattr));
 	rc = bnxt_re_register_netdev(rdev);
 	if (rc) {
 		rtnl_unlock();
@@ -1422,12 +1427,13 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 	}
 
 	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
-	pg_map = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr;
-	pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count;
-	ridx = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
-	rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
-				    BNXT_QPLIB_CREQE_MAX_CNT - 1,
-				    ridx, &rdev->rcfw.creq_ring_id);
+	rattr.dma_arr = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr;
+	rattr.pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count;
+	rattr.type = type;
+	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
+	rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &rdev->rcfw.creq_ring_id);
 	if (rc) {
 		pr_err("Failed to allocate CREQ: %#x\n", rc);
 		goto free_rcfw;