Commit b0301a5a authored by David S. Miller

Merge branch 'qed-next'



Yuval Basson says:

====================
qed: Add xrc core support for RoCE

This patch adds support for configuring XRC (eXtended Reliable Connection, an
RDMA transport that lets multiple processes share receive resources through
XRC domains and XRC SRQs) and provides the necessary APIs for the RDMA
upper-layer driver (qedr) to enable the XRC feature.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents d8bed686 7bfb399e
+50 −10
@@ -110,6 +110,7 @@ struct src_ent {
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))

#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
@@ -293,18 +294,40 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
	return NULL;
}

static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn,
				  u32 num_srqs, u32 num_xrc_srqs)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

	p_mgr->srq_count = num_srqs;
	p_mgr->xrc_srq_count = num_xrc_srqs;
}

u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
			      enum ilt_clients ilt_client)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];

	return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
}

static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn)
{
	u32 page_size;

	page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
	return page_size / XRC_SRQ_CXT_SIZE;
}
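
In other words, the XRC SRQ count is simply "how many contexts fit in one ILT
page". A back-of-the-envelope check with assumed sizes (both constants below
are illustrative, not values taken from the driver):

	/* Sketch only -- both sizes are assumptions, not driver values. */
	static unsigned int example_xrc_srqs_per_page(void)
	{
		unsigned int page_size = 4096;	/* assumed ILT page size in bytes */
		unsigned int cxt_size = 64;	/* assumed XRC SRQ context size */

		return page_size / cxt_size;	/* 64 XRC SRQ contexts per page */
	}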

u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	u32 total_srqs;

	total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count;

	return p_mgr->srq_count;
	return total_srqs;
}

/* set the iids count per protocol */
@@ -692,7 +715,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
	}

	/* TSDM (SRQ CONTEXT) */
	total = qed_cxt_get_srq_count(p_hwfn);
	total = qed_cxt_get_total_srq_count(p_hwfn);

	if (total) {
		p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
@@ -1962,11 +1985,9 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
				   struct qed_rdma_pf_params *p_params,
				   u32 num_tasks)
{
	u32 num_cons, num_qps, num_srqs;
	u32 num_cons, num_qps;
	enum protocol_type proto;

	num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);

	if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
		DP_NOTICE(p_hwfn,
			  "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
@@ -1989,6 +2010,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
	}

	if (num_cons && num_tasks) {
		u32 num_srqs, num_xrc_srqs;

		qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);

		/* Deliberately passing ROCE for tasks id. This is because
@@ -1997,7 +2020,13 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
		qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
					    QED_CXT_ROCE_TID_SEG, 1,
					    num_tasks, false);
		qed_cxt_set_srq_count(p_hwfn, num_srqs);

		num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);

		/* XRC SRQs populate a single ILT page */
		num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn);

		qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
	} else {
		DP_INFO(p_hwfn->cdev,
			"RDMA personality used without setting params!\n");
@@ -2163,10 +2192,17 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
		p_blk = &p_cli->pf_blks[CDUC_BLK];
		break;
	case QED_ELEM_SRQ:
		/* The first ILT page is not used for regular SRQs. Skip it. */
		iid += p_hwfn->p_cxt_mngr->xrc_srq_count;
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case QED_ELEM_XRC_SRQ:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = XRC_SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case QED_ELEM_TASK:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
@@ -2386,8 +2422,12 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
		return rc;

	/* Free TSDM CXT */
	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
				    qed_cxt_get_srq_count(p_hwfn));
	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0,
				    p_hwfn->p_cxt_mngr->xrc_srq_count);

	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ,
				    p_hwfn->p_cxt_mngr->xrc_srq_count,
				    p_hwfn->p_cxt_mngr->srq_count);

	return rc;
}
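
Taken together, the allocation and free paths above imply a fixed layout of
the TSDM ILT client: XRC SRQ contexts occupy the first page and regular SRQ
contexts follow. A sketch of the element index space, inferred from the code
above rather than from any driver documentation:

	/*
	 * TSDM element index space after this change (sketch):
	 *
	 *   [0 .. xrc_srq_count-1]                        XRC SRQ contexts
	 *   [xrc_srq_count .. xrc_srq_count+srq_count-1]  regular SRQ contexts
	 *
	 * Hence qed_cxt_dynamic_ilt_alloc() biases regular SRQ iids by
	 * xrc_srq_count, and qed_cxt_free_proto_ilt() frees the two
	 * ranges separately.
	 */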
+8 −2
@@ -82,7 +82,8 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
enum qed_cxt_elem_type {
	QED_ELEM_CXT,
	QED_ELEM_SRQ,
	QED_ELEM_TASK
	QED_ELEM_TASK,
	QED_ELEM_XRC_SRQ,
};

u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
@@ -235,7 +236,6 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type);
u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
				enum protocol_type type);
u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn);
int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);

#define QED_CTX_WORKING_MEM 0
@@ -358,6 +358,7 @@ struct qed_cxt_mngr {

	/* total number of SRQ's for this hwfn */
	u32 srq_count;
	u32 xrc_srq_count;

	/* Maximal number of L2 steering filters */
	u32 arfs_count;
@@ -372,4 +373,9 @@ u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);

u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
			      enum ilt_clients ilt_client);

u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn);

#endif
+5 −1
@@ -2269,6 +2269,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
		/* EQ */
		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
			u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn);
			enum protocol_type rdma_proto;

			if (QED_IS_ROCE_PERSONALITY(p_hwfn))
@@ -2279,7 +2280,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
							       rdma_proto,
							       NULL) * 2;
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
			/* EQ should be able to get events from all SRQs
			 * at the same time
			 */
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			num_cons =
			    qed_cxt_get_proto_cid_count(p_hwfn,
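
The added n_srq term reserves one event-queue entry per SRQ, matching the
comment above: every SRQ may raise an asynchronous event at the same time.
Schematically (not literal driver code):

	n_eqes = spq_chain_capacity	/* baseline from the SPQ chain */
	       + num_cons		/* per-connection events */
	       + 2 * MAX_NUM_VFS_BB	/* VF-related events */
	       + n_srq;			/* one async event per SRQ */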
+128 −19
@@ -212,13 +212,22 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
		goto free_rdma_port;
	}

	/* Allocate bit map for XRC Domains */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
				 QED_RDMA_MAX_XRCDS, "XRCD");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate xrcd_map,rc = %d\n", rc);
		goto free_pd_map;
	}

	/* Allocate DPI bitmap */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
				 p_hwfn->dpi_count, "DPI");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
		goto free_pd_map;
		goto free_xrcd_map;
	}

	/* Allocate bitmap for cq's. The maximum number of CQs is bound to
@@ -271,14 +280,27 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
		goto free_cid_map;
	}

	/* The first SRQ follows the last XRC SRQ. This means that the
	 * SRQ IDs start at an offset equal to max_xrc_srqs.
	 */
	p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count;
	rc = qed_rdma_bmap_alloc(p_hwfn,
				 &p_rdma_info->xrc_srq_map,
				 p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
		goto free_real_cid_map;
	}

	/* Allocate bitmap for srqs */
	p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn);
	p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count;
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
				 p_rdma_info->num_srqs, "SRQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate srq bitmap, rc = %d\n", rc);
		goto free_real_cid_map;
		goto free_xrc_srq_map;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
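
Note that each bitmap hands out IDs in its own zero-based space; the
regular-SRQ offset is applied only where an ID is exposed to firmware or the
upper layer. A sketch of the mapping with hypothetical counts:

	/*
	 * Two ID spaces (counts below are made up):
	 *
	 *   xrc_srq_map: relative 0..63   -> absolute 0..63
	 *   srq_map:     relative 0..1023 -> absolute 64..1087
	 *
	 * absolute = relative + srq_id_offset for regular SRQs, where
	 * srq_id_offset == xrc_srq_count (64 in this example).
	 */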
@@ -292,6 +314,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)

free_srq_map:
	kfree(p_rdma_info->srq_map.bitmap);
free_xrc_srq_map:
	kfree(p_rdma_info->xrc_srq_map.bitmap);
free_real_cid_map:
	kfree(p_rdma_info->real_cid_map.bitmap);
free_cid_map:
@@ -304,6 +328,8 @@ free_cq_map:
	kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
	kfree(p_rdma_info->dpi_map.bitmap);
free_xrcd_map:
	kfree(p_rdma_info->xrcd_map.bitmap);
free_pd_map:
	kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
@@ -377,6 +403,7 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);

	kfree(p_rdma_info->port);
	kfree(p_rdma_info->dev);
@@ -612,7 +639,10 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
							   QED_RDMA_CNQ_RAM);
	p_params_header->num_cnqs = params->desired_cnq;

	p_params_header->first_reg_srq_id =
	    cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset);
	p_params_header->reg_srq_base_addr =
	    cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM));
	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
		p_params_header->cq_ring_mode = 1;
	else
@@ -983,6 +1013,41 @@ static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->xrcd_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n");
		return rc;
	}

	*xrcd_id = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
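
Both handlers are exported through qed_rdma_ops (see the ops table later in
this diff), so the upper-layer driver manages XRC domains without touching
the bitmap directly. A hypothetical call sequence, not taken from qedr:

	/* Hypothetical caller; 'ops' and 'rdma_cxt' come from the qed core. */
	u16 xrcd_id;
	int rc;

	rc = ops->rdma_alloc_xrcd(rdma_cxt, &xrcd_id);
	if (rc)
		return rc;
	/* ... create XRC SRQs/QPs bound to xrcd_id ... */
	ops->rdma_dealloc_xrcd(rdma_cxt, xrcd_id);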

static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
@@ -1306,6 +1371,8 @@ qed_rdma_create_qp(void *rdma_cxt,
	qp->resp_offloaded = false;
	qp->e2e_flow_control_en = qp->use_srq ? false : true;
	qp->stats_queue = in_params->stats_queue;
	qp->qp_type = in_params->qp_type;
	qp->xrcd_id = in_params->xrcd_id;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
@@ -1418,6 +1485,18 @@ static int qed_rdma_modify_qp(void *rdma_cxt,
			   qp->cur_state);
	}

	switch (qp->qp_type) {
	case QED_RDMA_QP_TYPE_XRC_INI:
		qp->has_req = 1;
		break;
	case QED_RDMA_QP_TYPE_XRC_TGT:
		qp->has_resp = 1;
		break;
	default:
		qp->has_req = 1;
		qp->has_resp = 1;
	}
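
The switch records which half of the QP is offloaded: an XRC initiator only
drives the requester (send) side, an XRC target only serves the responder
(receive) side, and every other QP type carries both. In table form:

	/*
	 * QP type         has_req  has_resp
	 * --------------  -------  --------
	 * XRC_INI         yes      no
	 * XRC_TGT         no       yes
	 * RC and others   yes      yes
	 */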

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		enum qed_iwarp_qp_state new_state =
		    qed_roce2iwarp_state(qp->cur_state);
@@ -1657,6 +1736,15 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
	return QED_AFFIN_HWFN(cdev);
}

static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn,
					      bool is_xrc)
{
	if (is_xrc)
		return &p_hwfn->p_rdma_info->xrc_srq_map;

	return &p_hwfn->p_rdma_info->srq_map;
}

static int qed_rdma_modify_srq(void *rdma_cxt,
			       struct qed_rdma_modify_srq_in_params *in_params)
{
@@ -1686,8 +1774,8 @@ static int qed_rdma_modify_srq(void *rdma_cxt,
	if (rc)
		return rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x",
		   in_params->srq_id);
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
		   in_params->srq_id, in_params->is_xrc);

	return rc;
}
@@ -1702,6 +1790,7 @@ qed_rdma_destroy_srq(void *rdma_cxt,
	struct qed_spq_entry *p_ent;
	struct qed_bmap *bmap;
	u16 opaque_fid;
	u16 offset;
	int rc;

	opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1723,14 +1812,16 @@ qed_rdma_destroy_srq(void *rdma_cxt,
	if (rc)
		return rc;

	bmap = &p_hwfn->p_rdma_info->srq_map;
	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
	offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
	qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x",
		   in_params->srq_id);
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
		   in_params->srq_id, in_params->is_xrc);

	return rc;
}
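
Create and destroy are symmetric: create adds the offset before handing the
ID out, destroy subtracts it before releasing the bitmap bit, and XRC SRQs
always use an offset of zero. A minimal helper pair capturing the rule
(hypothetical names, not part of the driver):

	/* Hypothetical helpers mirroring the offset rule above. */
	static u16 srq_rel_to_abs(struct qed_hwfn *p_hwfn, u32 rel, bool is_xrc)
	{
		return (u16)rel + (is_xrc ? 0 : p_hwfn->p_rdma_info->srq_id_offset);
	}

	static u32 srq_abs_to_rel(struct qed_hwfn *p_hwfn, u16 abs_id, bool is_xrc)
	{
		return abs_id - (is_xrc ? 0 : p_hwfn->p_rdma_info->srq_id_offset);
	}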
@@ -1748,24 +1839,26 @@ qed_rdma_create_srq(void *rdma_cxt,
	u16 opaque_fid, srq_id;
	struct qed_bmap *bmap;
	u32 returned_id;
	u16 offset;
	int rc;

	bmap = &p_hwfn->p_rdma_info->srq_map;
	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	if (rc) {
		DP_NOTICE(p_hwfn, "failed to allocate srq id\n");
		DP_NOTICE(p_hwfn,
			  "failed to allocate xrc/srq id (is_xrc=%u)\n",
			  in_params->is_xrc);
		return rc;
	}

	elem_type = QED_ELEM_SRQ;
	elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ);
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
	if (rc)
		goto err;
	/* returned id is no greater than u16 */
	srq_id = (u16)returned_id;

	opaque_fid = p_hwfn->hw_info.opaque_fid;

	opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1782,20 +1875,34 @@ qed_rdma_create_srq(void *rdma_cxt,
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
	p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
	p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
	p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
	p_ramrod->page_size = cpu_to_le16(in_params->page_size);
	DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
	offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
	srq_id = (u16)returned_id + offset;
	p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);

	if (in_params->is_xrc) {
		SET_FIELD(p_ramrod->flags,
			  RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1);
		SET_FIELD(p_ramrod->flags,
			  RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
			  in_params->reserved_key_en);
		p_ramrod->xrc_srq_cq_cid =
			cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				     in_params->cq_cid);
		p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id);
	}
	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	out_params->srq_id = srq_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "SRQ created Id = %x\n", out_params->srq_id);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "XRC/SRQ created Id = %x (is_xrc=%u)\n",
		   out_params->srq_id, in_params->is_xrc);
	return rc;

err:
@@ -1961,6 +2068,8 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
	.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
	.rdma_alloc_pd = &qed_rdma_alloc_pd,
	.rdma_dealloc_pd = &qed_rdma_free_pd,
	.rdma_alloc_xrcd = &qed_rdma_alloc_xrcd,
	.rdma_dealloc_xrcd = &qed_rdma_free_xrcd,
	.rdma_create_cq = &qed_rdma_create_cq,
	.rdma_destroy_cq = &qed_rdma_destroy_cq,
	.rdma_create_qp = &qed_rdma_create_qp,
+19 −0
@@ -63,6 +63,11 @@
#define QED_RDMA_MAX_CQE_32_BIT             (0x7FFFFFFF - 1)
#define QED_RDMA_MAX_CQE_16_BIT             (0x7FFF - 1)

/* Up to 2^16 XRC Domains are supported, but the actual number of supported XRC
 * SRQs is much smaller so there's no need to have that many domains.
 */
#define QED_RDMA_MAX_XRCDS      (roundup_pow_of_two(RDMA_MAX_XRC_SRQS))
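
roundup_pow_of_two() keeps the XRCD map a power-of-two size. For example
(96 is a made-up stand-in for RDMA_MAX_XRC_SRQS):

	/* roundup_pow_of_two(96) == 128, so the XRCD map would get 128 bits */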

enum qed_rdma_toggle_bit {
	QED_RDMA_TOGGLE_BIT_CLEAR = 0,
	QED_RDMA_TOGGLE_BIT_SET = 1
@@ -81,9 +86,11 @@ struct qed_rdma_info {

	struct qed_bmap cq_map;
	struct qed_bmap pd_map;
	struct qed_bmap xrcd_map;
	struct qed_bmap tid_map;
	struct qed_bmap qp_map;
	struct qed_bmap srq_map;
	struct qed_bmap xrc_srq_map;
	struct qed_bmap cid_map;
	struct qed_bmap tcp_cid_map;
	struct qed_bmap real_cid_map;
@@ -111,6 +118,7 @@ struct qed_rdma_qp {
	u32 qpid;
	u16 icid;
	enum qed_roce_qp_state cur_state;
	enum qed_rdma_qp_type qp_type;
	enum qed_iwarp_qp_state iwarp_state;
	bool use_srq;
	bool signal_all;
@@ -153,18 +161,21 @@ struct qed_rdma_qp {
	dma_addr_t orq_phys_addr;
	u8 orq_num_pages;
	bool req_offloaded;
	bool has_req;

	/* responder */
	u8 max_rd_atomic_resp;
	u32 rq_psn;
	u16 rq_cq_id;
	u16 rq_num_pages;
	u16 xrcd_id;
	dma_addr_t rq_pbl_ptr;
	void *irq;
	dma_addr_t irq_phys_addr;
	u8 irq_num_pages;
	bool resp_offloaded;
	u32 cq_prod;
	bool has_resp;

	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
@@ -174,6 +185,14 @@ struct qed_rdma_qp {
	struct qed_iwarp_ep *ep;
};

static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp)
{
	if (qp->qp_type == QED_RDMA_QP_TYPE_XRC_TGT ||
	    qp->qp_type == QED_RDMA_QP_TYPE_XRC_INI)
		return true;

	return false;
}
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);