Commit 2a3d923f authored by Lijun Ou's avatar Lijun Ou Committed by Jason Gunthorpe
Browse files

RDMA/hns: Replace magic numbers with #defines



This patch makes the code more readable by removing magic numbers.

Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 669cefb6
Loading
Loading
Loading
Loading
+5 −3
Original line number Diff line number Diff line
@@ -78,7 +78,8 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2);
	bitmap_fill(pgdir->order1,
		    HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
@@ -116,7 +117,7 @@ found:
	db->u.pgdir	= pgdir;
	db->index	= i;
	db->db_record	= pgdir->page + db->index;
	db->dma		= pgdir->db_dma  + db->index * 4;
	db->dma		= pgdir->db_dma  + db->index * HNS_ROCE_DB_UNIT_SIZE;
	db->order	= order;

	return 0;
@@ -170,7 +171,8 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	if (bitmap_full(db->u.pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2)) {
	if (bitmap_full(db->u.pgdir->order1,
			HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT)) {
		dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
				  db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
+32 −5
Original line number Diff line number Diff line
@@ -37,9 +37,12 @@

#define DRV_NAME "hns_roce"

/* hip08 is a pci device, it includes two versions according to pci revision id */
#define PCI_REVISION_ID_HIP08_A			0x20
#define PCI_REVISION_ID_HIP08_B			0x21

#define HNS_ROCE_HW_VER1	('h' << 24 | 'i' << 16 | '0' << 8 | '6')

#define MAC_ADDR_OCTET_NUM			6
#define HNS_ROCE_MAX_MSG_LEN			0x80000000

#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
@@ -48,6 +51,10 @@

#define HNS_ROCE_BA_SIZE			(32 * 4096)

#define BA_BYTE_LEN				8

#define BITS_PER_BYTE				8

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM			0x40
#define HNS_ROCE_MIN_WQE_NUM			0x20
@@ -55,6 +62,7 @@
/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM		0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM		0x100000
#define HNS_ROCE_MAX_SGE_NUM			2

#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS	20
#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT	\
@@ -64,6 +72,9 @@

#define HNS_ROCE_MAX_IRQ_NUM			128

#define HNS_ROCE_SGE_IN_WQE			2
#define HNS_ROCE_SGE_SHIFT			4

#define EQ_ENABLE				1
#define EQ_DISABLE				0

@@ -81,6 +92,7 @@
#define HNS_ROCE_MAX_PORTS			6
#define HNS_ROCE_MAX_GID_NUM			16
#define HNS_ROCE_GID_SIZE			16
#define HNS_ROCE_SGE_SIZE			16

#define HNS_ROCE_HOP_NUM_0			0xff

@@ -111,6 +123,8 @@
#define PAGES_SHIFT_24				24
#define PAGES_SHIFT_32				32

#define HNS_ROCE_PCI_BAR_NUM			2

#define HNS_ROCE_IDX_QUE_ENTRY_SZ		4
#define SRQ_DB_REG				0x230

@@ -213,6 +227,9 @@ enum hns_roce_mtt_type {
	MTT_TYPE_IDX
};

#define HNS_ROCE_DB_TYPE_COUNT			2
#define HNS_ROCE_DB_UNIT_SIZE			4

enum {
	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};
@@ -413,8 +430,8 @@ struct hns_roce_buf {
struct hns_roce_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / 2);
	unsigned long		*bits[2];
	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	unsigned long		*bits[HNS_ROCE_DB_TYPE_COUNT];
	u32			*page;
	dma_addr_t		db_dma;
};
@@ -535,7 +552,7 @@ struct hns_roce_av {
	u8          hop_limit;
	__le32      sl_tclass_flowlabel;
	u8          dgid[HNS_ROCE_GID_SIZE];
	u8          mac[6];
	u8          mac[ETH_ALEN];
	__le16      vlan;
	bool	    vlan_en;
};
@@ -940,6 +957,16 @@ struct hns_roce_hw {
	const struct ib_device_ops *hns_roce_dev_srq_ops;
};

enum hns_phy_state {
	HNS_ROCE_PHY_SLEEP		= 1,
	HNS_ROCE_PHY_POLLING		= 2,
	HNS_ROCE_PHY_DISABLED		= 3,
	HNS_ROCE_PHY_TRAINING		= 4,
	HNS_ROCE_PHY_LINKUP		= 5,
	HNS_ROCE_PHY_LINKERR		= 6,
	HNS_ROCE_PHY_TEST		= 7
};

struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct platform_device  *pdev;
@@ -962,7 +989,7 @@ struct hns_roce_dev {
	struct hns_roce_caps	caps;
	struct xarray		qp_table_xa;

	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
	u64			sys_image_guid;
	u32                     vendor_id;
	u32                     vendor_part_id;
+9 −9
Original line number Diff line number Diff line
@@ -165,7 +165,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
					     + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
		mhop->hop_num = hr_dev->caps.mtt_hop_num;
		break;
	case HEM_TYPE_CQE:
@@ -173,7 +173,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
					     + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
		mhop->hop_num = hr_dev->caps.cqe_hop_num;
		break;
	case HEM_TYPE_SRQWQE:
@@ -181,7 +181,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
					    + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
		mhop->hop_num = hr_dev->caps.srqwqe_hop_num;
		break;
	case HEM_TYPE_IDX:
@@ -189,7 +189,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
				       + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
				       + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
		mhop->hop_num = hr_dev->caps.idx_hop_num;
		break;
	default:
@@ -206,7 +206,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
	 * MTT/CQE alloc hem for bt pages.
	 */
	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
	chunk_ba_num = mhop->bt_chunk_size / 8;
	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
	chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
			      mhop->bt_chunk_size;
	table_idx = (*obj & (table->num_obj - 1)) /
@@ -436,7 +436,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
	buf_chunk_size = mhop.buf_chunk_size;
	bt_chunk_size = mhop.bt_chunk_size;
	hop_num = mhop.hop_num;
	chunk_ba_num = bt_chunk_size / 8;
	chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;

	bt_num = hns_roce_get_bt_num(table->type, hop_num);
	switch (bt_num) {
@@ -646,7 +646,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,

	bt_chunk_size = mhop.bt_chunk_size;
	hop_num = mhop.hop_num;
	chunk_ba_num = bt_chunk_size / 8;
	chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;

	bt_num = hns_roce_get_bt_num(table->type, hop_num);
	switch (bt_num) {
@@ -800,7 +800,7 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
		i = mhop.l0_idx;
		j = mhop.l1_idx;
		if (mhop.hop_num == 2)
			hem_idx = i * (mhop.bt_chunk_size / 8) + j;
			hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
		else if (mhop.hop_num == 1 ||
			 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
			hem_idx = i;
@@ -1000,7 +1000,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
		}
		obj_per_chunk = buf_chunk_size / obj_size;
		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
		bt_chunk_num = bt_chunk_size / 8;
		bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
		if (type >= HEM_TYPE_MTT)
			num_bt_l0 = bt_chunk_num;

+1 −1
Original line number Diff line number Diff line
@@ -818,7 +818,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
		attr.dest_qp_num	= hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       MAC_ADDR_OCTET_NUM);
		       ETH_ALEN);

		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
+36 −29
Original line number Diff line number Diff line
@@ -3426,7 +3426,9 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
	else
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       hr_qp->sq.max_gs >
			       HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
			       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);

	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
@@ -3708,13 +3710,14 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGID_IDX_M,
		       V2_QPC_BYTE_20_SGID_IDX_S, 0);
	memcpy(&(context->dmac), dmac, 4);
	memcpy(&(context->dmac), dmac, sizeof(u32));
	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
		       V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
	qpc_mask->dmac = 0;
	roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
		       V2_QPC_BYTE_52_DMAC_S, 0);

	/* mtu*(2^LP_PKTN_INI) should not be bigger than 1 message length 64kb */
	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
@@ -3756,6 +3759,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);

	/* rocee sends 2^lp_sgen_ini segments every time */
	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
@@ -3810,14 +3814,15 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);

	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	context->sq_cur_sge_blk_addr =
		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
				      ((u32)(mtts[hr_qp->sge.offset / page_size]
				      >> PAGE_ADDR_SHIFT)) : 0;
	context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
		       hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
		       ((u32)(mtts[hr_qp->sge.offset / page_size] >>
		       PAGE_ADDR_SHIFT)) : 0;
	roce_set_field(context->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
		       HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
		       (mtts[hr_qp->sge.offset / page_size] >>
		       (32 + PAGE_ADDR_SHIFT)) : 0);
	qpc_mask->sq_cur_sge_blk_addr = 0;
@@ -4144,7 +4149,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
		roce_set_field(context->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
			       attr->sq_psn >> 16);
			       attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
		roce_set_field(qpc_mask->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
@@ -4374,11 +4379,12 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
						  V2_QPC_BYTE_56_DQPN_M,
						  V2_QPC_BYTE_56_DQPN_S);
	qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
						  V2_QPC_BYTE_76_RRE_S)) << 2) |
				    V2_QPC_BYTE_76_RRE_S)) << V2_QP_RWE_S) |
				    ((roce_get_bit(context->byte_76_srqn_op_en,
						  V2_QPC_BYTE_76_RWE_S)) << 1) |
				    V2_QPC_BYTE_76_RWE_S)) << V2_QP_RRE_S) |
				    ((roce_get_bit(context->byte_76_srqn_op_en,
						  V2_QPC_BYTE_76_ATE_S)) << 3);
				    V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);

	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
		struct ib_global_route *grh =
@@ -5150,8 +5156,8 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
					  eq->l1_dma[i]);

			for (j = 0; j < bt_chk_sz / 8; j++) {
				idx = i * (bt_chk_sz / 8) + j;
			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
				idx = i * (bt_chk_sz / BA_BYTE_LEN) + j;
				if ((i == eq->l0_last_num - 1)
				     && j == eq->l1_last_num - 1) {
					eqe_alloc = (buf_chk_sz / eq->eqe_size)
@@ -5367,9 +5373,9 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);

	ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
		  / buf_chk_sz;
	bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
	ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
			      buf_chk_sz);
	bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);

	/* hop_num = 0 */
	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
@@ -5414,12 +5420,12 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
		goto err_dma_alloc_l0;

	if (mhop_num == 1) {
		if (ba_num > (bt_chk_sz / 8))
		if (ba_num > (bt_chk_sz / BA_BYTE_LEN))
			dev_err(dev, "ba_num %d is too large for 1 hop\n",
				ba_num);

		/* alloc buf */
		for (i = 0; i < bt_chk_sz / 8; i++) {
		for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
			if (eq_buf_cnt + 1 < ba_num) {
				size = buf_chk_sz;
			} else {
@@ -5443,7 +5449,7 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,

	} else if (mhop_num == 2) {
		/* alloc L1 BT and buf */
		for (i = 0; i < bt_chk_sz / 8; i++) {
		for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
			eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
							  &(eq->l1_dma[i]),
							  GFP_KERNEL);
@@ -5451,8 +5457,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
				goto err_dma_alloc_l1;
			*(eq->bt_l0 + i) = eq->l1_dma[i];

			for (j = 0; j < bt_chk_sz / 8; j++) {
				idx = i * bt_chk_sz / 8 + j;
			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
				idx = i * bt_chk_sz / BA_BYTE_LEN + j;
				if (eq_buf_cnt + 1 < ba_num) {
					size = buf_chk_sz;
				} else {
@@ -5497,8 +5503,8 @@ err_dma_alloc_l1:
		dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
				  eq->l1_dma[i]);

		for (j = 0; j < bt_chk_sz / 8; j++) {
			idx = i * bt_chk_sz / 8 + j;
		for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
			idx = i * bt_chk_sz / BA_BYTE_LEN + j;
			dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
					  eq->buf_dma[idx]);
		}
@@ -5521,11 +5527,11 @@ err_dma_alloc_buf:
			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
					  eq->l1_dma[i]);

			for (j = 0; j < bt_chk_sz / 8; j++) {
			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
				if (i == record_i && j >= record_j)
					break;

				idx = i * bt_chk_sz / 8 + j;
				idx = i * bt_chk_sz / BA_BYTE_LEN + j;
				dma_free_coherent(dev, buf_chk_sz,
						  eq->buf[idx],
						  eq->buf_dma[idx]);
@@ -5982,7 +5988,7 @@ static int find_empty_entry(struct hns_roce_idx_que *idx_que)
	bit_num = ffs(idx_que->bitmap[i]);
	idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1));

	return i * sizeof(u64) * 8 + (bit_num - 1);
	return i * BITS_PER_LONG_LONG + (bit_num - 1);
}

static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
@@ -6058,7 +6064,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
		 */
		wmb();

		srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | srq->srqn;
		srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
				(srq->srqn & V2_DB_BYTE_4_TAG_M);
		srq_db.parameter = srq->head;

		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
Loading