Commit 9c89cc9b authored by Jakub Kicinski

Merge branch 'net-hns3-misc-updates-for-next'

Huazhong Tan says:

====================
net: hns3: misc updates for -next

This series includes some misc updates for the HNS3 ethernet driver.

 #1 adds support for 1280 queues (see the sketch after this list).
 #2 adds a mapping for BAR45, which is needed by the RoCE client.
 #3 extends the interrupt resources.
 #4 & #5 add support for querying the firmware's calculated shaping parameters.
====================
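
For #1: queue ids can now go past 1024, and queues at or above
HCLGE_TQP_MAX_SIZE_DEV_V2 (1024) are reached through an extended register
window. A minimal sketch of the offset arithmetic, mirroring the
hclge_alloc_tqps() change further down (hw_io_base stands in for the mapped
BAR base and the example index is hypothetical):

	/* queues 0..1023 keep the original window */
	if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
		io_base = hw_io_base + HCLGE_TQP_REG_OFFSET +
			  i * HCLGE_TQP_REG_SIZE;
	else
		/* queues 1024..1279 fall into the extended window */
		io_base = hw_io_base + HCLGE_TQP_REG_OFFSET +
			  HCLGE_TQP_EXT_REG_OFFSET +
			  (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * HCLGE_TQP_REG_SIZE;

	/* e.g. i = 1100: 0x80000 + 0x100 + 76 * 0x200 = 0x89900 */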

Link: https://lore.kernel.org/r/1605863783-36995-1-git-send-email-tanhuazhong@huawei.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 16de5970 c331ecf1
drivers/net/ethernet/hisilicon/hns3/hnae3.h  +1 −0
@@ -689,6 +689,7 @@ struct hnae3_knic_private_info {
struct hnae3_roce_private_info {
	struct net_device *netdev;
	void __iomem *roce_io_base;
	void __iomem *roce_mem_base;
	int base_vector;
	int num_vectors;

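For illustration, a hypothetical sketch of how a RoCE client could pick up
the new mapping from its hnae3 handle (the helper name is made up; only the
roce_mem_base field comes from this patch):

	/* hypothetical helper in a RoCE client */
	static void __iomem *roce_get_mem_base(struct hnae3_handle *handle)
	{
		/* BAR45 device memory mapped by the hclge layer;
		 * NULL if the device has no device memory.
		 */
		return handle->rinfo.roce_mem_base;
	}
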
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c  +0 −3
@@ -3645,8 +3645,6 @@ map_ring_fail:

static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
#define HNS3_VECTOR_PF_MAX_NUM		64

	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
@@ -3659,7 +3657,6 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
	/* RSS size, cpu online and vector_num should be the same */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);
	vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);

	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h  +13 −10
@@ -307,6 +307,9 @@ enum hclge_opcode_type {
#define HCLGE_TQP_REG_OFFSET		0x80000
#define HCLGE_TQP_REG_SIZE		0x200

#define HCLGE_TQP_MAX_SIZE_DEV_V2	1024
#define HCLGE_TQP_EXT_REG_OFFSET	0x100

#define HCLGE_RCB_INIT_QUERY_TIMEOUT	10
#define HCLGE_RCB_INIT_FLAG_EN_B	0
#define HCLGE_RCB_INIT_FLAG_FINI_B	8
@@ -336,7 +339,9 @@ enum hclge_int_type {
};

struct hclge_ctrl_vector_chain_cmd {
	u8 int_vector_id;
#define HCLGE_VECTOR_ID_L_S	0
#define HCLGE_VECTOR_ID_L_M	GENMASK(7, 0)
	u8 int_vector_id_l;
	u8 int_cause_num;
#define HCLGE_INT_TYPE_S	0
#define HCLGE_INT_TYPE_M	GENMASK(1, 0)
@@ -346,7 +351,9 @@ struct hclge_ctrl_vector_chain_cmd {
#define HCLGE_INT_GL_IDX_M	GENMASK(14, 13)
	__le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
	u8 vfid;
	u8 rsv;
#define HCLGE_VECTOR_ID_H_S	8
#define HCLGE_VECTOR_ID_H_M	GENMASK(15, 8)
	u8 int_vector_id_h;
};

#define HCLGE_MAX_TC_NUM		8
@@ -470,16 +477,13 @@ struct hclge_pf_res_cmd {
	__le16 tqp_num;
	__le16 buf_size;
	__le16 msixcap_localid_ba_nic;
	__le16 msixcap_localid_ba_rocee;
#define HCLGE_MSIX_OFT_ROCEE_S		0
#define HCLGE_MSIX_OFT_ROCEE_M		GENMASK(15, 0)
#define HCLGE_PF_VEC_NUM_S		0
#define HCLGE_PF_VEC_NUM_M		GENMASK(7, 0)
	__le16 pf_intr_vector_number;
	__le16 msixcap_localid_number_nic;
	__le16 pf_intr_vector_number_roce;
	__le16 pf_own_fun_number;
	__le16 tx_buf_size;
	__le16 dv_buf_size;
	__le32 rsv[2];
	__le16 ext_tqp_num;
	u8 rsv[6];
};

#define HCLGE_CFG_OFFSET_S	0
@@ -643,7 +647,6 @@ struct hclge_config_mac_speed_dup_cmd {
	u8 rsv[22];
};

#define HCLGE_RING_ID_MASK		GENMASK(9, 0)
#define HCLGE_TQP_ENABLE_B		0

#define HCLGE_MAC_CFG_AN_EN_B		0
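
Since vector ids can now exceed 255, struct hclge_ctrl_vector_chain_cmd
carries the id in two byte-wide fields. A small worked example of the split
using the masks above (the value 300 is hypothetical):

	u16 vector_id = 300;	/* 0x012c */
	u8 id_l = hnae3_get_field(vector_id, HCLGE_VECTOR_ID_L_M,
				  HCLGE_VECTOR_ID_L_S);	/* 0x2c */
	u8 id_h = hnae3_get_field(vector_id, HCLGE_VECTOR_ID_H_M,
				  HCLGE_VECTOR_ID_H_S);	/* 0x01 */
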
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c  +42 −6
@@ -498,6 +498,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
	dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
		 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
	dev_info(&hdev->pdev->dev, "PG_P flag: %#x\n", pg_shap_cfg_cmd->flag);
	dev_info(&hdev->pdev->dev, "PG_P pg_rate: %u(Mbps)\n",
		 le32_to_cpu(pg_shap_cfg_cmd->pg_rate));

	cmd = HCLGE_OPC_TM_PORT_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -508,6 +511,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
		 le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
	dev_info(&hdev->pdev->dev, "PORT flag: %#x\n", port_shap_cfg_cmd->flag);
	dev_info(&hdev->pdev->dev, "PORT port_rate: %u(Mbps)\n",
		 le32_to_cpu(port_shap_cfg_cmd->port_rate));

	cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -655,6 +661,9 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
	dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
		 le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
	dev_info(&hdev->pdev->dev, "PRI_C flag: %#x\n", shap_cfg_cmd->flag);
	dev_info(&hdev->pdev->dev, "PRI_C pri_rate: %u(Mbps)\n",
		 le32_to_cpu(shap_cfg_cmd->pri_rate));

	cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -666,6 +675,9 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
	dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
		 le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
	dev_info(&hdev->pdev->dev, "PRI_P flag: %#x\n", shap_cfg_cmd->flag);
	dev_info(&hdev->pdev->dev, "PRI_P pri_rate: %u(Mbps)\n",
		 le32_to_cpu(shap_cfg_cmd->pri_rate));

	hclge_dbg_dump_tm_pg(hdev);

@@ -681,14 +693,17 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_tqp_tx_queue_tc_cmd *tc;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	int queue_id, group_id;
	u32 qset_mapping[32];
	int tc_id, qset_id;
	int pri_id, ret;
	u16 qs_id_l;
	u16 qs_id_h;
	u8 grp_num;
	u32 i;

	ret = kstrtouint(cmd_buf, 0, &queue_id);
@@ -701,7 +716,24 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF;
	qset_id = le16_to_cpu(nq_to_qs_map->qset_id);

	/* convert qset_id to the following format, drop the vld bit
	 *            | qs_id_h | vld | qs_id_l |
	 * qset_id:   | 15 ~ 11 |  10 |  9 ~ 0  |
	 *             \         \   /         /
	 *              \         \ /         /
	 * qset_id: | 15 | 14 ~ 10 |  9 ~ 0  |
	 */
	qs_id_l = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);
	qset_id = 0;
	hnae3_set_field(qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
			qs_id_h);

	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
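
A worked example of the conversion above, assuming the HCLGE_TM_QS_ID_*
masks match the bit layout in the comment (the raw value is hypothetical):
a firmware qset_id of 0x0c05 has qs_id_h = 1 (bits 15~11), vld = 1 (bit 10)
and qs_id_l = 5 (bits 9~0), so the repacked id is (1 << 10) | 5 = 0x0405.
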
@@ -731,9 +763,11 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
		return;
	}

	grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
		  HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	for (group_id = 0; group_id < 32; group_id++) {
	for (group_id = 0; group_id < grp_num; group_id++) {
		hclge_cmd_setup_basic_desc(&desc, cmd, true);
		bp_to_qs_map_cmd->tc_id = tc_id;
		bp_to_qs_map_cmd->qs_group_id = group_id;
@@ -748,7 +782,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
	dev_info(&hdev->pdev->dev, "index | tm bp qset mapping:\n");

	i = 0;
	for (group_id = 0; group_id < 4; group_id++) {
	for (group_id = 0; group_id < grp_num / 8; group_id++) {
		dev_info(&hdev->pdev->dev,
			 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
			 group_id * 256, qset_mapping[(u32)(i + 7)],
@@ -1379,6 +1413,7 @@ static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
	u8 ir_u, ir_b, ir_s, bs_b, bs_s;
	struct hclge_desc desc;
	u32 shapping_para;
	u32 rate;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
@@ -1400,10 +1435,11 @@ static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
	ir_s = hclge_tm_get_field(shapping_para, IR_S);
	bs_b = hclge_tm_get_field(shapping_para, BS_B);
	bs_s = hclge_tm_get_field(shapping_para, BS_S);
	rate = le32_to_cpu(shap_cfg_cmd->qs_rate);

	dev_info(&hdev->pdev->dev,
		 "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n",
		 qsid, ir_b, ir_u, ir_s, bs_b, bs_s);
		 "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u, flag:%#x, rate:%u(Mbps)\n",
		 qsid, ir_b, ir_u, ir_s, bs_b, bs_s, shap_cfg_cmd->flag, rate);
}

static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c  +112 −48
@@ -556,7 +556,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
@@ -576,7 +576,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
@@ -886,7 +886,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num);
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
@@ -905,35 +906,24 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* nic's msix numbers is always equals to the roce's. */
		hdev->num_nic_msi = hdev->num_roce_msi;
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi =
		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		hdev->num_nic_msi = hdev->num_msi;
	}

	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
@@ -1598,8 +1588,20 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		tqp++;
	}
@@ -2412,17 +2414,18 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;
	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;
	roce->rinfo.roce_io_base = hdev->hw.io_base;
	roce->rinfo.roce_mem_base = hdev->hw.mem_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
@@ -2456,7 +2459,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)

	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				hdev->roce_base_msix_offset;
				hdev->num_nic_msi;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
@@ -4129,6 +4132,30 @@ struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
		return container_of(handle, struct hclge_vport, nic);
}

static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
				  struct hnae3_vector_info *vector_info)
{
#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64

	vector_info->vector = pci_irq_vector(hdev->pdev, idx);

	/* need an extended offset to configure vectors >= 64 */
	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
		vector_info->io_addr = hdev->hw.io_base +
				HCLGE_VECTOR_REG_BASE +
				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
	else
		vector_info->io_addr = hdev->hw.io_base +
				HCLGE_VECTOR_EXT_REG_BASE +
				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
				HCLGE_VECTOR_REG_OFFSET_H +
				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
				HCLGE_VECTOR_REG_OFFSET;

	hdev->vector_status[idx] = hdev->vport[0].vport_id;
	hdev->vector_irq[idx] = vector_info->vector;
}

static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
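
Reading hclge_get_vector_info() above: idx counts from 1 (vector 0 is the
PF's misc interrupt), so doorbell (idx - 1) is what gets located. As a
hypothetical example with idx = 70, doorbell 69 lands in the extended
window at

	io_base + HCLGE_VECTOR_EXT_REG_BASE
		+ (69 / 64) * HCLGE_VECTOR_REG_OFFSET_H	/* group 1 */
		+ (69 % 64) * HCLGE_VECTOR_REG_OFFSET	/* slot 5 */
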
@@ -4136,23 +4163,16 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;
	u16 i = 0;
	u16 j;

	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
		while (++i < hdev->num_nic_msi) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				hclge_get_vector_info(hdev, i, vector);
				vector++;
				alloc++;

@@ -4701,7 +4721,12 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;
	req->int_vector_id_l = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_L_M,
					       HCLGE_VECTOR_ID_L_S);
	req->int_vector_id_h = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_H_M,
					       HCLGE_VECTOR_ID_H_S);

	i = 0;
	for (node = ring_chain; node; node = node->next) {
@@ -4733,7 +4758,14 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
			req->int_vector_id_l =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_L_M,
						HCLGE_VECTOR_ID_L_S);
			req->int_vector_id_h =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_H_M,
						HCLGE_VECTOR_ID_H_S);
		}
	}

@@ -6852,7 +6884,7 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->tqp_id = cpu_to_le16(tqp_id);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
@@ -9314,7 +9346,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	req->tqp_id = cpu_to_le16(queue_id);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

@@ -9337,7 +9369,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	req->tqp_id = cpu_to_le16(queue_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
@@ -9877,6 +9909,28 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
	}
}

static int hclge_dev_mem_map(struct hclge_dev *hdev)
{
#define HCLGE_MEM_BAR		4

	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw = &hdev->hw;

	/* if the device does not have device memory, return directly */
	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
		return 0;

	hw->mem_base = devm_ioremap_wc(&pdev->dev,
				       pci_resource_start(pdev, HCLGE_MEM_BAR),
				       pci_resource_len(pdev, HCLGE_MEM_BAR));
	if (!hw->mem_base) {
		dev_err(&pdev->dev, "failed to map device memory\n");
		return -EFAULT;
	}

	return 0;
}

static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
@@ -9915,9 +9969,16 @@ static int hclge_pci_init(struct hclge_dev *hdev)
		goto err_clr_master;
	}

	ret = hclge_dev_mem_map(hdev);
	if (ret)
		goto err_unmap_io_base;

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_unmap_io_base:
	pcim_iounmap(pdev, hdev->hw.io_base);
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
@@ -9931,6 +9992,9 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->hw.mem_base)
		devm_iounmap(&pdev->dev, hdev->hw.mem_base);

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);