Commit fcd71efd authored by David S. Miller

Merge branch 'hns3-next'



Huazhong Tan says:

====================
net: hns3: some code optimizations & cleanups & bugfixes

[patch 01/12] fixes a TX timeout issue.

[patch 02/12 - 04/12] add some patches related to the TM module.

[patch 05/12] fixes a compile warning.

[patch 06/12] adds Asym Pause support for autoneg.

[patch 07/12] optimizes the error handler for VF reset.

[patch 08/12] deals with the empty interrupt case.

[patch 09/12 - 12/12] add some cleanups & optimizations.
====================
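
Editor's note: most of the hunks below revolve around two ideas from this series: the two ring-clearing helpers are folded into a single hns3_clear_all_ring() that takes a force flag, and the per-queue netdev_tx_reset_queue() calls are split out into hns3_reset_tx_queue() so both the netdev-down and reset/uninit paths can reuse them. The user-space sketch below only models that control flow; apart from the two names taken from the diff, every type and helper here is a stand-in, not the driver's real code.

/*
 * Editorial sketch (not driver code): one clear helper with a 'force' flag
 * instead of two near-duplicate functions, and the TX queue reset in its
 * own helper so every caller can reuse it.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_TQPS 4

struct ring { int id; };

static void clear_tx_ring(struct ring *r)       { printf("clear tx ring %d\n", r->id); }
static void clear_rx_ring(struct ring *r)       { printf("clear rx ring %d\n", r->id); }
static void force_clear_rx_ring(struct ring *r) { printf("force clear rx ring %d\n", r->id); }
static void reset_tx_queue(int q)               { printf("reset tx queue %d\n", q); }

/* One helper for both the graceful (netdev down) and forced (uninit) cases. */
static void clear_all_ring(struct ring *tx, struct ring *rx, int num, bool force)
{
	for (int i = 0; i < num; i++) {
		clear_tx_ring(&tx[i]);
		/* keep clearing the remaining rings even if one RX ring fails */
		if (force)
			force_clear_rx_ring(&rx[i]);
		else
			clear_rx_ring(&rx[i]);
	}
}

static void reset_all_tx_queue(int num)
{
	for (int i = 0; i < num; i++)
		reset_tx_queue(i);	/* models netdev_tx_reset_queue() */
}

int main(void)
{
	struct ring tx[NUM_TQPS], rx[NUM_TQPS];

	for (int i = 0; i < NUM_TQPS; i++)
		tx[i].id = rx[i].id = i;

	clear_all_ring(tx, rx, NUM_TQPS, false);	/* netdev-down path */
	reset_all_tx_queue(NUM_TQPS);

	clear_all_ring(tx, rx, NUM_TQPS, true);		/* reset/uninit path */
	reset_all_tx_queue(NUM_TQPS);

	return 0;
}

Splitting the queue reset out of the clear loop is what lets hns3_nic_net_down() skip ring clearing when a reset is already in progress while still resetting the TX queue state unconditionally, as the hunks below show.
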

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5b18c705 82c8ae6e
+27 −39
@@ -28,8 +28,7 @@
#define hns3_set_field(origin, shift, val)	((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S)	DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_ring(struct hnae3_handle *h);
static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
static void hns3_remove_hw_addr(struct net_device *netdev);

static const char hns3_driver_name[] = "hns3";
@@ -463,6 +462,20 @@ static int hns3_nic_net_open(struct net_device *netdev)
	return 0;
}

static void hns3_reset_tx_queue(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct netdev_queue *dev_queue;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);
	}
}

static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -493,7 +506,9 @@ static void hns3_nic_net_down(struct net_device *netdev)
	 * to disable the ring through firmware when downing the netdev.
	 */
	if (!hns3_nic_resetting(netdev))
		hns3_clear_all_ring(priv->ae_handle);
		hns3_clear_all_ring(priv->ae_handle, false);

	hns3_reset_tx_queue(priv->ae_handle);
}

static int hns3_nic_net_stop(struct net_device *netdev)
@@ -1475,12 +1490,10 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			rx_bytes += ring->stats.rx_bytes;
			rx_pkts += ring->stats.rx_pkts;
			rx_drop += ring->stats.non_vld_descs;
			rx_drop += ring->stats.l2_err;
			rx_errors += ring->stats.non_vld_descs;
			rx_errors += ring->stats.l2_err;
			rx_errors += ring->stats.l3l4_csum_err;
			rx_crc_errors += ring->stats.l2_err;
			rx_crc_errors += ring->stats.l3l4_csum_err;
			rx_multicast += ring->stats.rx_multicast;
			rx_length_errors += ring->stats.err_pkt_len;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
@@ -2754,14 +2767,6 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
					       vlan_tag);
	}

	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.non_vld_descs++;
		u64_stats_update_end(&ring->syncp);

		return -EINVAL;
	}

	if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
				  BIT(HNS3_RXD_L2E_B))))) {
		u64_stats_update_begin(&ring->syncp);
@@ -3921,7 +3926,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)

	hns3_del_all_fd_rules(netdev, true);

	hns3_force_clear_all_ring(handle);
	hns3_clear_all_ring(handle, true);

	hns3_nic_uninit_vector_data(priv);

@@ -4090,42 +4095,25 @@ static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
	}
}

static void hns3_force_clear_all_ring(struct hnae3_handle *h)
static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *ring;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ring = priv->ring_data[i].ring;
		hns3_clear_tx_ring(ring);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_force_clear_rx_ring(ring);
	}
}

static void hns3_clear_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct netdev_queue *dev_queue;
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clear_tx_ring(ring);
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */
		if (force)
			hns3_force_clear_rx_ring(ring);
		else
			hns3_clear_rx_ring(ring);
	}
}
@@ -4331,8 +4319,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
		return 0;
	}

	hns3_clear_all_ring(handle);
	hns3_force_clear_all_ring(handle);
	hns3_clear_all_ring(handle, true);
	hns3_reset_tx_queue(priv->ae_handle);

	hns3_nic_uninit_vector_data(priv);

+0 −20
@@ -384,7 +384,6 @@ struct ring_stats {
			u64 rx_err_cnt;
			u64 reuse_pg_cnt;
			u64 err_pkt_len;
			u64 non_vld_descs;
			u64 err_bd_num;
			u64 l2_err;
			u64 l3l4_csum_err;
@@ -446,25 +445,6 @@ enum hns3_flow_level_range {
	HNS3_FLOW_ULTRA = 3,
};

enum hns3_link_mode_bits {
	HNS3_LM_FIBRE_BIT = BIT(0),
	HNS3_LM_AUTONEG_BIT = BIT(1),
	HNS3_LM_TP_BIT = BIT(2),
	HNS3_LM_PAUSE_BIT = BIT(3),
	HNS3_LM_BACKPLANE_BIT = BIT(4),
	HNS3_LM_10BASET_HALF_BIT = BIT(5),
	HNS3_LM_10BASET_FULL_BIT = BIT(6),
	HNS3_LM_100BASET_HALF_BIT = BIT(7),
	HNS3_LM_100BASET_FULL_BIT = BIT(8),
	HNS3_LM_1000BASET_FULL_BIT = BIT(9),
	HNS3_LM_10000BASEKR_FULL_BIT = BIT(10),
	HNS3_LM_25000BASEKR_FULL_BIT = BIT(11),
	HNS3_LM_40000BASELR4_FULL_BIT = BIT(12),
	HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13),
	HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14),
	HNS3_LM_COUNT = 15
};

#define HNS3_INT_GL_MAX			0x1FE0
#define HNS3_INT_GL_50K			0x0014
#define HNS3_INT_GL_20K			0x0032
+0 −1
@@ -44,7 +44,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
	HNS3_TQP_STAT("errors", rx_err_cnt),
	HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
	HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
	HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
	HNS3_TQP_STAT("err_bd_num", err_bd_num),
	HNS3_TQP_STAT("l2_err", l2_err),
	HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
+12 −3
@@ -232,6 +232,7 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	struct hclge_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
@@ -241,8 +242,16 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)

	spin_lock_bh(&hw->cmq.csq.lock);

	if (num > hclge_ring_space(&hw->cmq.csq) ||
	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
	if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_ring_space(&hw->cmq.csq)) {
		/* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
		 * need update the SW HEAD pointer csq->next_to_clean
		 */
		csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}
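
Editor's note on the hunk above: ring space in a command queue like this is typically derived from the software next_to_use/next_to_clean indices, so if firmware has already consumed descriptors but next_to_clean was never advanced, the queue keeps looking full until the software head is resynchronized from the hardware HEAD register. The sketch below is a hypothetical user-space illustration of that arithmetic; the space formula is a common ring-buffer convention and an assumption, not necessarily hclge_ring_space()'s exact implementation.

/*
 * Editorial sketch (not driver code): a stale software HEAD makes the ring
 * look full until it is resynchronized from the hardware HEAD register.
 */
#include <stdio.h>

struct cmq_ring {
	int desc_num;       /* ring size */
	int next_to_use;    /* SW tail: where the next descriptor is written */
	int next_to_clean;  /* SW head: first descriptor not yet reclaimed   */
};

static int ring_space(const struct cmq_ring *r)
{
	int used = (r->next_to_use - r->next_to_clean + r->desc_num) % r->desc_num;

	return r->desc_num - used - 1;	/* one slot is kept empty */
}

int main(void)
{
	struct cmq_ring csq = { .desc_num = 8, .next_to_use = 7, .next_to_clean = 0 };

	printf("space with stale SW head: %d\n", ring_space(&csq));	/* 0 */

	/* pretend the HW HEAD register reports 5 consumed descriptors */
	csq.next_to_clean = 5;
	printf("space after resync:       %d\n", ring_space(&csq));	/* 5 */

	return 0;
}
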
@@ -280,7 +289,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
	}

	if (!complete) {
		retval = -EAGAIN;
		retval = -EBADE;
	} else {
		retval = hclge_cmd_check_retval(hw, desc, num, ntc);
	}
+1 −1
@@ -884,7 +884,7 @@ struct hclge_serdes_lb_cmd {
#define HCLGE_TOTAL_PKT_BUF		0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV		0xA000	 /* 40k byte */
#define HCLGE_DEFAULT_NON_DCB_DV	0x7800	/* 30K byte */
#define HCLGE_NON_DCB_ADDITIONAL_BUF	0x200	/* 512 byte */
#define HCLGE_NON_DCB_ADDITIONAL_BUF	0x1400	/* 5120 byte */

#define HCLGE_TYPE_CRQ			0
#define HCLGE_TYPE_CSQ			1