Commit 31660a97 authored by David S. Miller

Merge branch 'net-hns3-updates-for-next'

Huazhong Tan says:

====================
net: hns3: updates for -next

This series contains some optimizations related to the IO path.

Changes since V1:
- fix an unsuitable handling in hns3_lb_clear_tx_ring() of patch #6, which
  was pointed out by Saeed Mahameed.

previous version:
V1: https://patchwork.ozlabs.org/project/netdev/cover/1600085217-26245-1-git-send-email-tanhuazhong@huawei.com/


====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents b948577b 619ae331
+131 −94
@@ -1383,6 +1383,27 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
 	return bd_num;
 }
 
+static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
+			     bool doorbell)
+{
+	ring->pending_buf += num;
+
+	if (!doorbell) {
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.tx_more++;
+		u64_stats_update_end(&ring->syncp);
+		return;
+	}
+
+	if (!ring->pending_buf)
+		return;
+
+	writel(ring->pending_buf,
+	       ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
+	ring->pending_buf = 0;
+	WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+}
+
 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
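
The helper above is the core of the doorbell batching: descriptors are only
counted into pending_buf, and the tail register is written once per batch
instead of once per skb. A minimal sketch of the pattern, with a hypothetical
my_tx_kick() standing in for the hns3 helper (netdev_xmit_more() and
__netdev_tx_sent_queue() are the stock kernel APIs):

static void my_tx_kick(struct my_ring *ring, int num, bool doorbell)
{
	ring->pending += num;
	if (!doorbell || !ring->pending)
		return;				/* batch still open, or nothing queued */
	writel(ring->pending, ring->tail_reg);	/* one MMIO write per batch */
	ring->pending = 0;
}

/* in the xmit path, once the descriptors are filled: */
doorbell = __netdev_tx_sent_queue(txq, skb->len, netdev_xmit_more());
my_tx_kick(ring, bd_num, doorbell);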
@@ -1391,11 +1412,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	int pre_ntu, next_to_use_head;
 	struct sk_buff *frag_skb;
 	int bd_num = 0;
+	bool doorbell;
 	int ret;
 
 	/* Hardware can only handle short frames above 32 bytes */
-	if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
+	if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
+		hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
 		return NETDEV_TX_OK;
+	}
 
 	/* Prefetch the data used later */
 	prefetch(skb->data);
@@ -1406,6 +1430,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 			u64_stats_update_begin(&ring->syncp);
 			ring->stats.tx_busy++;
 			u64_stats_update_end(&ring->syncp);
+			hns3_tx_doorbell(ring, 0, true);
 			return NETDEV_TX_BUSY;
 		} else if (ret == -ENOMEM) {
 			u64_stats_update_begin(&ring->syncp);
@@ -1446,11 +1471,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)

 	/* Complete translate all packets */
 	dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
-	netdev_tx_sent_queue(dev_queue, skb->len);
-
-	wmb(); /* Commit all data before submit */
-
-	hnae3_queue_xmit(ring->tqp, bd_num);
+	doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
+					  netdev_xmit_more());
+	hns3_tx_doorbell(ring, bd_num, doorbell);
 
 	return NETDEV_TX_OK;
 
@@ -1459,6 +1482,7 @@ fill_err:

 out_err_tx_ok:
 	dev_kfree_skb_any(skb);
+	hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
 	return NETDEV_TX_OK;
 }
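
Note the pattern on the early-return paths above and in the padding check
earlier: once the doorbell is deferred, every exit from the xmit path must
either ring it or explicitly keep deferring, otherwise descriptors queued by
earlier xmit_more packets would sit in the ring indefinitely. Roughly, using
the hypothetical my_tx_kick() from above:

	/* drop path: free the skb, but flush any pending doorbell unless
	 * the stack has promised more packets right away
	 */
	dev_kfree_skb_any(skb);
	my_tx_kick(ring, 0, !netdev_xmit_more());
	return NETDEV_TX_OK;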

@@ -1839,13 +1863,13 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
 		    tx_ring->next_to_clean, napi->state);
 
 	netdev_info(ndev,
-		    "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
+		    "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
 		    tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
-		    tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
+		    tx_ring->stats.sw_err_cnt, tx_ring->pending_buf);
 
 	netdev_info(ndev,
-		    "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
-		    tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
+		    "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
+		    tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
 		    tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
 
 	/* When mac received many pause frames continuous, it's unable to send
@@ -2302,17 +2326,19 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
 	cb->buf  = page_address(p);
 	cb->length = hns3_page_size(ring);
 	cb->type = DESC_TYPE_PAGE;
+	page_ref_add(p, USHRT_MAX - 1);
+	cb->pagecnt_bias = USHRT_MAX;
 
 	return 0;
 }
 
 static void hns3_free_buffer(struct hns3_enet_ring *ring,
-			     struct hns3_desc_cb *cb)
+			     struct hns3_desc_cb *cb, int budget)
 {
 	if (cb->type == DESC_TYPE_SKB)
-		dev_kfree_skb_any((struct sk_buff *)cb->priv);
-	else if (!HNAE3_IS_TX_RING(ring))
-		put_page((struct page *)cb->priv);
+		napi_consume_skb(cb->priv, budget);
+	else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
+		__page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
 	memset(cb, 0, sizeof(*cb));
 }
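
Two independent changes meet in hns3_free_buffer(): TX skbs are now freed
with napi_consume_skb(), which can bulk-free when called from NAPI context
(budget > 0) and degrades to the any-context free when budget is 0, and RX
pages now carry a pagecnt_bias so all driver-held references can be dropped
in one shot. A sketch of the bias scheme in isolation (struct my_buf and the
function names are illustrative; the page APIs are the real ones):

struct my_buf {
	struct page *page;
	u16 bias;	/* page references the driver still owns */
};

static int my_alloc_rx_page(struct my_buf *cb)
{
	struct page *p = dev_alloc_pages(0);

	if (!p)
		return -ENOMEM;
	page_ref_add(p, USHRT_MAX - 1);	/* refcount becomes USHRT_MAX */
	cb->page = p;
	cb->bias = USHRT_MAX;		/* and all of it belongs to us */
	return 0;
}

static void my_free_rx_page(struct my_buf *cb)
{
	if (cb->bias)	/* drop every remaining reference at once */
		__page_frag_cache_drain(cb->page, cb->bias);
}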

@@ -2344,7 +2370,8 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
 	ring->desc[i].addr = 0;
 }
 
-static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
+static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
+				    int budget)
 {
 	struct hns3_desc_cb *cb = &ring->desc_cb[i];

@@ -2352,7 +2379,7 @@ static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
 		return;
 
 	hns3_buffer_detach(ring, i);
-	hns3_free_buffer(ring, cb);
+	hns3_free_buffer(ring, cb, budget);
 }
 
 static void hns3_free_buffers(struct hns3_enet_ring *ring)
@@ -2360,7 +2387,7 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
 	int i;
 
 	for (i = 0; i < ring->desc_num; i++)
-		hns3_free_buffer_detach(ring, i);
+		hns3_free_buffer_detach(ring, i, 0);
 }
 
 /* free desc along with its attached buffer */
@@ -2405,7 +2432,7 @@ static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
 	return 0;
 
 out_with_buf:
-	hns3_free_buffer(ring, cb);
+	hns3_free_buffer(ring, cb, 0);
 out:
 	return ret;
 }
@@ -2437,7 +2464,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)

 out_buffer_fail:
 	for (j = i - 1; j >= 0; j--)
-		hns3_free_buffer_detach(ring, j);
+		hns3_free_buffer_detach(ring, j, 0);
 	return ret;
 }

@@ -2464,71 +2491,62 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 			DMA_FROM_DEVICE);
 }
 
-static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
-				  int *bytes, int *pkts)
+static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
+				  int *bytes, int *pkts, int budget)
 {
+	/* pair with ring->last_to_use update in hns3_tx_doorbell(),
+	 * smp_store_release() is not used in hns3_tx_doorbell() because
+	 * the doorbell operation already have the needed barrier operation.
+	 */
+	int ltu = smp_load_acquire(&ring->last_to_use);
 	int ntc = ring->next_to_clean;
 	struct hns3_desc_cb *desc_cb;
+	bool reclaimed = false;
+	struct hns3_desc *desc;
 
+	while (ltu != ntc) {
+		desc = &ring->desc[ntc];
+
+		if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) &
+				BIT(HNS3_TXD_VLD_B))
+			break;
+
-	while (head != ntc) {
 		desc_cb = &ring->desc_cb[ntc];
 		(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
 		(*bytes) += desc_cb->length;
 		/* desc_cb will be cleaned, after hnae3_free_buffer_detach */
-		hns3_free_buffer_detach(ring, ntc);
+		hns3_free_buffer_detach(ring, ntc, budget);
 
 		if (++ntc == ring->desc_num)
 			ntc = 0;
 
 		/* Issue prefetch for next Tx descriptor */
 		prefetch(&ring->desc_cb[ntc]);
+		reclaimed = true;
 	}
 
+	if (unlikely(!reclaimed))
+		return false;
+
 	/* This smp_store_release() pairs with smp_load_acquire() in
 	 * ring_space called by hns3_nic_net_xmit.
 	 */
 	smp_store_release(&ring->next_to_clean, ntc);
+	return true;
 }
 
-static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
-{
-	int u = ring->next_to_use;
-	int c = ring->next_to_clean;
-
-	if (unlikely(h > ring->desc_num))
-		return 0;
-
-	return u > c ? (h > c && h <= u) : (h > c || h <= u);
-}
-
-void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
 {
 	struct net_device *netdev = ring_to_netdev(ring);
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct netdev_queue *dev_queue;
 	int bytes, pkts;
-	int head;
 
-	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
-
-	if (is_ring_empty(ring) || head == ring->next_to_clean)
-		return; /* no data to poll */
-
-	rmb(); /* Make sure head is ready before touch any data */
-
-	if (unlikely(!is_valid_clean_head(ring, head))) {
-		hns3_rl_err(netdev, "wrong head (%d, %d-%d)\n", head,
-			    ring->next_to_use, ring->next_to_clean);
-
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.io_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
-		return;
-	}
-
 	bytes = 0;
 	pkts = 0;
-	hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
+
+	if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget)))
+		return;
 
 	ring->tqp_vector->tx_group.total_bytes += bytes;
 	ring->tqp_vector->tx_group.total_packets += pkts;
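
The TX cleanup path thus stops reading the hardware HEAD register, and the
is_valid_clean_head() sanity check and io_err_cnt counter go away with it.
The producer publishes last_to_use after ringing the doorbell; the consumer
walks forward until it reaches it or hits a descriptor whose valid bit the
hardware has not cleared yet. This is the usual release/acquire ring pattern;
as the comment in the patch notes, the writel() in hns3_tx_doorbell() already
orders the descriptor writes, so a plain WRITE_ONCE() suffices on the
producer side. Schematically (not literal code):

/* Producer (xmit)                      Consumer (NAPI poll)
 * ---------------                      --------------------
 * fill descriptors                     ltu = smp_load_acquire(&ring->last_to_use);
 * writel(tail_reg)   <-- barrier       while (ntc != ltu)
 * WRITE_ONCE(ring->last_to_use, ntu);          reclaim(ntc++);
 *                                      smp_store_release(&ring->next_to_clean, ntc);
 */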
@@ -2600,8 +2618,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 		ring_ptr_move_fw(ring, next_to_use);
 	}
 
-	wmb(); /* Make all data has been write before submit */
-	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+	writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
 }
 
 static bool hns3_page_is_reusable(struct page *page)
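
The same barrier reasoning applies here: writel() is defined to order prior
normal memory writes before the MMIO store, so the explicit wmb() plus
writel_relaxed() pair collapses into a single writel():

	/* before: explicit barrier, then relaxed MMIO write */
	wmb();
	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);

	/* after: writel() already implies the needed store ordering */
	writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);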
@@ -2610,6 +2627,11 @@ static bool hns3_page_is_reusable(struct page *page)
 		!page_is_pfmemalloc(page);
 }
 
+static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
+{
+	return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
+}
+
 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_enet_ring *ring, int pull_len,
 				struct hns3_desc_cb *desc_cb)
@@ -2618,6 +2640,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	int size = le16_to_cpu(desc->rx.size);
 	u32 truesize = hns3_buf_size(ring);
 
+	desc_cb->pagecnt_bias--;
 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
 			size - pull_len, truesize);

@@ -2625,20 +2648,27 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	 * when page_offset rollback to zero, flag default unreuse
 	 */
 	if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
-	    (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
+	    (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
+		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
 		return;
+	}
 
 	/* Move offset up to the next cache line */
 	desc_cb->page_offset += truesize;
 
 	if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
 		desc_cb->reuse_flag = 1;
-		/* Bump ref count on page before it is given */
-		get_page(desc_cb->priv);
-	} else if (page_count(desc_cb->priv) == 1) {
+	} else if (hns3_can_reuse_page(desc_cb)) {
 		desc_cb->reuse_flag = 1;
 		desc_cb->page_offset = 0;
-		get_page(desc_cb->priv);
+	} else if (desc_cb->pagecnt_bias) {
+		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
+		return;
 	}
+
+	if (unlikely(!desc_cb->pagecnt_bias)) {
+		page_ref_add(desc_cb->priv, USHRT_MAX);
+		desc_cb->pagecnt_bias = USHRT_MAX;
+	}
 }
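
With the bias in place, page reuse becomes a reference-count comparison
instead of a get_page() per fragment: page_count() minus the outstanding bias
is the number of fragment references currently owned by the stack. Since the
bias is decremented just before the check, a result of 1 means only the
fragment being attached right now is outstanding, so the other half of the
page can be recycled; the bias is then replenished in bulk once it runs out.
The invariant, spelled out as a sketch:

/* page_count(p) - bias == fragment references the stack still holds.
 * A value of 1 (the fragment just donated) means every older fragment
 * has been freed and the page may be reused.
 */
static bool my_page_unused_by_stack(struct page *p, u16 bias)
{
	return page_count(p) - bias == 1;
}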

@@ -2814,6 +2844,16 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
 	}
 }
 
+static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
+{
+	ring->desc[ring->next_to_clean].rx.bd_base_info &=
+		cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+	ring->next_to_clean += 1;
+
+	if (unlikely(ring->next_to_clean == ring->desc_num))
+		ring->next_to_clean = 0;
+}
+
 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 			  unsigned char *va)
 {
@@ -2846,9 +2886,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 		if (likely(hns3_page_is_reusable(desc_cb->priv)))
 			desc_cb->reuse_flag = 1;
 		else /* This page cannot be reused so discard it */
-			put_page(desc_cb->priv);
+			__page_frag_cache_drain(desc_cb->priv,
+						desc_cb->pagecnt_bias);
 
-		ring_ptr_move_fw(ring, next_to_clean);
+		hns3_rx_ring_move_fw(ring);
 		return 0;
 	}
 	u64_stats_update_begin(&ring->syncp);
@@ -2859,7 +2900,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 	__skb_put(skb, ring->pull_len);
 	hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
 			    desc_cb);
-	ring_ptr_move_fw(ring, next_to_clean);
+	hns3_rx_ring_move_fw(ring);
 
 	return 0;
 }
@@ -2914,7 +2955,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring)

 		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
 		trace_hns3_rx_desc(ring);
-		ring_ptr_move_fw(ring, next_to_clean);
+		hns3_rx_ring_move_fw(ring);
 		ring->pending_buf++;
 	} while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));

@@ -3056,32 +3097,32 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)

 	prefetch(desc);
 
-	length = le16_to_cpu(desc->rx.size);
-	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-
-	/* Check valid BD */
-	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
-		return -ENXIO;
-
 	if (!skb) {
+		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+
+		/* Check valid BD */
+		if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
+			return -ENXIO;
+
+		dma_rmb();
+		length = le16_to_cpu(desc->rx.size);
+
 		ring->va = desc_cb->buf + desc_cb->page_offset;
 
 		dma_sync_single_for_cpu(ring_to_dev(ring),
 				desc_cb->dma + desc_cb->page_offset,
 				hns3_buf_size(ring),
 				DMA_FROM_DEVICE);
-	}
 
-	/* Prefetch first cache line of first page
-	 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
-	 * line size is 64B so need to prefetch twice to make it 128B. But in
-	 * actual we can have greater size of caches with 128B Level 1 cache
-	 * lines. In such a case, single fetch would suffice to cache in the
-	 * relevant part of the header.
-	 */
-	net_prefetch(ring->va);
+		/* Prefetch first cache line of first page.
+		 * Idea is to cache few bytes of the header of the packet.
+		 * Our L1 Cache line size is 64B so need to prefetch twice to make
+		 * it 128B. But in actual we can have greater size of caches with
+		 * 128B Level 1 cache lines. In such a case, single fetch would
+		 * suffice to cache in the relevant part of the header.
+		 */
+		net_prefetch(ring->va);
 
-	if (!skb) {
 		ret = hns3_alloc_skb(ring, length, ring->va);
 		skb = ring->skb;
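
The RX descriptor is now validated and consumed in one place, with dma_rmb()
between observing the valid bit and reading the remaining fields: the CPU
must not hoist the size/offset loads above the load that proved the device
finished writing the BD. The canonical shape of the pattern (MY_RXD_VLD_B is
a stand-in for the hardware's valid-bit index):

	if (!(le32_to_cpu(desc->rx.bd_base_info) & BIT(MY_RXD_VLD_B)))
		return -ENXIO;		/* device has not written this BD yet */

	dma_rmb();			/* order the VLD load before the field loads */
	length = le16_to_cpu(desc->rx.size);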

@@ -3121,19 +3162,11 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
 	int unused_count = hns3_desc_unused(ring);
 	int recv_pkts = 0;
-	int recv_bds = 0;
-	int err, num;
+	int err;
 
-	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
-	num -= unused_count;
+	unused_count -= ring->pending_buf;
 
-	if (num <= 0)
-		goto out;
-
-	rmb(); /* Make sure num taken effect before the other data is touched */
-
-	while (recv_pkts < budget && recv_bds < num) {
+	while (recv_pkts < budget) {
 		/* Reuse or realloc buffers */
 		if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
 			hns3_nic_alloc_rx_buffers(ring, unused_count);
@@ -3151,7 +3184,6 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 			recv_pkts++;
 		}
 
-		recv_bds += ring->pending_buf;
 		unused_count += ring->pending_buf;
 		ring->skb = NULL;
 		ring->pending_buf = 0;
@@ -3320,7 +3352,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
 	 * budget and be more aggressive about cleaning up the Tx descriptors.
 	 */
 	hns3_for_each_ring(ring, tqp_vector->tx_group)
-		hns3_clean_tx_ring(ring);
+		hns3_clean_tx_ring(ring, budget);
 
 	/* make sure rx ring budget not smaller than 1 */
 	if (tqp_vector->num_tqps > 1)
@@ -3673,6 +3705,7 @@ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
 	ring->desc_num = desc_num;
 	ring->next_to_use = 0;
 	ring->next_to_clean = 0;
+	ring->last_to_use = 0;
 }

static void hns3_queue_to_ring(struct hnae3_queue *tqp,
@@ -3752,6 +3785,7 @@ void hns3_fini_ring(struct hns3_enet_ring *ring)
 	ring->desc_cb = NULL;
 	ring->next_to_clean = 0;
 	ring->next_to_use = 0;
+	ring->last_to_use = 0;
 	ring->pending_buf = 0;
 	if (ring->skb) {
 		dev_kfree_skb_any(ring->skb);
@@ -4162,9 +4196,11 @@ static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
 {
 	while (ring->next_to_clean != ring->next_to_use) {
 		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
-		hns3_free_buffer_detach(ring, ring->next_to_clean);
+		hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
 		ring_ptr_move_fw(ring, next_to_clean);
 	}
+
+	ring->pending_buf = 0;
 }

static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
@@ -4267,6 +4303,7 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
 		hns3_clear_tx_ring(&priv->ring[i]);
 		priv->ring[i].next_to_clean = 0;
 		priv->ring[i].next_to_use = 0;
+		priv->ring[i].last_to_use = 0;
 
 		rx_ring = &priv->ring[i + h->kinfo.num_tqps];
 		hns3_init_ring_hw(rx_ring);
+7 −13
@@ -287,6 +287,7 @@ struct hns3_desc_cb {

 	/* desc type, used by the ring user to mark the type of the priv data */
 	u16 type;
+	u16 pagecnt_bias;
 };

enum hns3_pkt_l3type {
@@ -343,14 +344,13 @@ enum hns3_pkt_ol4type {
 };
 
 struct ring_stats {
-	u64 io_err_cnt;
 	u64 sw_err_cnt;
 	u64 seg_pkt_cnt;
 	union {
 		struct {
 			u64 tx_pkts;
 			u64 tx_bytes;
-			u64 tx_err_cnt;
+			u64 tx_more;
 			u64 restart_queue;
 			u64 tx_busy;
 			u64 tx_copy;
@@ -396,8 +396,10 @@ struct hns3_enet_ring {
 	 * next_to_use
 	 */
 	int next_to_clean;
-
-	u32 pull_len; /* head length for current packet */
+	union {
+		int last_to_use;	/* last idx used by xmit */
+		u32 pull_len;		/* memcpy len for current rx packet */
+	};
 	u32 frag_num;
 	void *va; /* first buffer address for current packet */

@@ -512,11 +514,6 @@ static inline int ring_space(struct hns3_enet_ring *ring)
 			(begin - end)) - 1;
 }
 
-static inline int is_ring_empty(struct hns3_enet_ring *ring)
-{
-	return ring->next_to_use == ring->next_to_clean;
-}
-
 static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
 {
 	return readl(base + reg);
@@ -542,9 +539,6 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
 #define hns3_write_dev(a, reg, value) \
 	hns3_write_reg((a)->io_base, (reg), (value))
 
-#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
-		(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
-
 #define ring_to_dev(ring) ((ring)->dev)
 
 #define ring_to_netdev(ring)	((ring)->tqp_vector->napi.dev)
@@ -582,7 +576,7 @@ void hns3_ethtool_set_ops(struct net_device *netdev);
 int hns3_set_channels(struct net_device *netdev,
 		      struct ethtool_channels *ch);
 
-void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
 int hns3_init_all_ring(struct hns3_nic_priv *priv);
 int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
 int hns3_nic_reset_all_ring(struct hnae3_handle *h);
+2 −4
@@ -27,12 +27,11 @@ struct hns3_sfp_type {

 static const struct hns3_stats hns3_txq_stats[] = {
 	/* Tx per-queue statistics */
-	HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
 	HNS3_TQP_STAT("dropped", sw_err_cnt),
 	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
 	HNS3_TQP_STAT("packets", tx_pkts),
 	HNS3_TQP_STAT("bytes", tx_bytes),
-	HNS3_TQP_STAT("errors", tx_err_cnt),
+	HNS3_TQP_STAT("more", tx_more),
 	HNS3_TQP_STAT("wake", restart_queue),
 	HNS3_TQP_STAT("busy", tx_busy),
 	HNS3_TQP_STAT("copy", tx_copy),
@@ -46,7 +45,6 @@ static const struct hns3_stats hns3_txq_stats[] = {

 static const struct hns3_stats hns3_rxq_stats[] = {
 	/* Rx per-queue statistics */
-	HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
 	HNS3_TQP_STAT("dropped", sw_err_cnt),
 	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
 	HNS3_TQP_STAT("packets", rx_pkts),
@@ -232,7 +230,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
 	for (i = start_ringid; i <= end_ringid; i++) {
 		struct hns3_enet_ring *ring = &priv->ring[i];
 
-		hns3_clean_tx_ring(ring);
+		hns3_clean_tx_ring(ring, 0);
 	}
 }
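
This last hunk is the fix mentioned in the cover letter: the ethtool loopback
selftest cleans TX rings outside of NAPI, so it must pass budget 0, which
steers napi_consume_skb() onto the safe any-context path rather than the NAPI
bulk-free path. Paraphrasing the upstream helper in net/core/skbuff.c:

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	/* zero budget means "not called from NAPI context", e.g. netpoll
	 * or, as above, the loopback selftest
	 */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}
	/* ... NAPI-context bulk-free fast path ... */
}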