Commit aeda9bf8 authored by Yunsheng Lin, committed by David S. Miller

net: hns3: batch the page reference count updates

Batch the page reference count updates instead of doing them
one at a time. By doing this we can improve the overall receive
performance by avoiding some atomic increment operations when the
rx page is reused.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b948577b
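
The technique here is the pagecnt_bias batching also used by the kernel's
page_frag allocator and several other NIC drivers: charge a large block of
page references up front with one atomic operation, track how many of them
the driver still owns in a plain (non-atomic) per-buffer counter, and touch
the atomic refcount again only when the buffer is freed or the batch runs
out. Below is a minimal userspace sketch of that bookkeeping, not kernel
code: C11 atomics stand in for page_ref_add()/put_page()/
__page_frag_cache_drain(), and the struct and function names merely mirror
the patch.

#include <stdatomic.h>
#include <stdio.h>
#include <limits.h>

struct page {
	atomic_int refcount;              /* models page->_refcount */
};

struct desc_cb {
	struct page *priv;
	unsigned short pagecnt_bias;      /* refs pre-charged to the driver */
};

/* like hns3_alloc_buffer(): charge USHRT_MAX references with ONE atomic op */
static void alloc_buffer(struct desc_cb *cb, struct page *p)
{
	atomic_store(&p->refcount, 1);                  /* ref from the allocator */
	atomic_fetch_add(&p->refcount, USHRT_MAX - 1);  /* models page_ref_add() */
	cb->priv = p;
	cb->pagecnt_bias = USHRT_MAX;
}

/* like the new desc_cb->pagecnt_bias-- : donate one reference, no atomic op */
static void give_frag_to_stack(struct desc_cb *cb)
{
	cb->pagecnt_bias--;               /* plain decrement on a local counter */
}

/* like hns3_can_reuse_page(): only the just-donated reference is outstanding */
static int can_reuse_page(struct desc_cb *cb)
{
	return atomic_load(&cb->priv->refcount) - cb->pagecnt_bias == 1;
}

/* like hns3_free_buffer(): return all still-charged refs with ONE atomic op */
static void free_buffer(struct desc_cb *cb)
{
	/* models __page_frag_cache_drain(); previous == bias means count hit 0 */
	if (atomic_fetch_sub(&cb->priv->refcount, cb->pagecnt_bias) ==
	    cb->pagecnt_bias)
		printf("page freed\n");
}

int main(void)
{
	struct page p;
	struct desc_cb cb;

	alloc_buffer(&cb, &p);
	give_frag_to_stack(&cb);                       /* rx: one frag, no atomics */
	printf("reusable: %d\n", can_reuse_page(&cb)); /* 1: stack ref only */
	atomic_fetch_sub(&p.refcount, 1);              /* the stack drops its ref */
	free_buffer(&cb);
	return 0;
}

Compiling and running this prints "reusable: 1" followed by "page freed":
the donation itself costs no atomic operation, and the drain returns all
still-charged references in one go.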
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +24 −8
@@ -2302,6 +2302,8 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
 	cb->buf  = page_address(p);
 	cb->length = hns3_page_size(ring);
 	cb->type = DESC_TYPE_PAGE;
+	page_ref_add(p, USHRT_MAX - 1);
+	cb->pagecnt_bias = USHRT_MAX;
 
 	return 0;
 }
@@ -2311,8 +2313,8 @@ static void hns3_free_buffer(struct hns3_enet_ring *ring,
 {
 	if (cb->type == DESC_TYPE_SKB)
 		dev_kfree_skb_any((struct sk_buff *)cb->priv);
-	else if (!HNAE3_IS_TX_RING(ring))
-		put_page((struct page *)cb->priv);
+	else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
+		__page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
 	memset(cb, 0, sizeof(*cb));
 }
 
@@ -2610,6 +2612,11 @@ static bool hns3_page_is_reusable(struct page *page)
 		!page_is_pfmemalloc(page);
 }
 
+static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
+{
+	return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
+}
+
 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_enet_ring *ring, int pull_len,
 				struct hns3_desc_cb *desc_cb)
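
The new helper reads: page_count() minus pagecnt_bias is the number of
references held by anyone other than the driver, so a difference of 1 means
the only outstanding user is the fragment just donated to the stack and the
page can be recycled. As a worked example (not from the commit itself):
right after hns3_alloc_buffer() the count and the bias are both USHRT_MAX,
giving 0; after hns3_nic_reuse_page() donates one fragment the difference
becomes 1, and it drops back to 0 once the stack put_page()s that fragment.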
@@ -2618,6 +2625,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	int size = le16_to_cpu(desc->rx.size);
 	u32 truesize = hns3_buf_size(ring);
 
+	desc_cb->pagecnt_bias--;
 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
 			size - pull_len, truesize);
 
@@ -2625,20 +2633,27 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	/* Avoid re-using remote pages, or the stack is still using the page
 	 * when page_offset rollback to zero, flag default unreuse
 	 */
 	if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
-	    (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
+	    (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
+		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
 		return;
+	}
 
 	/* Move offset up to the next cache line */
 	desc_cb->page_offset += truesize;
 
 	if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
 		desc_cb->reuse_flag = 1;
-		/* Bump ref count on page before it is given */
-		get_page(desc_cb->priv);
-	} else if (page_count(desc_cb->priv) == 1) {
+	} else if (hns3_can_reuse_page(desc_cb)) {
 		desc_cb->reuse_flag = 1;
 		desc_cb->page_offset = 0;
-		get_page(desc_cb->priv);
+	} else if (desc_cb->pagecnt_bias) {
+		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
+		return;
 	}
+
+	if (unlikely(!desc_cb->pagecnt_bias)) {
+		page_ref_add(desc_cb->priv, USHRT_MAX);
+		desc_cb->pagecnt_bias = USHRT_MAX;
+	}
 }
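
The refill block at the end keeps the scheme going: pagecnt_bias is a u16,
so once USHRT_MAX (65535) references have been donated it reaches zero, and
a single page_ref_add(..., USHRT_MAX) re-charges the whole batch. In steady
state the rx path therefore pays roughly one atomic reference-count update
per 65535 reused fragments, rather than one get_page() per fragment as
before, which is the saving the commit message refers to.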

@@ -2846,7 +2861,8 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 		if (likely(hns3_page_is_reusable(desc_cb->priv)))
 			desc_cb->reuse_flag = 1;
 		else /* This page cannot be reused so discard it */
-			put_page(desc_cb->priv);
+			__page_frag_cache_drain(desc_cb->priv,
+						desc_cb->pagecnt_bias);
 
 		ring_ptr_move_fw(ring, next_to_clean);
 		return 0;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +1 −0
@@ -287,6 +287,7 @@ struct hns3_desc_cb {
 
 	/* desc type, used by the ring user to mark the type of the priv data */
 	u16 type;
+	u16 pagecnt_bias;
 };
 
 enum hns3_pkt_l3type {