Commit 751e4251 authored by David S. Miller
Browse files

Merge branch 'net_prefetch-API'

Tariq Toukan says:

====================
net_prefetch API

This patchset adds a common net API for L1 cacheline size-aware prefetch.

Patch 1 introduces the common API in net and aligns the drivers to use it.
Patches 2 and 3 add usage in mlx4 and mlx5 Eth drivers.

Series generated against net-next commit:
079f921e Merge tag 'batadv-next-for-davem-20200824' of git://git.open-mesh.org/linux-merge


====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 04e006b4 aed4d4c6
Loading
Loading
Loading
Loading
+1 −4
Original line number Diff line number Diff line
@@ -2372,10 +2372,7 @@ no_mem:
			if (fl->use_pages) {
				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;

				prefetch(addr);
#if L1_CACHE_BYTES < 128
				prefetch(addr + L1_CACHE_BYTES);
#endif
				net_prefetch(addr);
				__refill_fl(adap, fl);
				if (lro > 0) {
					lro_add_page(adap, qs, fl,
+1 −4
Original line number Diff line number Diff line
@@ -557,10 +557,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif
	net_prefetch(va);

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
+1 −4
Original line number Diff line number Diff line
@@ -3091,10 +3091,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
	 * lines. In such a case, single fetch would suffice to cache in the
	 * relevant part of the header.
	 */
	prefetch(ring->va);
#if L1_CACHE_BYTES < 128
	prefetch(ring->va + L1_CACHE_BYTES);
#endif
	net_prefetch(ring->va);

	if (!skb) {
		ret = hns3_alloc_skb(ring, length, ring->va);
+1 −4
Original line number Diff line number Diff line
@@ -310,10 +310,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES));
#endif
		net_prefetch(page_addr);

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+4 −8
Original line number Diff line number Diff line
@@ -1992,10 +1992,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
	prefetch(xdp->data + L1_CACHE_BYTES);
#endif
	net_prefetch(xdp->data);

	/* Note, we get here by enabling legacy-rx via:
	 *
	 *    ethtool --set-priv-flags <dev> legacy-rx on
@@ -2078,10 +2076,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	prefetch(xdp->data_meta);
#if L1_CACHE_BYTES < 128
	prefetch(xdp->data_meta + L1_CACHE_BYTES);
#endif
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
Loading