Commit da43f0aa authored by Jakub Kicinski

Merge branch 'mvneta-access-skb_shared_info-only-on-last-frag'

Lorenzo Bianconi says:

====================
mvneta: access skb_shared_info only on last frag

Build skb_shared_info on mvneta_rx_swbm stack and sync it to xdp_buff
skb_shared_info area only on the last fragment.
Avoid unnecessary xdp_buff initialization in the mvneta_rx_swbm routine.
This is a preliminary series to complete XDP multi-buff support in the mvneta driver.
====================

Link: https://lore.kernel.org/r/cover.1605889258.git.lorenzo@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 9a71baf7 039fbc47
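
As a rough illustration of the pattern the series introduces (accumulate fragments in a caller-owned, on-stack shared-info descriptor and write the buffer's own shared-info area only once, when the last fragment arrives), here is a minimal standalone C mock. It is a sketch, not driver code: frag, frag_desc, buf_shared_area and MAX_FRAGS are made-up stand-ins for skb_frag_t, skb_shared_info, xdp_get_shared_info_from_buff() and MAX_SKB_FRAGS.

#include <stdio.h>
#include <string.h>

#define MAX_FRAGS 17			/* stand-in for MAX_SKB_FRAGS */

struct frag {				/* stand-in for skb_frag_t */
	void *page;
	unsigned int off, size;
};

struct frag_desc {			/* stand-in for skb_shared_info */
	unsigned int nr_frags;
	struct frag frags[MAX_FRAGS];
};

/* In the real driver this area lives at the end of the first rx buffer
 * (reached via xdp_get_shared_info_from_buff()); touching it for every
 * fragment costs extra cache accesses, hence the on-stack copy below.
 */
static struct frag_desc buf_shared_area;

static void add_rx_fragment(struct frag_desc *stack_desc, void *page,
			    unsigned int size, int last_frag)
{
	if (stack_desc->nr_frags < MAX_FRAGS) {
		struct frag *f = &stack_desc->frags[stack_desc->nr_frags++];

		f->page = page;
		f->off = 0;
		f->size = size;
	}

	/* sync to the buffer's shared area only on the last fragment */
	if (last_frag) {
		buf_shared_area.nr_frags = stack_desc->nr_frags;
		memcpy(buf_shared_area.frags, stack_desc->frags,
		       stack_desc->nr_frags * sizeof(struct frag));
	}
}

int main(void)
{
	struct frag_desc desc = { .nr_frags = 0 };	/* on-stack, like sinfo */
	char page0[64], page1[64];

	add_rx_fragment(&desc, page0, sizeof(page0), 0);
	add_rx_fragment(&desc, page1, sizeof(page1), 1);

	printf("frags synced to buffer: %u\n", buf_shared_area.nr_frags);
	return 0;
}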
 drivers/net/ethernet/marvell/mvneta.c | 55 ++++++++++++++++++++-----------
 1 file changed, 35 insertions(+), 20 deletions(-)

--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2033,16 +2033,16 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)

 static void
 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
-		    struct xdp_buff *xdp, int sync_len, bool napi)
+		    struct xdp_buff *xdp, struct skb_shared_info *sinfo,
+		    int sync_len)
 {
-	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	int i;
 
 	for (i = 0; i < sinfo->nr_frags; i++)
 		page_pool_put_full_page(rxq->page_pool,
-					skb_frag_page(&sinfo->frags[i]), napi);
+					skb_frag_page(&sinfo->frags[i]), true);
 	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
-			   sync_len, napi);
+			   sync_len, true);
 }
 
 static int
@@ -2179,6 +2179,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	       struct bpf_prog *prog, struct xdp_buff *xdp,
 	       u32 frame_sz, struct mvneta_stats *stats)
 {
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	unsigned int len, data_len, sync;
 	u32 ret, act;
 
@@ -2199,7 +2200,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,

 		err = xdp_do_redirect(pp->dev, xdp, prog);
 		if (unlikely(err)) {
-			mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+			mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
 			ret = MVNETA_XDP_DROPPED;
 		} else {
 			ret = MVNETA_XDP_REDIR;
@@ -2210,7 +2211,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	case XDP_TX:
 		ret = mvneta_xdp_xmit_back(pp, xdp);
 		if (ret != MVNETA_XDP_TX)
-			mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+			mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
@@ -2219,7 +2220,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		trace_xdp_exception(pp->dev, prog, act);
 		fallthrough;
 	case XDP_DROP:
-		mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+		mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
 		ret = MVNETA_XDP_DROPPED;
 		stats->xdp_drop++;
 		break;
@@ -2277,9 +2278,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 			    struct mvneta_rx_desc *rx_desc,
 			    struct mvneta_rx_queue *rxq,
 			    struct xdp_buff *xdp, int *size,
+			    struct skb_shared_info *xdp_sinfo,
 			    struct page *page)
 {
-	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	struct net_device *dev = pp->dev;
 	enum dma_data_direction dma_dir;
 	int data_len, len;
@@ -2297,13 +2298,22 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 				len, dma_dir);
 	rx_desc->buf_phys_addr = 0;
 
-	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
-		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
+	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
+		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
 
 		skb_frag_off_set(frag, pp->rx_offset_correction);
 		skb_frag_size_set(frag, data_len);
 		__skb_frag_set_page(frag, page);
-		sinfo->nr_frags++;
+
+		/* last fragment */
+		if (len == *size) {
+			struct skb_shared_info *sinfo;
+
+			sinfo = xdp_get_shared_info_from_buff(xdp);
+			sinfo->nr_frags = xdp_sinfo->nr_frags;
+			memcpy(sinfo->frags, xdp_sinfo->frags,
+			       sinfo->nr_frags * sizeof(skb_frag_t));
+		}
 	} else {
 		page_pool_put_full_page(rxq->page_pool, page, true);
 	}
@@ -2347,13 +2357,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 {
 	int rx_proc = 0, rx_todo, refill, size = 0;
 	struct net_device *dev = pp->dev;
-	struct xdp_buff xdp_buf = {
-		.frame_sz = PAGE_SIZE,
-		.rxq = &rxq->xdp_rxq,
-	};
+	struct skb_shared_info sinfo;
 	struct mvneta_stats ps = {};
 	struct bpf_prog *xdp_prog;
 	u32 desc_status, frame_sz;
+	struct xdp_buff xdp_buf;
+
+	xdp_buf.data_hard_start = NULL;
+	xdp_buf.frame_sz = PAGE_SIZE;
+	xdp_buf.rxq = &rxq->xdp_rxq;
+
+	sinfo.nr_frags = 0;
 
 	/* Get number of received packets */
 	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2393,11 +2407,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 				rx_desc->buf_phys_addr = 0;
 				page_pool_put_full_page(rxq->page_pool, page,
 							true);
-				continue;
+				goto next;
 			}
 
 			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
-						    &size, page);
+						    &size, &sinfo, page);
 		} /* Middle or Last descriptor */
 
 		if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2405,7 +2419,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			continue;
 
 		if (size) {
-			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 			goto next;
 		}
 
@@ -2417,7 +2431,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		if (IS_ERR(skb)) {
 			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
-			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 
 			u64_stats_update_begin(&stats->syncp);
 			stats->es.skb_alloc_error++;
@@ -2434,11 +2448,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		napi_gro_receive(napi, skb);
 next:
 		xdp_buf.data_hard_start = NULL;
+		sinfo.nr_frags = 0;
 	}
 	rcu_read_unlock();
 
 	if (xdp_buf.data_hard_start)
-		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 
 	if (ps.xdp_redirect)
 		xdp_do_flush_map();
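
For reference, the shared-info area that the last-fragment sync targets sits at the hard end of the first buffer, past the packet data, which is why updating it for every received fragment is comparatively costly. In kernels of this era the lookup helper in include/net/xdp.h is roughly the following (quoted from memory, so treat it as a sketch rather than the exact source):

static inline struct skb_shared_info *
xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
{
	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}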