Commit 494f44d5 authored by Jesper Dangaard Brouer, committed by Alexei Starovoitov
Browse files

mvneta: Add XDP frame size to driver



This Marvell driver, mvneta, uses PAGE_SIZE frames, which makes it
really easy to convert.  The driver updates the rxq and now sets
frame_sz once per NAPI call.

This driver takes advantage of the page_pool PP_FLAG_DMA_SYNC_DEV flag,
which can help reduce the number of cache-lines that need to be flushed
when doing a DMA sync for_device. Because xdp_adjust_tail can grow the
area accessible to the CPU (which it can possibly write into), the max
sync length *after* bpf_prog_run_xdp() needs to be taken into account.

For the XDP_TX action the driver is smart and does its own DMA-sync. When
growing the tail this is still safe, because page_pool has DMA-mapped the
entire page size.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
Cc: thomas.petazzoni@bootlin.com
Link: https://lore.kernel.org/bpf/158945335786.97035.12714388304493736747.stgit@firesoul
parent 983e4345
Loading
Loading
Loading
Loading
+15 −10
Original line number Diff line number Diff line
@@ -2148,12 +2148,17 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
	       struct bpf_prog *prog, struct xdp_buff *xdp,
	       struct mvneta_stats *stats)
{
	unsigned int len;
	unsigned int len, sync;
	struct page *page;
	u32 ret, act;

	len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
	act = bpf_prog_run_xdp(prog, xdp);

	/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
	sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		stats->xdp_pass++;
@@ -2164,9 +2169,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
		err = xdp_do_redirect(pp->dev, xdp, prog);
		if (unlikely(err)) {
			ret = MVNETA_XDP_DROPPED;
			page_pool_put_page(rxq->page_pool,
					   virt_to_head_page(xdp->data), len,
					   true);
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(rxq->page_pool, page, sync, true);
		} else {
			ret = MVNETA_XDP_REDIR;
			stats->xdp_redirect++;
@@ -2175,10 +2179,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
	}
	case XDP_TX:
		ret = mvneta_xdp_xmit_back(pp, xdp);
		if (ret != MVNETA_XDP_TX)
			page_pool_put_page(rxq->page_pool,
					   virt_to_head_page(xdp->data), len,
					   true);
		if (ret != MVNETA_XDP_TX) {
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(rxq->page_pool, page, sync, true);
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
@@ -2187,8 +2191,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
		trace_xdp_exception(pp->dev, prog, act);
		/* fall through */
	case XDP_DROP:
		page_pool_put_page(rxq->page_pool,
				   virt_to_head_page(xdp->data), len, true);
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(rxq->page_pool, page, sync, true);
		ret = MVNETA_XDP_DROPPED;
		stats->xdp_drop++;
		break;
@@ -2320,6 +2324,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
	rcu_read_lock();
	xdp_prog = READ_ONCE(pp->xdp_prog);
	xdp_buf.rxq = &rxq->xdp_rxq;
	xdp_buf.frame_sz = PAGE_SIZE;

	/* Fairness NAPI loop */
	while (rx_proc < budget && rx_proc < rx_todo) {