Commit 495de55f authored by Ilias Apalodimas, committed by Alexei Starovoitov

net: netsec: Add support for XDP frame size



This driver takes advantage of the page_pool PP_FLAG_DMA_SYNC_DEV
flag, which can help reduce the number of cache-lines that need to
be flushed when doing a DMA sync for_device. Because xdp_adjust_tail
can grow the area accessible to the CPU (which it may write into),
the maximum sync length *after* bpf_prog_run_xdp() needs to be taken
into account.
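
For illustration only (not part of this commit), a minimal XDP
program of the kind this change has to account for: once
bpf_xdp_adjust_tail() grows the frame, the CPU may have written past
the original data_end, so the driver has to sync up to the new tail
before recycling the buffer. The program name and the delta value
are hypothetical.

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_grow_tail(struct xdp_md *ctx)
	{
		/* Grow the tail by 16 bytes; this can only succeed when
		 * the driver reports enough tailroom via frame_sz. */
		if (bpf_xdp_adjust_tail(ctx, 16) < 0)
			return XDP_ABORTED;

		/* Bounce the now-longer frame back out the same port. */
		return XDP_TX;
	}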

For the XDP_TX action the driver is smart and does its own DMA sync.
When growing the tail this is still safe, because page_pool has
DMA-mapped the entire page size.
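
Also illustrative: the for_device sync only happens when the pool is
created with PP_FLAG_DMA_SYNC_DEV. A minimal sketch of such a setup;
the pool size, device pointer and max_len below are assumptions, not
values copied from the netsec driver.

	#include <net/page_pool.h>

	struct page_pool_params pp_params = {
		.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order     = 0,                 /* one page per RX frame */
		.pool_size = 256,               /* illustrative ring size */
		.nid       = NUMA_NO_NODE,
		.dev       = dev,               /* hypothetical struct device * */
		.dma_dir   = DMA_BIDIRECTIONAL, /* XDP_TX needs both directions */
		.offset    = NETSEC_RXBUF_HEADROOM,
		/* Worst case the CPU touches everything after the headroom,
		 * matching the "sync" value computed in the diff below. */
		.max_len   = PAGE_SIZE - NETSEC_RXBUF_HEADROOM,
	};
	/* page_pool_create() returns an ERR_PTR() on failure. */
	struct page_pool *pool = page_pool_create(&pp_params);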

Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/bpf/158945336295.97035.15034759661036971024.stgit@firesoul
parent 494f44d5
+18 −12
@@ -884,23 +884,28 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
 			  struct xdp_buff *xdp)
 {
 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
-	unsigned int len = xdp->data_end - xdp->data;
+	unsigned int sync, len = xdp->data_end - xdp->data;
 	u32 ret = NETSEC_XDP_PASS;
+	struct page *page;
 	int err;
 	u32 act;
 
 	act = bpf_prog_run_xdp(prog, xdp);
 
+	/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
+	sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
+	sync = max(sync, len);
+
 	switch (act) {
 	case XDP_PASS:
 		ret = NETSEC_XDP_PASS;
 		break;
 	case XDP_TX:
 		ret = netsec_xdp_xmit_back(priv, xdp);
-		if (ret != NETSEC_XDP_TX)
-			page_pool_put_page(dring->page_pool,
-					   virt_to_head_page(xdp->data), len,
-					   true);
+		if (ret != NETSEC_XDP_TX) {
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(dring->page_pool, page, sync, true);
+		}
 		break;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -908,9 +913,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
 			ret = NETSEC_XDP_REDIR;
 		} else {
 			ret = NETSEC_XDP_CONSUMED;
-			page_pool_put_page(dring->page_pool,
-					   virt_to_head_page(xdp->data), len,
-					   true);
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(dring->page_pool, page, sync, true);
 		}
 		break;
 	default:
@@ -921,8 +925,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
 		/* fall through -- handle aborts by dropping packet */
 	case XDP_DROP:
 		ret = NETSEC_XDP_CONSUMED;
-		page_pool_put_page(dring->page_pool,
-				   virt_to_head_page(xdp->data), len, true);
+		page = virt_to_head_page(xdp->data);
+		page_pool_put_page(dring->page_pool, page, sync, true);
 		break;
 	}
 
@@ -936,10 +940,14 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
 	struct netsec_rx_pkt_info rx_info;
 	enum dma_data_direction dma_dir;
 	struct bpf_prog *xdp_prog;
+	struct xdp_buff xdp;
 	u16 xdp_xmit = 0;
 	u32 xdp_act = 0;
 	int done = 0;
 
+	xdp.rxq = &dring->xdp_rxq;
+	xdp.frame_sz = PAGE_SIZE;
+
 	rcu_read_lock();
 	xdp_prog = READ_ONCE(priv->xdp_prog);
 	dma_dir = page_pool_get_dma_dir(dring->page_pool);
@@ -953,7 +961,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
 		struct sk_buff *skb = NULL;
 		u16 pkt_len, desc_len;
 		dma_addr_t dma_handle;
-		struct xdp_buff xdp;
 		void *buf_addr;
 
 		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
@@ -1002,7 +1009,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
 		xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
 		xdp_set_data_meta_invalid(&xdp);
 		xdp.data_end = xdp.data + pkt_len;
-		xdp.rxq = &dring->xdp_rxq;
 
 		if (xdp_prog) {
 			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);