Commit dfe44c1f authored by Charles McLachlan, committed by David S. Miller

sfc: handle XDP_TX outcomes of XDP eBPF programs

Provide an ndo_xdp_xmit function that uses the XDP tx queue for this
CPU to send the packet.

Signed-off-by: Charles McLachlan <cmclachlan@solarflare.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3990a8ff
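
As an illustration (not part of this patch), the smallest XDP eBPF program
that yields the XDP_TX verdict handled here simply reflects every received
frame back out of the same port. Sketch only, assuming a standard
clang/libbpf build; a real reflector would also swap the Ethernet addresses:

/* Minimal XDP program whose XDP_TX verdict exercises this path. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tx_all(struct xdp_md *ctx)
{
	return XDP_TX;	/* retransmit the frame on the same interface */
}

char _license[] SEC("license") = "GPL";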
drivers/net/ethernet/sfc/efx.c  +14 −0
@@ -228,6 +228,8 @@ static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+			u32 flags);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
@@ -2633,6 +2635,7 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
	.ndo_udp_tunnel_add	= efx_udp_tunnel_add,
	.ndo_udp_tunnel_del	= efx_udp_tunnel_del,
+	.ndo_xdp_xmit		= efx_xdp_xmit,
	.ndo_bpf		= efx_xdp
};

@@ -2680,6 +2683,17 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
	}
}

+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+			u32 flags)
+{
+	struct efx_nic *efx = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
+}
+
static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
drivers/net/ethernet/sfc/efx.h  +3 −0
@@ -322,4 +322,7 @@ static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
	return true;
}

+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+		       bool flush);
+
#endif /* EFX_EFX_H */
drivers/net/ethernet/sfc/rx.c  +11 −1
@@ -653,6 +653,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
	u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
	struct efx_rx_queue *rx_queue;
	struct bpf_prog *xdp_prog;
+	struct xdp_frame *xdpf;
	struct xdp_buff xdp;
	u32 xdp_act;
	s16 offset;
@@ -713,7 +714,16 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
		break;

	case XDP_TX:
-		return -EOPNOTSUPP;
+		/* Buffer ownership passes to tx on success. */
+		xdpf = convert_to_xdp_frame(&xdp);
+		err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
+		if (unlikely(err != 1)) {
+			efx_free_rx_buffers(rx_queue, rx_buf, 1);
+			if (net_ratelimit())
+				netif_err(efx, rx_err, efx->net_dev,
+					  "XDP TX failed (%d)\n", err);
+		}
+		break;

	case XDP_REDIRECT:
		err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
drivers/net/ethernet/sfc/tx.c  +88 −0
@@ -599,6 +599,94 @@ err:
	return NETDEV_TX_OK;
}

+static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		xdp_return_frame_rx_napi(xdpfs[i]);
+}
+
+/* Transmit a packet from an XDP buffer
+ *
+ * Returns number of packets sent on success, error code otherwise.
+ * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
+ * (for XDP redirect).
+ */
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+		       bool flush)
+{
+	struct efx_tx_buffer *tx_buffer;
+	struct efx_tx_queue *tx_queue;
+	struct xdp_frame *xdpf;
+	dma_addr_t dma_addr;
+	unsigned int len;
+	int space;
+	int cpu;
+	int i;
+
+	cpu = raw_smp_processor_id();
+
+	if (!efx->xdp_tx_queue_count ||
+	    unlikely(cpu >= efx->xdp_tx_queue_count))
+		return -EINVAL;
+
+	tx_queue = efx->xdp_tx_queues[cpu];
+	if (unlikely(!tx_queue))
+		return -EINVAL;
+
+	if (unlikely(n && !xdpfs))
+		return -EINVAL;
+
+	if (!n)
+		return 0;
+
+	/* Check for available space. We should never need multiple
+	 * descriptors per frame.
+	 */
+	space = efx->txq_entries +
+		tx_queue->read_count - tx_queue->insert_count;
+
+	for (i = 0; i < n; i++) {
+		xdpf = xdpfs[i];
+
+		if (i >= space)
+			break;
+
+		/* We'll want a descriptor for this tx. */
+		prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
+
+		len = xdpf->len;
+
+		/* Map for DMA. */
+		dma_addr = dma_map_single(&efx->pci_dev->dev,
+					  xdpf->data, len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
+			break;
+
+		/* Create descriptor and set up for unmapping DMA. */
+		tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+		tx_buffer->xdpf = xdpf;
+		tx_buffer->flags = EFX_TX_BUF_XDP |
+				   EFX_TX_BUF_MAP_SINGLE;
+		tx_buffer->dma_offset = 0;
+		tx_buffer->unmap_len = len;
+		tx_queue->tx_packets++;
+	}
+
+	/* Pass mapped frames to hardware. */
+	if (flush && i > 0)
+		efx_nic_push_buffers(tx_queue);
+
+	if (i == 0)
+		return -EIO;
+
+	efx_xdp_return_frames(n - i, xdpfs + i);
+
+	return i;
+}
+
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
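
A usage sketch of the efx_xdp_tx_buffers() contract documented above
(hypothetical wrapper, not part of the patch): a non-negative return is the
number of frames queued, and the function has already freed any frames it
could not send, so ownership stays with the caller only on a negative return.

/* Hypothetical caller (illustrative names): send a batch of XDP frames
 * and report how many were dropped.
 */
static int example_xdp_send(struct efx_nic *efx,
			    struct xdp_frame **frames, int n)
{
	int sent = efx_xdp_tx_buffers(efx, n, frames, true /* flush */);

	if (sent < 0)
		return sent;	/* no XDP queue for this CPU, bad args, or
				 * nothing mapped; caller still owns frames
				 */

	/* frames[sent..n-1] were freed inside efx_xdp_tx_buffers(). */
	return n - sent;	/* number of frames dropped */
}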