Commit c2d3d6a4 authored by Magnus Karlsson, committed by Daniel Borkmann
Browse files

xsk: Move queue_id, dev and need_wakeup to buffer pool



Move queue_id, dev, and need_wakeup from the umem to the
buffer pool. This is so that, in a later commit, we can share the umem
between multiple HW queues. There is one buffer pool per dev and
queue id, so these variables belong to the buffer pool, not
the umem. Need_wakeup is also something that is set at a per-napi
level, so there is usually one per device and queue id. So move
it to the buffer pool too.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-6-git-send-email-magnus.karlsson@intel.com
parent 7361f9c3
Loading
Loading
Loading
Loading
+0 −3
Original line number Diff line number Diff line
@@ -26,11 +26,8 @@ struct xdp_umem {
	refcount_t users;
	struct page **pgs;
	u32 npgs;
	u16 queue_id;
	u8 need_wakeup;
	u8 flags;
	int id;
	struct net_device *dev;
	bool zc;
	spinlock_t xsk_tx_list_lock;
	struct list_head xsk_tx_list;
+4 −0
Original line number Diff line number Diff line
@@ -43,11 +43,15 @@ struct xsk_buff_pool {
	u32 headroom;
	u32 chunk_size;
	u32 frame_len;
	u16 queue_id;
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	bool unaligned;
	struct xdp_umem *umem;
	void *addrs;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct work_struct work;
	struct xdp_buff_xsk *free_heads[];
+2 −20
Original line number Diff line number Diff line
@@ -63,26 +63,9 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
	}
}

void xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			 u16 queue_id)
{
	umem->dev = dev;
	umem->queue_id = queue_id;

	dev_hold(dev);
}

void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	dev_put(umem->dev);
	umem->dev = NULL;
	umem->zc = false;
}

static void xdp_umem_release(struct xdp_umem *umem)
{
	xdp_umem_clear_dev(umem);

	umem->zc = false;
	ida_simple_remove(&umem_ida, umem->id);

	xdp_umem_unpin_pages(umem);
@@ -181,8 +164,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
		return -EINVAL;
	}

	if (mr->flags & ~(XDP_UMEM_UNALIGNED_CHUNK_FLAG |
			XDP_UMEM_USES_NEED_WAKEUP))
	if (mr->flags & ~XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
+0 −4
Original line number Diff line number Diff line
@@ -8,10 +8,6 @@

#include <net/xdp_sock_drv.h>

void xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			 u16 queue_id);
void xdp_umem_clear_dev(struct xdp_umem *umem);
bool xdp_umem_validate_queues(struct xdp_umem *umem);
void xdp_get_umem(struct xdp_umem *umem);
void xdp_put_umem(struct xdp_umem *umem);
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
+13 −21
Original line number Diff line number Diff line
@@ -41,13 +41,11 @@ bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_umem *umem = pool->umem;

	if (umem->need_wakeup & XDP_WAKEUP_RX)
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	umem->need_wakeup |= XDP_WAKEUP_RX;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

@@ -56,7 +54,7 @@ void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
	struct xdp_umem *umem = pool->umem;
	struct xdp_sock *xs;

	if (umem->need_wakeup & XDP_WAKEUP_TX)
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
@@ -65,19 +63,17 @@ void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
	}
	rcu_read_unlock();

	umem->need_wakeup |= XDP_WAKEUP_TX;
	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_umem *umem = pool->umem;

	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	umem->need_wakeup &= ~XDP_WAKEUP_RX;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

@@ -86,7 +82,7 @@ void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
	struct xdp_umem *umem = pool->umem;
	struct xdp_sock *xs;

	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
@@ -95,13 +91,13 @@ void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
	}
	rcu_read_unlock();

	umem->need_wakeup &= ~XDP_WAKEUP_TX;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

@@ -478,16 +474,16 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_umem *umem;
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	umem = xs->umem;
	pool = xs->pool;

	if (umem->need_wakeup) {
	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, umem->need_wakeup);
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
@@ -731,11 +727,9 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xdp_umem_assign_dev(xs->umem, dev, qid);
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			xdp_umem_clear_dev(xs->umem);
			goto out_unlock;
		}

@@ -743,7 +737,6 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			xdp_umem_clear_dev(xs->umem);
			goto out_unlock;
		}
	}
@@ -1089,7 +1082,6 @@ static int xsk_notifier(struct notifier_block *this,

				/* Clear device references. */
				xp_clear_dev(xs->pool);
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
Loading