Commit a5aa8e52 authored by Magnus Karlsson, committed by Daniel Borkmann
Browse files

xsk: Move xsk_tx_list and its lock to buffer pool



Move the xsk_tx_list and the xsk_tx_list_lock from the umem to
the buffer pool. This is so that, in a later commit, we can share the
umem between multiple HW queues. There is one xsk_tx_list per
device and queue id, so it should be located in the buffer pool.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-7-git-send-email-magnus.karlsson@intel.com
parent c2d3d6a4
Loading
Loading
Loading
Loading
+1 −3
Original line number Diff line number Diff line
@@ -29,8 +29,6 @@ struct xdp_umem {
	u8 flags;
	int id;
	bool zc;
	spinlock_t xsk_tx_list_lock;
	struct list_head xsk_tx_list;
};

struct xsk_map {
@@ -57,7 +55,7 @@ struct xdp_sock {
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	struct list_head tx_list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
+5 −0
Original line number Diff line number Diff line
@@ -52,6 +52,9 @@ struct xsk_buff_pool {
	void *addrs;
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct work_struct work;
	struct xdp_buff_xsk *free_heads[];
@@ -67,6 +70,8 @@ void xp_release(struct xdp_buff_xsk *xskb);
void xp_get_pool(struct xsk_buff_pool *pool);
void xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);
+0 −26
Original line number Diff line number Diff line
@@ -23,30 +23,6 @@

static DEFINE_IDA(umem_ida);

void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_tx_list);
	spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
}

void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
	list_del_rcu(&xs->list);
	spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
}

static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
@@ -205,8 +181,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;
	INIT_LIST_HEAD(&umem->xsk_tx_list);
	spin_lock_init(&umem->xsk_tx_list_lock);

	refcount_set(&umem->users, 1);

+0 −2
Original line number Diff line number Diff line
@@ -10,8 +10,6 @@

void xdp_get_umem(struct xdp_umem *umem);
void xdp_put_umem(struct xdp_umem *umem);
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr);

#endif /* XDP_UMEM_H_ */
+6 −9
Original line number Diff line number Diff line
@@ -51,14 +51,13 @@ EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_umem *umem = pool->umem;
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();
@@ -79,14 +78,13 @@ EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_umem *umem = pool->umem;
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();
@@ -302,7 +300,7 @@ void xsk_tx_release(struct xsk_buff_pool *pool)
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->umem->xsk_tx_list, list) {
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
@@ -312,11 +310,10 @@ EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_umem *umem = pool->umem;
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
@@ -524,7 +521,7 @@ static void xsk_unbind_dev(struct xdp_sock *xs)
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
@@ -744,7 +741,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xdp_add_sk_umem(xs->umem, xs);
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
Loading