Commit 2b43470a authored by Björn Töpel, committed by Alexei Starovoitov
Browse files

xsk: Introduce AF_XDP buffer allocation API



In order to simplify AF_XDP zero-copy enablement for NIC driver
developers, a new AF_XDP buffer allocation API is added. The
implementation is based on a single core (single producer/consumer)
buffer pool for the AF_XDP UMEM.

A buffer is allocated using the xsk_buff_alloc() function, and
returned using xsk_buff_free(). If a buffer is dissociated from the
pool, e.g. when a buffer is passed to an AF_XDP socket, the buffer is
said to be released. Currently, the release function is only used by
the AF_XDP internals and is not visible to the driver.

Drivers using this API should register the XDP memory model with the
new MEM_TYPE_XSK_BUFF_POOL type.

The API is defined in net/xdp_sock_drv.h.

The buffer type is struct xdp_buff, and follows the lifetime of
regular xdp_buffs, i.e. the lifetime of an xdp_buff is restricted to
a NAPI context. In other words, the API is not replacing xdp_frames.

In addition to introducing the API and implementations, the AF_XDP
core is migrated to use the new APIs.

rfc->v1: Fixed build errors/warnings for m68k and riscv. (kbuild test
         robot)
         Added headroom/chunk size getter. (Maxim/Björn)

v1->v2: Swapped SoBs. (Maxim)

v2->v3: Initialize struct xdp_buff member frame_sz. (Björn)
        Add API to query the DMA address of a frame. (Maxim)
        Do DMA sync for CPU till the end of the frame to handle
        possible growth (frame_sz). (Maxim)

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200520192103.355233-6-bjorn.topel@gmail.com
parent 89e4a376
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -40,6 +40,7 @@ enum xdp_mem_type {
	MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
	MEM_TYPE_PAGE_POOL,
	MEM_TYPE_ZERO_COPY,
	MEM_TYPE_XSK_BUFF_POOL,
	MEM_TYPE_MAX,
};

@@ -119,7 +120,8 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
	int metasize;
	int headroom;

	if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY)
	if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY ||
	    xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
		return xdp_convert_zc_to_xdp_frame(xdp);

	/* Assure headroom is available for storing info */
+2 −0
Original line number Diff line number Diff line
@@ -31,11 +31,13 @@ struct xdp_umem_fq_reuse {
struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xsk_buff_pool *pool;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	u32 chunk_size;
	struct user_struct *user;
	refcount_t users;
	struct work_struct work;
+164 −0
Original line number Diff line number Diff line
@@ -7,6 +7,7 @@
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#ifdef CONFIG_XDP_SOCKETS

@@ -101,6 +102,94 @@ static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
	return umem->chunk_size_nohr;
}

static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
{
	return XDP_PACKET_HEADROOM + umem->headroom;
}

static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
{
	return umem->chunk_size;
}

static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
{
	return xsk_umem_get_chunk_size(umem) - xsk_umem_get_headroom(umem);
}

static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(umem->pool, rxq);
}

static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
				      unsigned long attrs)
{
	xp_dma_unmap(umem->pool, attrs);
}

static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
				   unsigned long attrs)
{
	return xp_dma_map(umem->pool, dev, attrs, umem->pgs, umem->npgs);
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
{
	return xp_alloc(umem->pool);
}

static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
{
	return xp_can_alloc(umem->pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_free(xskb);
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
{
	return xp_raw_get_dma(umem->pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
{
	return xp_raw_get_data(umem->pool, addr);
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(umem->pool, dma, size);
}

#else

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
@@ -212,6 +301,81 @@ static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
	return 0;
}

/* Stub variants used when CONFIG_XDP_SOCKETS is disabled: every query
 * reports zero/false/NULL and every operation is a no-op, so driver
 * callers need no ifdefs of their own.
 */
static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
{
	return 0;
}

static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
{
	return 0;
}

static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
{
	return 0;
}

static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
				      unsigned long attrs)
{
}

static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
				   unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
{
	return NULL;
}

static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */
+56 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct device;
struct page;

/* Pool-managed rx buffer. Embeds the xdp_buff handed to drivers/XDP
 * programs; pool code recovers this wrapper via container_of() on the
 * xdp member.
 */
struct xdp_buff_xsk {
	struct xdp_buff xdp;		/* buffer view given to drivers */
	dma_addr_t dma;			/* returned by xp_get_dma() */
	dma_addr_t frame_dma;		/* returned by xp_get_frame_dma() */
	struct xsk_buff_pool *pool;	/* owning pool */
	bool unaligned;			/* NOTE(review): presumably set for
					 * unaligned-chunk umems -- confirm */
	u64 orig_addr;			/* NOTE(review): presumably the umem
					 * address this buffer came from */
	struct list_head free_list_node;	/* linkage on a pool free list */
};

/* AF_XDP core. */
struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
				u32 chunk_size, u32 headroom, u64 size,
				bool unaligned);
void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_release(struct xdp_buff_xsk *xskb);
u64 xp_get_handle(struct xdp_buff_xsk *xskb);
bool xp_validate_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb);
dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb);
void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb);
void xp_dma_sync_for_device(struct xsk_buff_pool *pool, dma_addr_t dma,
			    size_t size);

#endif /* XSK_BUFF_POOL_H_ */
+2 −1
Original line number Diff line number Diff line
@@ -287,7 +287,8 @@ TRACE_EVENT(xdp_devmap_xmit,
	FN(PAGE_SHARED)		\
	FN(PAGE_ORDER0)		\
	FN(PAGE_POOL)		\
	FN(ZERO_COPY)
	FN(ZERO_COPY)		\
	FN(XSK_BUFF_POOL)

#define __MEM_TYPE_TP_FN(x)	\
	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
Loading