Commit a412ef54 authored by Björn Töpel, committed by Daniel Borkmann
Browse files

samples/bpf: adapted to new uapi



Here, the xdpsock sample application is adjusted to the new descriptor
format.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent bbff2f32
Loading
Loading
Loading
Loading
+36 −48
Original line number Diff line number Diff line
@@ -46,6 +46,7 @@

#define NUM_FRAMES 131072
#define FRAME_HEADROOM 0
#define FRAME_SHIFT 11
#define FRAME_SIZE 2048
#define NUM_DESCS 1024
#define BATCH_SIZE 16
@@ -55,6 +56,7 @@

#define DEBUG_HEXDUMP 0

typedef __u64 u64;
typedef __u32 u32;

static unsigned long prev_time;
@@ -81,12 +83,12 @@ struct xdp_umem_uqueue {
	u32 size;
	u32 *producer;
	u32 *consumer;
	u32 *ring;
	u64 *ring;
	void *map;
};

struct xdp_umem {
	char (*frames)[FRAME_SIZE];
	char *frames;
	struct xdp_umem_uqueue fq;
	struct xdp_umem_uqueue cq;
	int fd;
@@ -214,7 +216,7 @@ static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i].idx;
		fq->ring[idx] = d[i].addr;
	}

	u_smp_wmb();
@@ -224,7 +226,7 @@ static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
	return 0;
}

static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u32 *d,
static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u64 *d,
				      size_t nb)
{
	u32 i;
@@ -246,7 +248,7 @@ static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u32 *d,
}

static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
					       u32 *d, size_t nb)
					       u64 *d, size_t nb)
{
	u32 idx, i, entries = umem_nb_avail(cq, nb);

@@ -266,10 +268,9 @@ static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
	return entries;
}

static inline void *xq_get_data(struct xdpsock *xsk, __u32 idx, __u32 off)
static inline void *xq_get_data(struct xdpsock *xsk, u64 addr)
{
	lassert(idx < NUM_FRAMES);
	return &xsk->umem->frames[idx][off];
	return &xsk->umem->frames[addr];
}

static inline int xq_enq(struct xdp_uqueue *uq,
@@ -285,9 +286,8 @@ static inline int xq_enq(struct xdp_uqueue *uq,
	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].idx = descs[i].idx;
		r[idx].addr = descs[i].addr;
		r[idx].len = descs[i].len;
		r[idx].offset = descs[i].offset;
	}

	u_smp_wmb();
@@ -297,7 +297,7 @@ static inline int xq_enq(struct xdp_uqueue *uq,
}

static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
				 __u32 idx, unsigned int ndescs)
				 unsigned int id, unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;
@@ -308,9 +308,8 @@ static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].idx	= idx + i;
		r[idx].addr	= (id + i) << FRAME_SHIFT;
		r[idx].len	= sizeof(pkt_data) - 1;
		r[idx].offset	= 0;
	}

	u_smp_wmb();
@@ -357,17 +356,21 @@ static void swap_mac_addresses(void *data)
	*dst_addr = tmp;
}

#if DEBUG_HEXDUMP
static void hex_dump(void *pkt, size_t length, const char *prefix)
static void hex_dump(void *pkt, size_t length, u64 addr)
{
	int i = 0;
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", prefix);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
@@ -382,12 +385,11 @@ static void hex_dump(void *pkt, size_t length, const char *prefix)
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", prefix);
				printf("%s | ", buf);
		}
	}
	printf("\n");
}
#endif

static size_t gen_eth_frame(char *frame)
{
@@ -412,8 +414,8 @@ static struct xdp_umem *xdp_umem_configure(int sfd)

	mr.addr = (__u64)bufs;
	mr.len = NUM_FRAMES * FRAME_SIZE;
	mr.frame_size = FRAME_SIZE;
	mr.frame_headroom = FRAME_HEADROOM;
	mr.chunk_size = FRAME_SIZE;
	mr.headroom = FRAME_HEADROOM;

	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
@@ -426,7 +428,7 @@ static struct xdp_umem *xdp_umem_configure(int sfd)
			   &optlen) == 0);

	umem->fq.map = mmap(0, off.fr.desc +
			    FQ_NUM_DESCS * sizeof(u32),
			    FQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_FILL_RING);
@@ -439,7 +441,7 @@ static struct xdp_umem *xdp_umem_configure(int sfd)
	umem->fq.ring = umem->fq.map + off.fr.desc;

	umem->cq.map = mmap(0, off.cr.desc +
			     CQ_NUM_DESCS * sizeof(u32),
			     CQ_NUM_DESCS * sizeof(u64),
			     PROT_READ | PROT_WRITE,
			     MAP_SHARED | MAP_POPULATE, sfd,
			     XDP_UMEM_PGOFF_COMPLETION_RING);
@@ -451,14 +453,14 @@ static struct xdp_umem *xdp_umem_configure(int sfd)
	umem->cq.consumer = umem->cq.map + off.cr.consumer;
	umem->cq.ring = umem->cq.map + off.cr.desc;

	umem->frames = (char (*)[FRAME_SIZE])bufs;
	umem->frames = bufs;
	umem->fd = sfd;

	if (opt_bench == BENCH_TXONLY) {
		int i;

		for (i = 0; i < NUM_FRAMES; i++)
			(void)gen_eth_frame(&umem->frames[i][0]);
		for (i = 0; i < NUM_FRAMES * FRAME_SIZE; i += FRAME_SIZE)
			(void)gen_eth_frame(&umem->frames[i]);
	}

	return umem;
@@ -472,7 +474,7 @@ static struct xdpsock *xsk_configure(struct xdp_umem *umem)
	struct xdpsock *xsk;
	bool shared = true;
	socklen_t optlen;
	u32 i;
	u64 i;

	sfd = socket(PF_XDP, SOCK_RAW, 0);
	lassert(sfd >= 0);
@@ -508,7 +510,7 @@ static struct xdpsock *xsk_configure(struct xdp_umem *umem)
	lassert(xsk->rx.map != MAP_FAILED);

	if (!shared) {
		for (i = 0; i < NUM_DESCS / 2; i++)
		for (i = 0; i < NUM_DESCS * FRAME_SIZE; i += FRAME_SIZE)
			lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
				== 0);
	}
@@ -727,7 +729,7 @@ static void kick_tx(int fd)

static inline void complete_tx_l2fwd(struct xdpsock *xsk)
{
	u32 descs[BATCH_SIZE];
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;
	size_t ndescs;

@@ -749,7 +751,7 @@ static inline void complete_tx_l2fwd(struct xdpsock *xsk)

static inline void complete_tx_only(struct xdpsock *xsk)
{
	u32 descs[BATCH_SIZE];
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;

	if (!xsk->outstanding_tx)
@@ -774,17 +776,9 @@ static void rx_drop(struct xdpsock *xsk)
		return;

	for (i = 0; i < rcvd; i++) {
		u32 idx = descs[i].idx;

		lassert(idx < NUM_FRAMES);
#if DEBUG_HEXDUMP
		char *pkt;
		char buf[32];
		char *pkt = xq_get_data(xsk, descs[i].addr);

		pkt = xq_get_data(xsk, idx, descs[i].offset);
		sprintf(buf, "idx=%d", idx);
		hex_dump(pkt, descs[i].len, buf);
#endif
		hex_dump(pkt, descs[i].len, descs[i].addr);
	}

	xsk->rx_npkts += rcvd;
@@ -867,17 +861,11 @@ static void l2fwd(struct xdpsock *xsk)
		}

		for (i = 0; i < rcvd; i++) {
			char *pkt = xq_get_data(xsk, descs[i].idx,
						descs[i].offset);
			char *pkt = xq_get_data(xsk, descs[i].addr);

			swap_mac_addresses(pkt);
#if DEBUG_HEXDUMP
			char buf[32];
			u32 idx = descs[i].idx;

			sprintf(buf, "idx=%d", idx);
			hex_dump(pkt, descs[i].len, buf);
#endif
			hex_dump(pkt, descs[i].len, descs[i].addr);
		}

		xsk->rx_npkts += rcvd;