Commit 52b07e56 authored by Alexei Starovoitov

Merge branch 'samples: bpf: Refactor XDP programs with libbpf'



"Daniel T. Lee" says:

====================
To avoid confusion caused by the increasing fragmentation of BPF
loader programs, this patchset replaces the previous bpf_load loader
with the libbpf loader.

Thanks to libbpf's bpf_link interface, managing a tracepoint BPF
program is much easier. bpf_program__attach_tracepoint enables the
tracepoint event and attaches the BPF program to it through a single
bpf_link, so there is no need to manage event_fd and prog_fd
separately.
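
As a minimal sketch of that pattern (the object, program, and
tracepoint names here are illustrative, not taken from this series):

#include <errno.h>
#include <bpf/libbpf.h>

/* Enable a tracepoint event and attach a BPF program to it in one
 * step; the returned bpf_link tears both down again later via a
 * single bpf_link__destroy().
 */
static int attach_one_tp(struct bpf_object *obj)
{
	struct bpf_program *prog;
	struct bpf_link *link;

	/* hypothetical section name, for illustration only */
	prog = bpf_object__find_program_by_title(obj,
			"tracepoint/xdp/xdp_redirect_err");
	if (!prog)
		return -ENOENT;

	link = bpf_program__attach_tracepoint(prog, "xdp", "xdp_redirect_err");
	if (libbpf_get_error(link))
		return -EINVAL;

	bpf_link__destroy(link);	/* detaches prog, disables the event */
	return 0;
}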

And with the addition of the generic bpf_program__attach() to libbpf,
it is now possible to attach BPF programs with __attach() instead of
explicitly calling __attach_<type>().
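
A sketch of the generic variant, assuming an already-loaded bpf_object
(the tp_links/tp_cnt names mirror the code below):

#include <errno.h>
#include <bpf/libbpf.h>

#define NUM_TP 8

static struct bpf_link *tp_links[NUM_TP];
static int tp_cnt;

/* libbpf derives the attach type from each program's SEC() name,
 * so a single loop can attach every program in the object.
 */
static int attach_all(struct bpf_object *obj)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		tp_links[tp_cnt] = bpf_program__attach(prog);
		if (libbpf_get_error(tp_links[tp_cnt])) {
			tp_links[tp_cnt] = NULL;
			return -EINVAL;
		}
		tp_cnt++;
	}
	return 0;
}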

This patchset refactors xdp_monitor using this libbpf API: bpf_load is
removed and the loading logic is migrated to libbpf. attach_tracepoint()
is also replaced with the generic __attach() method in xdp_redirect_cpu.
Moreover, the maps in the _kern programs have been converted to
BTF-defined maps.
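
The map conversion follows this before/after shape (illustrative map
name; only one of the two definitions would exist in a real _kern.c,
and the actual conversions appear in the diffs below):

#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* before: legacy definition, parsed from the "maps" section by bpf_load */
struct bpf_map_def SEC("maps") my_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u64),
	.max_entries = 2,
};

/* after: BTF-defined map in ".maps"; key/value carry real types, so
 * the loader and tools such as bpftool see full BTF type information
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, 2);
} my_cnt SEC(".maps");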
---
Changes in v2:
 - added cleanup logic for bpf_link and bpf_object in xdp_monitor
   (see the sketch after this message)
 - match program sections with bpf_program__is_<type>() instead of strncmp()
 - revert BTF key/val type to the default for BPF_MAP_TYPE_PERF_EVENT_ARRAY
 - split increment into a separate statement
 - refactor pointer array initialization
 - error code cleanup
====================
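
The v2 cleanup logic for bpf_link and bpf_object reduces to the
following pattern (a sketch mirroring the xdp_monitor_user.c hunks
below; names as in that file):

#include <bpf/libbpf.h>

/* Destroy every attached bpf_link, then close the object; the same
 * path serves the error cases and the SIGINT/SIGTERM handler.
 */
static void cleanup(struct bpf_object *obj, struct bpf_link **tp_links,
		    int tp_cnt)
{
	while (tp_cnt)
		bpf_link__destroy(tp_links[--tp_cnt]);

	bpf_object__close(obj);
}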

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 673e3752 321f6324
samples/bpf/Makefile +2 −2
@@ -98,8 +98,8 @@ test_map_in_map-objs := test_map_in_map_user.o
per_socket_stats_example-objs := cookie_uid_helper_example.o
xdp_redirect-objs := xdp_redirect_user.o
xdp_redirect_map-objs := xdp_redirect_map_user.o
-xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
-xdp_monitor-objs := bpf_load.o xdp_monitor_user.o
+xdp_redirect_cpu-objs := xdp_redirect_cpu_user.o
+xdp_monitor-objs := xdp_monitor_user.o
xdp_rxq_info-objs := xdp_rxq_info_user.o
syscall_tp-objs := syscall_tp_user.o
cpustat-objs := cpustat_user.o
samples/bpf/xdp_monitor_kern.c +30 −30
@@ -6,21 +6,21 @@
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct bpf_map_def SEC("maps") redirect_err_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u64),
	.max_entries = 2,
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, 2);
	/* TODO: have entries for all possible errno's */
};
} redirect_err_cnt SEC(".maps");

#define XDP_UNKNOWN	XDP_REDIRECT + 1
struct bpf_map_def SEC("maps") exception_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(u64),
	.max_entries	= XDP_UNKNOWN + 1,
};
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, XDP_UNKNOWN + 1);
} exception_cnt SEC(".maps");

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in:                kernel/include/trace/events/xdp.h
@@ -129,19 +129,19 @@ struct datarec {
};
#define MAX_CPUS 64

struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(struct datarec),
	.max_entries	= MAX_CPUS,
};
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, MAX_CPUS);
} cpumap_enqueue_cnt SEC(".maps");

struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(struct datarec),
	.max_entries	= 1,
};
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, 1);
} cpumap_kthread_cnt SEC(".maps");

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
 * Code in:         kernel/include/trace/events/xdp.h
@@ -210,12 +210,12 @@ int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
	return 0;
}

struct bpf_map_def SEC("maps") devmap_xmit_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(struct datarec),
	.max_entries	= 1,
};
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, 1);
} devmap_xmit_cnt SEC(".maps");

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
 * Code in:         kernel/include/trace/events/xdp.h
samples/bpf/xdp_monitor_user.c +120 −39
@@ -26,12 +26,37 @@ static const char *__doc_err_only__=
#include <net/if.h>
#include <time.h>

+#include <signal.h>
#include <bpf/bpf.h>
-#include "bpf_load.h"
+#include <bpf/libbpf.h>
#include "bpf_util.h"

enum map_type {
	REDIRECT_ERR_CNT,
	EXCEPTION_CNT,
	CPUMAP_ENQUEUE_CNT,
	CPUMAP_KTHREAD_CNT,
	DEVMAP_XMIT_CNT,
};

static const char *const map_type_strings[] = {
	[REDIRECT_ERR_CNT] = "redirect_err_cnt",
	[EXCEPTION_CNT] = "exception_cnt",
	[CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
	[CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
	[DEVMAP_XMIT_CNT] = "devmap_xmit_cnt",
};

#define NUM_MAP 5
#define NUM_TP 8

static int tp_cnt;
static int map_cnt;
static int verbose = 1;
static bool debug = false;
struct bpf_map *map_data[NUM_MAP] = {};
struct bpf_link *tp_links[NUM_TP] = {};
struct bpf_object *obj;

static const struct option long_options[] = {
	{"help",	no_argument,		NULL, 'h' },
@@ -41,6 +66,16 @@ static const struct option long_options[] = {
	{0, 0, NULL,  0 }
};

static void int_exit(int sig)
{
	/* Detach tracepoints */
	while (tp_cnt)
		bpf_link__destroy(tp_links[--tp_cnt]);

	bpf_object__close(obj);
	exit(0);
}

/* C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) */
#define EXIT_FAIL_MEM	5

@@ -483,23 +518,23 @@ static bool stats_collect(struct stats_record *rec)
	 * this can happen by someone running perf-record -e
	 */

-	fd = map_data[0].fd; /* map0: redirect_err_cnt */
+	fd = bpf_map__fd(map_data[REDIRECT_ERR_CNT]);
	for (i = 0; i < REDIR_RES_MAX; i++)
		map_collect_record_u64(fd, i, &rec->xdp_redirect[i]);

-	fd = map_data[1].fd; /* map1: exception_cnt */
+	fd = bpf_map__fd(map_data[EXCEPTION_CNT]);
	for (i = 0; i < XDP_ACTION_MAX; i++) {
		map_collect_record_u64(fd, i, &rec->xdp_exception[i]);
	}

-	fd = map_data[2].fd; /* map2: cpumap_enqueue_cnt */
+	fd = bpf_map__fd(map_data[CPUMAP_ENQUEUE_CNT]);
	for (i = 0; i < MAX_CPUS; i++)
		map_collect_record(fd, i, &rec->xdp_cpumap_enqueue[i]);

-	fd = map_data[3].fd; /* map3: cpumap_kthread_cnt */
+	fd = bpf_map__fd(map_data[CPUMAP_KTHREAD_CNT]);
	map_collect_record(fd, 0, &rec->xdp_cpumap_kthread);

-	fd = map_data[4].fd; /* map4: devmap_xmit_cnt */
+	fd = bpf_map__fd(map_data[DEVMAP_XMIT_CNT]);
	map_collect_record(fd, 0, &rec->xdp_devmap_xmit);

	return true;
@@ -598,8 +633,8 @@ static void stats_poll(int interval, bool err_only)

	/* TODO Need more advanced stats on error types */
	if (verbose) {
		printf(" - Stats map0: %s\n", map_data[0].name);
		printf(" - Stats map1: %s\n", map_data[1].name);
		printf(" - Stats map0: %s\n", bpf_map__name(map_data[0]));
		printf(" - Stats map1: %s\n", bpf_map__name(map_data[1]));
		printf("\n");
	}
	fflush(stdout);
@@ -618,44 +653,51 @@ static void stats_poll(int interval, bool err_only)

static void print_bpf_prog_info(void)
{
-	int i;
+	struct bpf_program *prog;
+	struct bpf_map *map;
+	int i = 0;

	/* Prog info */
-	printf("Loaded BPF prog have %d bpf program(s)\n", prog_cnt);
-	for (i = 0; i < prog_cnt; i++) {
-		printf(" - prog_fd[%d] = fd(%d)\n", i, prog_fd[i]);
+	printf("Loaded BPF prog have %d bpf program(s)\n", tp_cnt);
+	bpf_object__for_each_program(prog, obj) {
+		printf(" - prog_fd[%d] = fd(%d)\n", i, bpf_program__fd(prog));
+		i++;
	}

+	i = 0;
	/* Maps info */
-	printf("Loaded BPF prog have %d map(s)\n", map_data_count);
-	for (i = 0; i < map_data_count; i++) {
-		char *name = map_data[i].name;
-		int fd     = map_data[i].fd;
+	printf("Loaded BPF prog have %d map(s)\n", map_cnt);
+	bpf_object__for_each_map(map, obj) {
+		const char *name = bpf_map__name(map);
+		int fd		 = bpf_map__fd(map);

		printf(" - map_data[%d] = fd(%d) name:%s\n", i, fd, name);
+		i++;
	}

	/* Event info */
-	printf("Searching for (max:%d) event file descriptor(s)\n", prog_cnt);
-	for (i = 0; i < prog_cnt; i++) {
-		if (event_fd[i] != -1)
-			printf(" - event_fd[%d] = fd(%d)\n", i, event_fd[i]);
+	printf("Searching for (max:%d) event file descriptor(s)\n", tp_cnt);
+	for (i = 0; i < tp_cnt; i++) {
+		int fd = bpf_link__fd(tp_links[i]);
+
+		if (fd != -1)
+			printf(" - event_fd[%d] = fd(%d)\n", i, fd);
	}
}

int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+	struct bpf_program *prog;
	int longindex = 0, opt;
-	int ret = EXIT_SUCCESS;
-	char bpf_obj_file[256];
+	int ret = EXIT_FAILURE;
+	enum map_type type;
+	char filename[256];

	/* Default settings: */
	bool errors_only = true;
	int interval = 2;

-	snprintf(bpf_obj_file, sizeof(bpf_obj_file), "%s_kern.o", argv[0]);
-
	/* Parse commands line args */
	while ((opt = getopt_long(argc, argv, "hDSs:",
				  long_options, &longindex)) != -1) {
@@ -672,40 +714,79 @@ int main(int argc, char **argv)
		case 'h':
		default:
			usage(argv);
-			return EXIT_FAILURE;
+			return ret;
		}
	}

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
-		return EXIT_FAILURE;
+		return ret;
	}

	/* Remove tracepoint program when program is interrupted or killed */
	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	obj = bpf_object__open_file(filename, NULL);
	if (libbpf_get_error(obj)) {
		printf("ERROR: opening BPF object file failed\n");
		obj = NULL;
		goto cleanup;
	}

-	if (load_bpf_file(bpf_obj_file)) {
-		printf("ERROR - bpf_log_buf: %s", bpf_log_buf);
-		return EXIT_FAILURE;
+	/* load BPF program */
+	if (bpf_object__load(obj)) {
+		printf("ERROR: loading BPF object file failed\n");
+		goto cleanup;
	}

-	if (!prog_fd[0]) {
-		printf("ERROR - load_bpf_file: %s\n", strerror(errno));
-		return EXIT_FAILURE;
+	for (type = 0; type < NUM_MAP; type++) {
+		map_data[type] =
+			bpf_object__find_map_by_name(obj, map_type_strings[type]);
+
+		if (libbpf_get_error(map_data[type])) {
+			printf("ERROR: finding a map in obj file failed\n");
+			goto cleanup;
+		}
+		map_cnt++;
	}

	bpf_object__for_each_program(prog, obj) {
		tp_links[tp_cnt] = bpf_program__attach(prog);
		if (libbpf_get_error(tp_links[tp_cnt])) {
			printf("ERROR: bpf_program__attach failed\n");
			tp_links[tp_cnt] = NULL;
			goto cleanup;
		}
		tp_cnt++;
	}

	if (debug) {
		print_bpf_prog_info();
	}

-	/* Unload/stop tracepoint event by closing fd's */
+	/* Unload/stop tracepoint event by closing bpf_link's */
	if (errors_only) {
-		/* The prog_fd[i] and event_fd[i] depend on the
-		 * order the functions was defined in _kern.c
+		/* The bpf_link[i] depend on the order of
+		 * the functions was defined in _kern.c
		 */
-		close(event_fd[2]); /* tracepoint/xdp/xdp_redirect */
-		close(prog_fd[2]);  /* func: trace_xdp_redirect */
-		close(event_fd[3]); /* tracepoint/xdp/xdp_redirect_map */
-		close(prog_fd[3]);  /* func: trace_xdp_redirect_map */
+		bpf_link__destroy(tp_links[2]);	/* tracepoint/xdp/xdp_redirect */
+		tp_links[2] = NULL;
+
+		bpf_link__destroy(tp_links[3]);	/* tracepoint/xdp/xdp_redirect_map */
+		tp_links[3] = NULL;
	}

	stats_poll(interval, errors_only);

	ret = EXIT_SUCCESS;

cleanup:
	/* Detach tracepoints */
	while (tp_cnt)
		bpf_link__destroy(tp_links[--tp_cnt]);

	bpf_object__close(obj);
	return ret;
}
samples/bpf/xdp_redirect_cpu_user.c +72 −81
@@ -37,18 +37,35 @@ static __u32 prog_id;

static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static int n_cpus;
-static int cpu_map_fd;
-static int rx_cnt_map_fd;
-static int redirect_err_cnt_map_fd;
-static int cpumap_enqueue_cnt_map_fd;
-static int cpumap_kthread_cnt_map_fd;
-static int cpus_available_map_fd;
-static int cpus_count_map_fd;
-static int cpus_iterator_map_fd;
-static int exception_cnt_map_fd;
+
+enum map_type {
+	CPU_MAP,
+	RX_CNT,
+	REDIRECT_ERR_CNT,
+	CPUMAP_ENQUEUE_CNT,
+	CPUMAP_KTHREAD_CNT,
+	CPUS_AVAILABLE,
+	CPUS_COUNT,
+	CPUS_ITERATOR,
+	EXCEPTION_CNT,
+};
+
+static const char *const map_type_strings[] = {
+	[CPU_MAP] = "cpu_map",
+	[RX_CNT] = "rx_cnt",
+	[REDIRECT_ERR_CNT] = "redirect_err_cnt",
+	[CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
+	[CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
+	[CPUS_AVAILABLE] = "cpus_available",
+	[CPUS_COUNT] = "cpus_count",
+	[CPUS_ITERATOR] = "cpus_iterator",
+	[EXCEPTION_CNT] = "exception_cnt",
+};

#define NUM_TP 5
-struct bpf_link *tp_links[NUM_TP] = { 0 };
+#define NUM_MAP 9
+struct bpf_link *tp_links[NUM_TP] = {};
+static int map_fds[NUM_MAP];
static int tp_cnt = 0;

/* Exit return codes */
@@ -527,20 +544,20 @@ static void stats_collect(struct stats_record *rec)
{
	int fd, i;

-	fd = rx_cnt_map_fd;
+	fd = map_fds[RX_CNT];
	map_collect_percpu(fd, 0, &rec->rx_cnt);

-	fd = redirect_err_cnt_map_fd;
+	fd = map_fds[REDIRECT_ERR_CNT];
	map_collect_percpu(fd, 1, &rec->redir_err);

-	fd = cpumap_enqueue_cnt_map_fd;
+	fd = map_fds[CPUMAP_ENQUEUE_CNT];
	for (i = 0; i < n_cpus; i++)
		map_collect_percpu(fd, i, &rec->enq[i]);

-	fd = cpumap_kthread_cnt_map_fd;
+	fd = map_fds[CPUMAP_KTHREAD_CNT];
	map_collect_percpu(fd, 0, &rec->kthread);

-	fd = exception_cnt_map_fd;
+	fd = map_fds[EXCEPTION_CNT];
	map_collect_percpu(fd, 0, &rec->exception);
}

@@ -565,7 +582,7 @@ static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value,
	/* Add a CPU entry to cpumap, as this allocate a cpu entry in
	 * the kernel for the cpu.
	 */
-	ret = bpf_map_update_elem(cpu_map_fd, &cpu, value, 0);
+	ret = bpf_map_update_elem(map_fds[CPU_MAP], &cpu, value, 0);
	if (ret) {
		fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
		exit(EXIT_FAIL_BPF);
@@ -574,21 +591,21 @@ static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value,
	/* Inform bpf_prog's that a new CPU is available to select
	 * from via some control maps.
	 */
-	ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
+	ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &avail_idx, &cpu, 0);
	if (ret) {
		fprintf(stderr, "Add to avail CPUs failed\n");
		exit(EXIT_FAIL_BPF);
	}

	/* When not replacing/updating existing entry, bump the count */
-	ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);
+	ret = bpf_map_lookup_elem(map_fds[CPUS_COUNT], &key, &curr_cpus_count);
	if (ret) {
		fprintf(stderr, "Failed reading curr cpus_count\n");
		exit(EXIT_FAIL_BPF);
	}
	if (new) {
		curr_cpus_count++;
-		ret = bpf_map_update_elem(cpus_count_map_fd, &key,
+		ret = bpf_map_update_elem(map_fds[CPUS_COUNT], &key,
					  &curr_cpus_count, 0);
		if (ret) {
			fprintf(stderr, "Failed write curr cpus_count\n");
@@ -612,7 +629,7 @@ static void mark_cpus_unavailable(void)
	int ret, i;

	for (i = 0; i < n_cpus; i++) {
-		ret = bpf_map_update_elem(cpus_available_map_fd, &i,
+		ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &i,
					  &invalid_cpu, 0);
		if (ret) {
			fprintf(stderr, "Failed marking CPU unavailable\n");
@@ -665,68 +682,37 @@ static void stats_poll(int interval, bool use_separators, char *prog_name,
	free_stats_record(prev);
}

-static struct bpf_link * attach_tp(struct bpf_object *obj,
-				   const char *tp_category,
-				   const char* tp_name)
+static int init_tracepoints(struct bpf_object *obj)
{
	struct bpf_program *prog;
-	struct bpf_link *link;
-	char sec_name[PATH_MAX];
-	int len;

-	len = snprintf(sec_name, PATH_MAX, "tracepoint/%s/%s",
-		       tp_category, tp_name);
-	if (len < 0)
-		exit(EXIT_FAIL);
-
-	prog = bpf_object__find_program_by_title(obj, sec_name);
-	if (!prog) {
-		fprintf(stderr, "ERR: finding progsec: %s\n", sec_name);
-		exit(EXIT_FAIL_BPF);
-	}
-
-	link = bpf_program__attach_tracepoint(prog, tp_category, tp_name);
-	if (libbpf_get_error(link))
-		exit(EXIT_FAIL_BPF);
-
-	return link;
-}
-
-static void init_tracepoints(struct bpf_object *obj) {
-	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_redirect_err");
-	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_redirect_map_err");
-	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_exception");
-	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_cpumap_enqueue");
-	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_cpumap_kthread");
+	bpf_object__for_each_program(prog, obj) {
+		if (bpf_program__is_tracepoint(prog) != true)
+			continue;
+
+		tp_links[tp_cnt] = bpf_program__attach(prog);
+		if (libbpf_get_error(tp_links[tp_cnt])) {
+			tp_links[tp_cnt] = NULL;
+			return -EINVAL;
+		}
+
+		tp_cnt++;
+	}
+
+	return 0;
}

static int init_map_fds(struct bpf_object *obj)
{
-	/* Maps updated by tracepoints */
-	redirect_err_cnt_map_fd =
-		bpf_object__find_map_fd_by_name(obj, "redirect_err_cnt");
-	exception_cnt_map_fd =
-		bpf_object__find_map_fd_by_name(obj, "exception_cnt");
-	cpumap_enqueue_cnt_map_fd =
-		bpf_object__find_map_fd_by_name(obj, "cpumap_enqueue_cnt");
-	cpumap_kthread_cnt_map_fd =
-		bpf_object__find_map_fd_by_name(obj, "cpumap_kthread_cnt");
-
-	/* Maps used by XDP */
-	rx_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt");
-	cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map");
-	cpus_available_map_fd =
-		bpf_object__find_map_fd_by_name(obj, "cpus_available");
-	cpus_count_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_count");
-	cpus_iterator_map_fd =
-		bpf_object__find_map_fd_by_name(obj, "cpus_iterator");
-
-	if (cpu_map_fd < 0 || rx_cnt_map_fd < 0 ||
-	    redirect_err_cnt_map_fd < 0 || cpumap_enqueue_cnt_map_fd < 0 ||
-	    cpumap_kthread_cnt_map_fd < 0 || cpus_available_map_fd < 0 ||
-	    cpus_count_map_fd < 0 || cpus_iterator_map_fd < 0 ||
-	    exception_cnt_map_fd < 0)
-		return -ENOENT;
+	enum map_type type;
+
+	for (type = 0; type < NUM_MAP; type++) {
+		map_fds[type] =
+			bpf_object__find_map_fd_by_name(obj,
+							map_type_strings[type]);
+
+		if (map_fds[type] < 0)
+			return -ENOENT;
+	}

	return 0;
}
@@ -795,13 +781,13 @@ int main(int argc, char **argv)
	bool stress_mode = false;
	struct bpf_program *prog;
	struct bpf_object *obj;
	int err = EXIT_FAIL;
	char filename[256];
	int added_cpus = 0;
	int longindex = 0;
	int interval = 2;
	int add_cpu = -1;
-	int opt, err;
-	int prog_fd;
+	int opt, prog_fd;
	int *cpu, i;
	__u32 qsize;

@@ -824,24 +810,29 @@ int main(int argc, char **argv)
	}

	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
-		return EXIT_FAIL;
+		return err;

	if (prog_fd < 0) {
		fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
			strerror(errno));
-		return EXIT_FAIL;
+		return err;
	}

-	init_tracepoints(obj);
+	if (init_tracepoints(obj) < 0) {
+		fprintf(stderr, "ERR: bpf_program__attach failed\n");
+		return err;
+	}

	if (init_map_fds(obj) < 0) {
		fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n");
-		return EXIT_FAIL;
+		return err;
	}
	mark_cpus_unavailable();

	cpu = malloc(n_cpus * sizeof(int));
	if (!cpu) {
		fprintf(stderr, "failed to allocate cpu array\n");
-		return EXIT_FAIL;
+		return err;
	}
	memset(cpu, 0, n_cpus * sizeof(int));

@@ -960,14 +951,12 @@ int main(int argc, char **argv)
	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (!prog) {
		fprintf(stderr, "bpf_object__find_program_by_title failed\n");
-		err = EXIT_FAIL;
		goto out;
	}

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		fprintf(stderr, "bpf_program__fd failed\n");
-		err = EXIT_FAIL;
		goto out;
	}

@@ -986,6 +975,8 @@ int main(int argc, char **argv)

	stats_poll(interval, use_separators, prog_name, mprog_name,
		   &value, stress_mode);

	err = EXIT_OK;
out:
	free(cpu);
	return err;
samples/bpf/xdp_sample_pkts_kern.c +6 −8
@@ -5,14 +5,12 @@
#include <bpf/bpf_helpers.h>

#define SAMPLE_SIZE 64ul
-#define MAX_CPUS 128

-struct bpf_map_def SEC("maps") my_map = {
-	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
-	.key_size = sizeof(int),
-	.value_size = sizeof(u32),
-	.max_entries = MAX_CPUS,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(u32));
+} my_map SEC(".maps");

SEC("xdp_sample")
int xdp_sample_prog(struct xdp_md *ctx)