Commit 7bcfea96 authored by Alexei Starovoitov's avatar Alexei Starovoitov
Browse files

Merge branch 'bpftool-improvements'



Martin Lau says:

====================
When a map is storing a kernel's struct, its
map_info->btf_vmlinux_value_type_id is set.  The first map type
supporting it is BPF_MAP_TYPE_STRUCT_OPS.

This series adds support to dump this kind of map with BTF.
The first two patches are bug fixes which are only applicable to
bpf-next.

Please see individual patches for details.

v3:
- Remove unnecessary #include "libbpf_internal.h" from patch 5

v2:
- Expose bpf_find_kernel_btf() as a LIBBPF_API in patch 3 (Andrii)
- Cache btf_vmlinux in bpftool/map.c (Andrii)
====================

Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents 990bca1f 4e1ea332
Loading
Loading
Loading
Loading
+64 −38
Original line number Diff line number Diff line
@@ -48,6 +48,7 @@ const char * const map_type_name[] = {
	[BPF_MAP_TYPE_QUEUE]			= "queue",
	[BPF_MAP_TYPE_STACK]			= "stack",
	[BPF_MAP_TYPE_SK_STORAGE]		= "sk_storage",
	[BPF_MAP_TYPE_STRUCT_OPS]		= "struct_ops",
};

const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
@@ -251,6 +252,7 @@ static int do_dump_btf(const struct btf_dumper *d,
		       struct bpf_map_info *map_info, void *key,
		       void *value)
{
	__u32 value_id;
	int ret;

	/* start of key-value pair */
@@ -264,9 +266,12 @@ static int do_dump_btf(const struct btf_dumper *d,
			goto err_end_obj;
	}

	value_id = map_info->btf_vmlinux_value_type_id ?
		: map_info->btf_value_type_id;

	if (!map_is_per_cpu(map_info->type)) {
		jsonw_name(d->jw, "value");
		ret = btf_dumper_type(d, map_info->btf_value_type_id, value);
		ret = btf_dumper_type(d, value_id, value);
	} else {
		unsigned int i, n, step;

@@ -278,8 +283,7 @@ static int do_dump_btf(const struct btf_dumper *d,
			jsonw_start_object(d->jw);
			jsonw_int_field(d->jw, "cpu", i);
			jsonw_name(d->jw, "value");
			ret = btf_dumper_type(d, map_info->btf_value_type_id,
					      value + i * step);
			ret = btf_dumper_type(d, value_id, value + i * step);
			jsonw_end_object(d->jw);
			if (ret)
				break;
@@ -915,37 +919,63 @@ static int maps_have_btf(int *fds, int nb_fds)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	struct btf *btf = NULL;
	int err, i;

	for (i = 0; i < nb_fds; i++) {
		err = bpf_obj_get_info_by_fd(fds[i], &info, &len);
		if (err) {
			p_err("can't get map info: %s", strerror(errno));
			goto err_close;
			return -1;
		}

		err = btf__get_from_id(info.btf_id, &btf);
		if (err) {
		if (!info.btf_id)
			return 0;
	}

	return 1;
}

static struct btf *btf_vmlinux;

static struct btf *get_map_kv_btf(const struct bpf_map_info *info)
{
	struct btf *btf = NULL;

	if (info->btf_vmlinux_value_type_id) {
		if (!btf_vmlinux) {
			btf_vmlinux = libbpf_find_kernel_btf();
			if (IS_ERR(btf_vmlinux))
				p_err("failed to get kernel btf");
		}
		return btf_vmlinux;
	} else if (info->btf_value_type_id) {
		int err;

		err = btf__get_from_id(info->btf_id, &btf);
		if (err || !btf) {
			p_err("failed to get btf");
			goto err_close;
			btf = err ? ERR_PTR(err) : ERR_PTR(-ESRCH);
		}
	}

		if (!btf)
			return 0;
	return btf;
}

	return 1;
static void free_map_kv_btf(struct btf *btf)
{
	if (!IS_ERR(btf) && btf != btf_vmlinux)
		btf__free(btf);
}

err_close:
	for (; i < nb_fds; i++)
		close(fds[i]);
	return -1;
static void free_btf_vmlinux(void)
{
	if (!IS_ERR(btf_vmlinux))
		btf__free(btf_vmlinux);
}

static int
map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
	 bool enable_btf, bool show_header)
	 bool show_header)
{
	void *key, *value, *prev_key;
	unsigned int num_elems = 0;
@@ -962,18 +992,13 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,

	prev_key = NULL;

	if (enable_btf) {
		err = btf__get_from_id(info->btf_id, &btf);
		if (err || !btf) {
			/* enable_btf is true only if we've already checked
			 * that all maps have BTF information.
			 */
			p_err("failed to get btf");
	if (wtr) {
		btf = get_map_kv_btf(info);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto exit_free;
		}
	}

	if (wtr) {
		if (show_header) {
			jsonw_start_object(wtr);	/* map object */
			show_map_header_json(info, wtr);
@@ -1012,7 +1037,7 @@ exit_free:
	free(key);
	free(value);
	close(fd);
	btf__free(btf);
	free_map_kv_btf(btf);

	return err;
}
@@ -1021,7 +1046,7 @@ static int do_dump(int argc, char **argv)
{
	json_writer_t *wtr = NULL, *btf_wtr = NULL;
	struct bpf_map_info info = {};
	int nb_fds, i = 0, btf = 0;
	int nb_fds, i = 0;
	__u32 len = sizeof(info);
	int *fds = NULL;
	int err = -1;
@@ -1041,17 +1066,17 @@ static int do_dump(int argc, char **argv)
	if (json_output) {
		wtr = json_wtr;
	} else {
		btf = maps_have_btf(fds, nb_fds);
		if (btf < 0)
		int do_plain_btf;

		do_plain_btf = maps_have_btf(fds, nb_fds);
		if (do_plain_btf < 0)
			goto exit_close;
		if (btf) {

		if (do_plain_btf) {
			btf_wtr = get_btf_writer();
			if (btf_wtr) {
			wtr = btf_wtr;
			} else {
			if (!btf_wtr)
				p_info("failed to create json writer for btf. falling back to plain output");
				btf = 0;
			}
		}
	}

@@ -1062,7 +1087,7 @@ static int do_dump(int argc, char **argv)
			p_err("can't get map info: %s", strerror(errno));
			break;
		}
		err = map_dump(fds[i], &info, wtr, btf, nb_fds > 1);
		err = map_dump(fds[i], &info, wtr, nb_fds > 1);
		if (!wtr && i != nb_fds - 1)
			printf("\n");

@@ -1073,13 +1098,14 @@ static int do_dump(int argc, char **argv)
	if (wtr && nb_fds > 1)
		jsonw_end_array(wtr);	/* root array */

	if (btf)
	if (btf_wtr)
		jsonw_destroy(&btf_wtr);
exit_close:
	for (; i < nb_fds; i++)
		close(fds[i]);
exit_free:
	free(fds);
	free_btf_vmlinux();
	return err;
}

+96 −6
Original line number Diff line number Diff line
@@ -8,6 +8,10 @@
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/utsname.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <gelf.h>
@@ -20,8 +24,8 @@
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

#define BTF_MAX_NR_TYPES 0x7fffffff
#define BTF_MAX_STR_OFFSET 0x7fffffff
#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU

static struct btf_type btf_void;

@@ -53,7 +57,7 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
		if (btf->types_size == BTF_MAX_NR_TYPES)
			return -E2BIG;

		expand_by = max(btf->types_size >> 2, 16);
		expand_by = max(btf->types_size >> 2, 16U);
		new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);

		new_types = realloc(btf->types, sizeof(*new_types) * new_size);
@@ -289,7 +293,7 @@ int btf__align_of(const struct btf *btf, __u32 id)
	switch (kind) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
		return min(sizeof(void *), t->size);
		return min(sizeof(void *), (size_t)t->size);
	case BTF_KIND_PTR:
		return sizeof(void *);
	case BTF_KIND_TYPEDEF:
@@ -1401,7 +1405,7 @@ static int btf_dedup_hypot_map_add(struct btf_dedup *d,
	if (d->hypot_cnt == d->hypot_cap) {
		__u32 *new_list;

		d->hypot_cap += max(16, d->hypot_cap / 2);
		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
		new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
		if (!new_list)
			return -ENOMEM;
@@ -1697,7 +1701,7 @@ static int btf_dedup_strings(struct btf_dedup *d)
		if (strs.cnt + 1 > strs.cap) {
			struct btf_str_ptr *new_ptrs;

			strs.cap += max(strs.cnt / 2, 16);
			strs.cap += max(strs.cnt / 2, 16U);
			new_ptrs = realloc(strs.ptrs,
					   sizeof(strs.ptrs[0]) * strs.cap);
			if (!new_ptrs) {
@@ -2931,3 +2935,89 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
	}
	return 0;
}

static struct btf *btf_load_raw(const char *path)
{
	struct btf *btf;
	size_t read_cnt;
	struct stat st;
	void *data;
	FILE *f;

	if (stat(path, &st))
		return ERR_PTR(-errno);

	data = malloc(st.st_size);
	if (!data)
		return ERR_PTR(-ENOMEM);

	f = fopen(path, "rb");
	if (!f) {
		btf = ERR_PTR(-errno);
		goto cleanup;
	}

	read_cnt = fread(data, 1, st.st_size, f);
	fclose(f);
	if (read_cnt < st.st_size) {
		btf = ERR_PTR(-EBADF);
		goto cleanup;
	}

	btf = btf__new(data, read_cnt);

cleanup:
	free(data);
	return btf;
}

/*
 * Probe few well-known locations for vmlinux kernel image and try to load BTF
 * data out of it to use for target BTF.
 */
struct btf *libbpf_find_kernel_btf(void)
{
	struct {
		const char *path_fmt;
		bool raw_btf;
	} locations[] = {
		/* try canonical vmlinux BTF through sysfs first */
		{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
		/* fall back to trying to find vmlinux ELF on disk otherwise */
		{ "/boot/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/build/vmlinux" },
		{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
		{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
	};
	char path[PATH_MAX + 1];
	struct utsname buf;
	struct btf *btf;
	int i;

	uname(&buf);

	for (i = 0; i < ARRAY_SIZE(locations); i++) {
		snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);

		if (access(path, R_OK))
			continue;

		if (locations[i].raw_btf)
			btf = btf_load_raw(path);
		else
			btf = btf__parse_elf(path, NULL);

		pr_debug("loading kernel BTF '%s': %ld\n",
			 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
		if (IS_ERR(btf))
			continue;

		return btf;
	}

	pr_warn("failed to find valid kernel BTF\n");
	return ERR_PTR(-ESRCH);
}
+2 −0
Original line number Diff line number Diff line
@@ -102,6 +102,8 @@ LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf,
LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);

LIBBPF_API struct btf *libbpf_find_kernel_btf(void);

struct btf_dedup_opts {
	unsigned int dedup_table_size;
	bool dont_resolve_fwds;
+3 −90
Original line number Diff line number Diff line
@@ -73,7 +73,6 @@

#define __printf(a, b)	__attribute__((format(printf, a, b)))

static struct btf *bpf_find_kernel_btf(void);
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static struct bpf_program *bpf_object__find_prog_by_idx(struct bpf_object *obj,
							int idx);
@@ -848,7 +847,7 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
			continue;

		if (!kern_btf) {
			kern_btf = bpf_find_kernel_btf();
			kern_btf = libbpf_find_kernel_btf();
			if (IS_ERR(kern_btf))
				return PTR_ERR(kern_btf);
		}
@@ -4300,92 +4299,6 @@ static int bpf_core_reloc_insn(struct bpf_program *prog,
	return 0;
}

static struct btf *btf_load_raw(const char *path)
{
	struct btf *btf;
	size_t read_cnt;
	struct stat st;
	void *data;
	FILE *f;

	if (stat(path, &st))
		return ERR_PTR(-errno);

	data = malloc(st.st_size);
	if (!data)
		return ERR_PTR(-ENOMEM);

	f = fopen(path, "rb");
	if (!f) {
		btf = ERR_PTR(-errno);
		goto cleanup;
	}

	read_cnt = fread(data, 1, st.st_size, f);
	fclose(f);
	if (read_cnt < st.st_size) {
		btf = ERR_PTR(-EBADF);
		goto cleanup;
	}

	btf = btf__new(data, read_cnt);

cleanup:
	free(data);
	return btf;
}

/*
 * Probe few well-known locations for vmlinux kernel image and try to load BTF
 * data out of it to use for target BTF.
 */
static struct btf *bpf_find_kernel_btf(void)
{
	struct {
		const char *path_fmt;
		bool raw_btf;
	} locations[] = {
		/* try canonical vmlinux BTF through sysfs first */
		{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
		/* fall back to trying to find vmlinux ELF on disk otherwise */
		{ "/boot/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/vmlinux-%1$s" },
		{ "/lib/modules/%1$s/build/vmlinux" },
		{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s" },
		{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
		{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
	};
	char path[PATH_MAX + 1];
	struct utsname buf;
	struct btf *btf;
	int i;

	uname(&buf);

	for (i = 0; i < ARRAY_SIZE(locations); i++) {
		snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);

		if (access(path, R_OK))
			continue;

		if (locations[i].raw_btf)
			btf = btf_load_raw(path);
		else
			btf = btf__parse_elf(path, NULL);

		pr_debug("loading kernel BTF '%s': %ld\n",
			 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
		if (IS_ERR(btf))
			continue;

		return btf;
	}

	pr_warn("failed to find valid kernel BTF\n");
	return ERR_PTR(-ESRCH);
}

/* Output spec definition in the format:
 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
@@ -4620,7 +4533,7 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
	if (targ_btf_path)
		targ_btf = btf__parse_elf(targ_btf_path, NULL);
	else
		targ_btf = bpf_find_kernel_btf();
		targ_btf = libbpf_find_kernel_btf();
	if (IS_ERR(targ_btf)) {
		pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
		return PTR_ERR(targ_btf);
@@ -6595,7 +6508,7 @@ invalid_prog:
int libbpf_find_vmlinux_btf_id(const char *name,
			       enum bpf_attach_type attach_type)
{
	struct btf *btf = bpf_find_kernel_btf();
	struct btf *btf = libbpf_find_kernel_btf();
	char raw_tp_btf[128] = BTF_PREFIX;
	char *dst = raw_tp_btf + sizeof(BTF_PREFIX) - 1;
	const char *btf_name;
+1 −0
Original line number Diff line number Diff line
@@ -231,4 +231,5 @@ LIBBPF_0.0.7 {
		bpf_program__is_struct_ops;
		bpf_program__set_struct_ops;
		btf__align_of;
		libbpf_find_kernel_btf;
} LIBBPF_0.0.6;