Commit 192b6638, authored by Andrii Nakryiko, committed by Daniel Borkmann
Browse files

libbpf: Prevent loading vmlinux BTF twice



Prevent loading/parsing vmlinux BTF twice in some cases: for CO-RE relocations
and for BTF-aware hooks (tp_btf, fentry/fexit, etc).

Fixes: a6ed02ca ("libbpf: Load btf_vmlinux only once per object.")
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20200624043805.1794620-1-andriin@fb.com
parent 135c783f
Loading
Loading
Loading
Loading
+22 −11
Original line number Diff line number Diff line
@@ -2504,11 +2504,24 @@ static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)

static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
{
	bool need_vmlinux_btf = false;
	struct bpf_program *prog;
	int err;

	/* CO-RE relocations need kernel BTF */
	if (obj->btf_ext && obj->btf_ext->field_reloc_info.len)
		need_vmlinux_btf = true;

	bpf_object__for_each_program(prog, obj) {
		if (libbpf_prog_needs_vmlinux_btf(prog)) {
			need_vmlinux_btf = true;
			break;
		}
	}

	if (!need_vmlinux_btf)
		return 0;

	obj->btf_vmlinux = libbpf_find_kernel_btf();
	if (IS_ERR(obj->btf_vmlinux)) {
		err = PTR_ERR(obj->btf_vmlinux);
@@ -2518,10 +2531,6 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
	}
	return 0;
}
	}

	return 0;
}

static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
@@ -4945,8 +4954,8 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
	if (targ_btf_path)
		targ_btf = btf__parse_elf(targ_btf_path, NULL);
	else
		targ_btf = libbpf_find_kernel_btf();
	if (IS_ERR(targ_btf)) {
		targ_btf = obj->btf_vmlinux;
	if (IS_ERR_OR_NULL(targ_btf)) {
		pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
		return PTR_ERR(targ_btf);
	}
@@ -4987,6 +4996,8 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
	}

out:
	/* obj->btf_vmlinux is freed at the end of object load phase */
	if (targ_btf != obj->btf_vmlinux)
		btf__free(targ_btf);
	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {