Commit 5b92a28a authored by Alexei Starovoitov, committed by Daniel Borkmann
Browse files

bpf: Support attaching tracing BPF program to other BPF programs



Allow FENTRY/FEXIT BPF programs to attach to other BPF programs of any type
including their subprograms. This feature allows snooping on input and output
packets in XDP, TC programs including their return values. In order to do that
the verifier needs to track types not only of vmlinux, but types of other BPF
programs as well. The verifier also needs to translate uapi/linux/bpf.h types
used by networking programs into kernel internal BTF types used by FENTRY/FEXIT
BPF programs. In some cases LLVM optimizations can remove arguments from BPF
subprograms without adjusting BTF info that LLVM backend knows. When BTF info
disagrees with actual types that the verifier sees the BPF trampoline has to
fall back to a conservative mode and treat all arguments as u64. The FENTRY/FEXIT
program can still attach to such subprograms, but it won't be able to recognize
pointer types like 'struct sk_buff *' and it won't be able to pass them to
bpf_skb_output() for dumping packets to user space. The FENTRY/FEXIT program
would need to use bpf_probe_read_kernel() instead.

The BPF_PROG_LOAD command is extended with attach_prog_fd field. When it's set
to zero the attach_btf_id is one of the vmlinux BTF type ids. When attach_prog_fd
points to previously loaded BPF program the attach_btf_id is BTF type id of
main function or one of its subprograms.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20191114185720.1641606-18-ast@kernel.org
parent 8c1b6e69
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -504,7 +504,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
	u8 *prog;
	int ret;

	if (!is_kernel_text((long)ip))
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF trampoline in modules is not supported */
		return -EINVAL;

+1 −0
Original line number Diff line number Diff line
@@ -495,6 +495,7 @@ struct bpf_prog_aux {
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	struct bpf_prog *linked_prog;
	bool verifier_zext; /* Zero extensions has been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
+1 −0
Original line number Diff line number Diff line
@@ -88,6 +88,7 @@ static inline bool btf_type_is_func_proto(const struct btf_type *t)
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
						    u32 type_id)
+1 −0
Original line number Diff line number Diff line
@@ -425,6 +425,7 @@ union bpf_attr {
		__aligned_u64	line_info;	/* line info */
		__u32		line_info_cnt;	/* number of bpf_line_info records */
		__u32		attach_btf_id;	/* in-kernel BTF type id to attach to */
		__u32		attach_prog_fd; /* 0 to attach to vmlinux */
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
+62 −8
Original line number Diff line number Diff line
@@ -3530,6 +3530,20 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
	return ctx_type;
}

/* Translate a program's uapi context type (e.g. the type behind
 * 'struct __sk_buff *') into the kernel-internal BTF type id used by
 * tracing programs (e.g. 'struct sk_buff').
 *
 * Returns the kernel BTF type id on success, or -ENOENT when @t is not a
 * recognized context type for @prog_type.
 */
static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
				     struct btf *btf,
				     const struct btf_type *t,
				     enum bpf_prog_type prog_type)
{
	const struct btf_member *ctx_member;

	ctx_member = btf_get_prog_ctx_type(log, btf, t, prog_type);
	if (!ctx_member)
		return -ENOENT;

	/* The kernel-side ctx type is laid out as the member immediately
	 * following the uapi ctx type member returned above.
	 */
	return (ctx_member + 1)->type;
}

struct btf *btf_parse_vmlinux(void)
{
	struct btf_verifier_env *env = NULL;
@@ -3602,15 +3616,29 @@ errout:
	return ERR_PTR(err);
}

/* Return the BTF that @prog's attach_btf_id must be resolved against:
 * the target program's own BTF when @prog is linked to another BPF
 * program, otherwise the vmlinux BTF.
 */
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
{
	const struct bpf_prog *tgt = prog->aux->linked_prog;

	return tgt ? tgt->aux->btf : btf_vmlinux;
}

bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info)
{
	const struct btf_type *t = prog->aux->attach_func_proto;
	struct bpf_prog *tgt_prog = prog->aux->linked_prog;
	struct btf *btf = bpf_prog_get_target_btf(prog);
	const char *tname = prog->aux->attach_func_name;
	struct bpf_verifier_log *log = info->log;
	const struct btf_param *args;
	u32 nr_args, arg;
	int ret;

	if (off % 8) {
		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
@@ -3619,7 +3647,8 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
	}
	arg = off / 8;
	args = (const struct btf_param *)(t + 1);
	nr_args = btf_type_vlen(t);
	/* if (t == NULL) Fall back to default BPF prog with 5 u64 arguments */
	nr_args = t ? btf_type_vlen(t) : 5;
	if (prog->aux->attach_btf_trace) {
		/* skip first 'void *__data' argument in btf_trace_##name typedef */
		args++;
@@ -3628,18 +3657,24 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,

	if (prog->expected_attach_type == BPF_TRACE_FEXIT &&
	    arg == nr_args) {
		if (!t)
			/* Default prog with 5 args. 6th arg is retval. */
			return true;
		/* function return type */
		t = btf_type_by_id(btf_vmlinux, t->type);
		t = btf_type_by_id(btf, t->type);
	} else if (arg >= nr_args) {
		bpf_log(log, "func '%s' doesn't have %d-th argument\n",
			tname, arg + 1);
		return false;
	} else {
		t = btf_type_by_id(btf_vmlinux, args[arg].type);
		if (!t)
			/* Default prog with 5 args */
			return true;
		t = btf_type_by_id(btf, args[arg].type);
	}
	/* skip modifiers */
	while (btf_type_is_modifier(t))
		t = btf_type_by_id(btf_vmlinux, t->type);
		t = btf_type_by_id(btf, t->type);
	if (btf_type_is_int(t))
		/* accessing a scalar */
		return true;
@@ -3647,7 +3682,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		bpf_log(log,
			"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
			tname, arg,
			__btf_name_by_offset(btf_vmlinux, t->name_off),
			__btf_name_by_offset(btf, t->name_off),
			btf_kind_str[BTF_INFO_KIND(t->info)]);
		return false;
	}
@@ -3662,10 +3697,19 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
	info->reg_type = PTR_TO_BTF_ID;
	info->btf_id = t->type;

	t = btf_type_by_id(btf_vmlinux, t->type);
	if (tgt_prog) {
		ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type);
		if (ret > 0) {
			info->btf_id = ret;
			return true;
		} else {
			return false;
		}
	}
	t = btf_type_by_id(btf, t->type);
	/* skip modifiers */
	while (btf_type_is_modifier(t))
		t = btf_type_by_id(btf_vmlinux, t->type);
		t = btf_type_by_id(btf, t->type);
	if (!btf_type_is_struct(t)) {
		bpf_log(log,
			"func '%s' arg%d type %s is not a struct\n",
@@ -3674,7 +3718,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
	}
	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
		tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)],
		__btf_name_by_offset(btf_vmlinux, t->name_off));
		__btf_name_by_offset(btf, t->name_off));
	return true;
}

@@ -3954,6 +3998,16 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
	u32 i, nargs;
	int ret;

	if (!func) {
		/* BTF function prototype doesn't match the verifier types.
		 * Fall back to 5 u64 args.
		 */
		for (i = 0; i < 5; i++)
			m->arg_size[i] = 8;
		m->ret_size = 8;
		m->nr_args = 5;
		return 0;
	}
	args = (const struct btf_param *)(func + 1);
	nargs = btf_type_vlen(func);
	if (nargs >= MAX_BPF_FUNC_ARGS) {
Loading