Commit 148709bc authored by David S. Miller's avatar David S. Miller
Browse files


Alexei Starovoitov says:

====================
pull-request: bpf 2019-12-11

The following pull-request contains BPF updates for your *net* tree.

We've added 8 non-merge commits during the last day which contain
a total of 10 files changed, 126 insertions(+), 18 deletions(-).

The main changes are:

1) Make BPF trampoline co-exist with ftrace-based tracers, from Alexei.

2) Fix build in minimal configurations, from Arnd.

3) Fix mips, riscv bpf_tail_call limit, from Paul.

4) Fix bpftool segfault, from Toke.

5) Fix samples/bpf, from Daniel.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 24dee0c7 fe330089
Loading
Loading
Loading
Loading
+5 −4
Original line number Diff line number Diff line
@@ -604,6 +604,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
	int off, b_off;
	int tcc_reg;

	ctx->flags |= EBPF_SEEN_TC;
	/*
@@ -616,14 +617,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/*
	 * if (--TCC < 0)
	 * if (TCC-- < 0)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, daddiu, MIPS_R_T5,
		   (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
	tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
	emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bltz, MIPS_R_T5, b_off);
	emit_instr(ctx, bltz, tcc_reg, b_off);
	/*
	 * prog = array->ptrs[index];
	 * if (prog == NULL)
+2 −2
Original line number Diff line number Diff line
@@ -631,14 +631,14 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
		return -1;
	emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx);

	/* if (--TCC < 0)
	/* if (TCC-- < 0)
	 *     goto out;
	 */
	emit(rv_addi(RV_REG_T1, tcc, -1), ctx);
	off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
	if (is_13b_check(off, insn))
		return -1;
	emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx);
	emit(rv_blt(tcc, RV_REG_ZERO, off >> 1), ctx);

	/* prog = array->ptrs[index];
	 * if (!prog)
+1 −0
Original line number Diff line number Diff line
@@ -461,6 +461,7 @@ struct bpf_trampoline {
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
+1 −0
Original line number Diff line number Diff line
@@ -3470,6 +3470,7 @@ static u8 bpf_ctx_convert_map[] = {
	[_id] = __ctx_convert##_id,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
	0, /* avoid empty array */
};
#undef BPF_MAP_TYPE

+58 −6
Original line number Diff line number Diff line
@@ -3,6 +3,7 @@
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
@@ -59,6 +60,60 @@ out:
	return tr;
}

/* Tell whether @ip is an address managed by ftrace.
 * Returns 1 when ftrace knows the location, 0 when it does not, and
 * -EFAULT when ftrace resolves @ip to a different address (unexpected,
 * so warn once).
 */
static int is_ftrace_location(void *ip)
{
	long found = ftrace_location((long)ip);

	if (!found)
		return 0;
	return WARN_ON_ONCE(found != (long)ip) ? -EFAULT : 1;
}

/* Detach the trampoline from its function entry.
 * Goes through the ftrace direct-call API when ftrace manages the
 * location, otherwise patches the call site back out directly.
 * Returns 0 on success or a negative error from the underlying call.
 */
static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;

	if (tr->func.ftrace_managed)
		return unregister_ftrace_direct((long)ip, (long)old_addr);

	return bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
}

/* Switch the function entry from @old_addr to @new_addr.
 * Uses the ftrace direct-call API for ftrace-managed locations and a
 * direct text poke otherwise.
 * Returns 0 on success or a negative error from the underlying call.
 */
static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;

	if (tr->func.ftrace_managed)
		return modify_ftrace_direct((long)ip, (long)old_addr,
					    (long)new_addr);

	return bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
}

/* First-time attach of the trampoline to its function entry.
 * Probes whether the location is ftrace-managed, records the answer in
 * tr->func.ftrace_managed, then registers either via the ftrace
 * direct-call API or via a direct text poke.
 * Returns 0 on success or a negative error.
 */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int managed = is_ftrace_location(ip);

	if (managed < 0)
		return managed;
	tr->func.ftrace_managed = managed;

	if (managed)
		return register_ftrace_direct((long)ip, (long)new_addr);

	return bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
}

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into PAGE_SIZE / 2
 */
@@ -77,8 +132,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
	int err;

	if (fentry_cnt + fexit_cnt == 0) {
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
					 old_image, NULL);
		err = unregister_fentry(tr, old_image);
		tr->selector = 0;
		goto out;
	}
@@ -105,12 +159,10 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)

	if (tr->selector)
		/* progs already running at this address */
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
					 old_image, new_image);
		err = modify_fentry(tr, old_image, new_image);
	else
		/* first time registering */
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, NULL,
					 new_image);
		err = register_fentry(tr, new_image);
	if (err)
		goto out;
	tr->selector++;
Loading