Commit 5f0d736e authored by David S. Miller's avatar David S. Miller
Browse files


Daniel Borkmann says:

====================
pull-request: bpf-next 2019-04-28

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Introduce BPF socket local storage map so that BPF programs can store
   private data they associate with a socket (instead of e.g. a separate hash
   table), from Martin.

2) Add support for bpftool to dump BTF types. This is done through a new
   `bpftool btf dump` sub-command, from Andrii.

3) Enable BPF-based flow dissector for skb-less eth_get_headlen() calls, which
   was previously not supported since the skb was used to look up the netns, from Stanislav.

4) Add an opt-in interface for tracepoints to expose a writable context
   for attached BPF programs, used here for NBD sockets, from Matt.

5) BPF xadd related arm64 JIT fixes and scalability improvements, from Daniel.

6) Change the skb->protocol for bpf_skb_adjust_room() helper in order to
   support tunnels such as sit. Add selftests as well, from Willem.

7) Various smaller misc fixes.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents b1a79360 9076c49b
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -10742,6 +10742,7 @@ L: linux-block@vger.kernel.org
L:	nbd@other.debian.org
F:	Documentation/blockdev/nbd.txt
F:	drivers/block/nbd.c
F:	include/trace/events/nbd.h
F:	include/uapi/linux/nbd.h

NETWORK DROP MONITOR
+8 −0
Original line number Diff line number Diff line
@@ -277,6 +277,7 @@ __AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
__AARCH64_INSN_FUNCS(prfm,	0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit,	0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(str_reg,	0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldadd,	0x3F20FC00, 0xB8200000)
__AARCH64_INSN_FUNCS(ldr_reg,	0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit,	0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit,	0xFF000000, 0x98000000)
@@ -394,6 +395,13 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type);
u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
			   enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
+40 −0
Original line number Diff line number Diff line
@@ -734,6 +734,46 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
					    state);
}

/*
 * Encode an LSE LDADD instruction: atomically add "value" to the word at
 * [address], writing the sum back to memory and placing the pre-add memory
 * contents in "result".
 *
 * Only 32-bit and 64-bit accesses are supported; any other size encoding
 * is rejected with AARCH64_BREAK_FAULT.
 */
u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
			   enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	u32 insn;

	/* Guard clause: reject anything but the 32/64-bit variants. */
	if (size != AARCH64_INSN_SIZE_32 && size != AARCH64_INSN_SIZE_64) {
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	/* Start from the LDADD opcode template and fill in size + registers. */
	insn = aarch64_insn_encode_ldst_size(size,
					     aarch64_insn_get_ldadd_value());
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}

u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	/*
	 * STADD is simply encoded as an alias for LDADD with XZR as
	 * the destination register.
	 */
	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
				      value, size);
}

static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
+3 −5
Original line number Diff line number Diff line
@@ -100,11 +100,9 @@
#define A64_STXR(sf, Rt, Rn, Rs) \
	A64_LSX(sf, Rt, Rn, Rs, STORE_EX)

/* Prefetch: emit a PRFM hint; type/target/policy are token-pasted onto the
 * AARCH64_INSN_PRFM_* enum prefixes. */
#define A64_PRFM(Rn, type, target, policy) \
	aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \
				  AARCH64_INSN_PRFM_TARGET_##target, \
				  AARCH64_INSN_PRFM_POLICY_##policy)
/* LSE atomics: STADD atomically adds Rs to [Rn] with no result register.
 * NOTE(review): sf presumably selects the 32- vs 64-bit variant via
 * A64_SIZE() — confirm against the A64_SIZE definition. */
#define A64_STADD(sf, Rn, Rs) \
	aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))

/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
+19 −10
Original line number Diff line number Diff line
@@ -365,7 +365,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
	const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
			  BPF_CLASS(code) == BPF_JMP;
	const bool isdw = BPF_SIZE(code) == BPF_DW;
	u8 jmp_cond;
	u8 jmp_cond, reg;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
@@ -756,19 +756,28 @@ emit_cond_jmp:
			break;
		}
		break;

	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		if (!off) {
			reg = dst;
		} else {
			emit_a64_mov_i(1, tmp, off, ctx);
			emit(A64_ADD(1, tmp, tmp, dst), ctx);
		emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
		emit(A64_LDXR(isdw, tmp2, tmp), ctx);
			reg = tmp;
		}
		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
			emit(A64_STADD(isdw, reg, src), ctx);
		} else {
			emit(A64_LDXR(isdw, tmp2, reg), ctx);
			emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
		emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
			emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
			jmp_offset = -3;
			check_imm19(jmp_offset);
			emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
		}
		break;

	default:
Loading