Commit fe83963b authored by Daniel Borkmann, committed by Alexei Starovoitov
Browse files

bpf, sparc64: remove ld_abs/ld_ind



Since LD_ABS/LD_IND instructions are now removed from the core and
reimplemented through a combination of inlined BPF instructions and
a slow-path helper, we can get rid of the complexity from sparc64 JIT.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 816d9ef3
Loading
Loading
Loading
Loading
+4 −1
Original line number Diff line number Diff line
#
# Arch-specific network modules
#
obj-$(CONFIG_BPF_JIT) += bpf_jit_asm_$(BITS).o bpf_jit_comp_$(BITS).o
obj-$(CONFIG_BPF_JIT) += bpf_jit_comp_$(BITS).o
ifeq ($(BITS),32)
obj-$(CONFIG_BPF_JIT) += bpf_jit_asm_32.o
endif
+0 −29
Original line number Diff line number Diff line
@@ -33,35 +33,6 @@
#define I5		0x1d
#define FP		0x1e
#define I7		0x1f

#define r_SKB		L0
#define r_HEADLEN	L4
#define r_SKB_DATA	L5
#define r_TMP		G1
#define r_TMP2		G3

/* assembly code in arch/sparc/net/bpf_jit_asm_64.S */
extern u32 bpf_jit_load_word[];
extern u32 bpf_jit_load_half[];
extern u32 bpf_jit_load_byte[];
extern u32 bpf_jit_load_byte_msh[];
extern u32 bpf_jit_load_word_positive_offset[];
extern u32 bpf_jit_load_half_positive_offset[];
extern u32 bpf_jit_load_byte_positive_offset[];
extern u32 bpf_jit_load_byte_msh_positive_offset[];
extern u32 bpf_jit_load_word_negative_offset[];
extern u32 bpf_jit_load_half_negative_offset[];
extern u32 bpf_jit_load_byte_negative_offset[];
extern u32 bpf_jit_load_byte_msh_negative_offset[];

#else
#define r_RESULT	%o0
#define r_SKB		%o0
#define r_OFF		%o1
#define r_HEADLEN	%l4
#define r_SKB_DATA	%l5
#define r_TMP		%g1
#define r_TMP2		%g3
#endif

#endif /* _BPF_JIT_H */

arch/sparc/net/bpf_jit_asm_64.S

deleted file mode 100644 → 0
+0 −162
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/ptrace.h>

#include "bpf_jit_64.h"

#define SAVE_SZ		176
#define SCRATCH_OFF	STACK_BIAS + 128
#define BE_PTR(label)	be,pn %xcc, label
#define SIGN_EXTEND(reg)	sra reg, 0, reg

#define SKF_MAX_NEG_OFF	(-0x200000) /* SKF_LL_OFF from filter.h */

	.text
	.globl	bpf_jit_load_word
bpf_jit_load_word:
	cmp	r_OFF, 0
	bl	bpf_slow_path_word_neg
	 nop
	.globl	bpf_jit_load_word_positive_offset
bpf_jit_load_word_positive_offset:
	sub	r_HEADLEN, r_OFF, r_TMP
	cmp	r_TMP, 3
	ble	bpf_slow_path_word
	 add	r_SKB_DATA, r_OFF, r_TMP
	andcc	r_TMP, 3, %g0
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_RESULT
load_word_unaligned:
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x2], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x3], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_RESULT

	.globl	bpf_jit_load_half
bpf_jit_load_half:
	cmp	r_OFF, 0
	bl	bpf_slow_path_half_neg
	 nop
	.globl	bpf_jit_load_half_positive_offset
bpf_jit_load_half_positive_offset:
	sub	r_HEADLEN, r_OFF, r_TMP
	cmp	r_TMP, 1
	ble	bpf_slow_path_half
	 add	r_SKB_DATA, r_OFF, r_TMP
	andcc	r_TMP, 1, %g0
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_RESULT
load_half_unaligned:
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_RESULT

	.globl	bpf_jit_load_byte
bpf_jit_load_byte:
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_neg
	 nop
	.globl	bpf_jit_load_byte_positive_offset
bpf_jit_load_byte_positive_offset:
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte
	 nop
	retl
	 ldub	[r_SKB_DATA + r_OFF], r_RESULT

#define bpf_slow_path_common(LEN)	\
	save	%sp, -SAVE_SZ, %sp;	\
	mov	%i0, %o0;		\
	mov	%i1, %o1;		\
	add	%fp, SCRATCH_OFF, %o2;	\
	call	skb_copy_bits;		\
	 mov	(LEN), %o3;		\
	cmp	%o0, 0;			\
	restore;

bpf_slow_path_word:
	bpf_slow_path_common(4)
	bl	bpf_error
	 ld	[%sp + SCRATCH_OFF], r_RESULT
	retl
	 nop
bpf_slow_path_half:
	bpf_slow_path_common(2)
	bl	bpf_error
	 lduh	[%sp + SCRATCH_OFF], r_RESULT
	retl
	 nop
bpf_slow_path_byte:
	bpf_slow_path_common(1)
	bl	bpf_error
	 ldub	[%sp + SCRATCH_OFF], r_RESULT
	retl
	 nop

#define bpf_negative_common(LEN)			\
	save	%sp, -SAVE_SZ, %sp;			\
	mov	%i0, %o0;				\
	mov	%i1, %o1;				\
	SIGN_EXTEND(%o1);				\
	call	bpf_internal_load_pointer_neg_helper;	\
	 mov	(LEN), %o2;				\
	mov	%o0, r_TMP;				\
	cmp	%o0, 0;					\
	BE_PTR(bpf_error);				\
	 restore;

bpf_slow_path_word_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_word_negative_offset
bpf_jit_load_word_negative_offset:
	bpf_negative_common(4)
	andcc	r_TMP, 3, %g0
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_RESULT

bpf_slow_path_half_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_half_negative_offset
bpf_jit_load_half_negative_offset:
	bpf_negative_common(2)
	andcc	r_TMP, 1, %g0
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_RESULT

bpf_slow_path_byte_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_byte_negative_offset
bpf_jit_load_byte_negative_offset:
	bpf_negative_common(1)
	retl
	 ldub	[r_TMP], r_RESULT

bpf_error:
	/* Make the JIT program itself return zero. */
	ret
	restore	%g0, %g0, %o0
+2 −77
Original line number Diff line number Diff line
@@ -48,10 +48,6 @@ static void bpf_flush_icache(void *start_, void *end_)
	}
}

#define SEEN_DATAREF 1 /* might call external helpers */
#define SEEN_XREG    2 /* ebx is used */
#define SEEN_MEM     4 /* use mem[] for temporary storage */

#define S13(X)		((X) & 0x1fff)
#define S5(X)		((X) & 0x1f)
#define IMMED		0x00002000
@@ -198,7 +194,6 @@ struct jit_ctx {
	bool 			tmp_1_used;
	bool 			tmp_2_used;
	bool 			tmp_3_used;
	bool			saw_ld_abs_ind;
	bool			saw_frame_pointer;
	bool			saw_call;
	bool			saw_tail_call;
@@ -207,9 +202,7 @@ struct jit_ctx {

#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
#define SKB_HLEN_REG	(MAX_BPF_JIT_REG + 2)
#define SKB_DATA_REG	(MAX_BPF_JIT_REG + 3)
#define TMP_REG_3	(MAX_BPF_JIT_REG + 4)
#define TMP_REG_3	(MAX_BPF_JIT_REG + 2)

/* Map BPF registers to SPARC registers */
static const int bpf2sparc[] = {
@@ -238,9 +231,6 @@ static const int bpf2sparc[] = {
	[TMP_REG_1] = G1,
	[TMP_REG_2] = G2,
	[TMP_REG_3] = G3,

	[SKB_HLEN_REG] = L4,
	[SKB_DATA_REG] = L5,
};

static void emit(const u32 insn, struct jit_ctx *ctx)
@@ -800,25 +790,6 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
	return 0;
}

static void load_skb_regs(struct jit_ctx *ctx, u8 r_skb)
{
	const u8 r_headlen = bpf2sparc[SKB_HLEN_REG];
	const u8 r_data = bpf2sparc[SKB_DATA_REG];
	const u8 r_tmp = bpf2sparc[TMP_REG_1];
	unsigned int off;

	off = offsetof(struct sk_buff, len);
	emit(LD32I | RS1(r_skb) | S13(off) | RD(r_headlen), ctx);

	off = offsetof(struct sk_buff, data_len);
	emit(LD32I | RS1(r_skb) | S13(off) | RD(r_tmp), ctx);

	emit(SUB | RS1(r_headlen) | RS2(r_tmp) | RD(r_headlen), ctx);

	off = offsetof(struct sk_buff, data);
	emit(LDPTRI | RS1(r_skb) | S13(off) | RD(r_data), ctx);
}

/* Just skip the save instruction and the ctx register move.  */
#define BPF_TAILCALL_PROLOGUE_SKIP	16
#define BPF_TAILCALL_CNT_SP_OFF		(STACK_BIAS + 128)
@@ -857,9 +828,6 @@ static void build_prologue(struct jit_ctx *ctx)

	emit_reg_move(I0, O0, ctx);
	/* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */

	if (ctx->saw_ld_abs_ind)
		load_skb_regs(ctx, bpf2sparc[BPF_REG_1]);
}

static void build_epilogue(struct jit_ctx *ctx)
@@ -1225,16 +1193,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
		u8 *func = ((u8 *)__bpf_call_base) + imm;

		ctx->saw_call = true;
		if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
			emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx);

		emit_call((u32 *)func, ctx);
		emit_nop(ctx);

		emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);

		if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
			load_skb_regs(ctx, L7);
		break;
	}

@@ -1412,43 +1375,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
		emit_nop(ctx);
		break;
	}
#define CHOOSE_LOAD_FUNC(K, func) \
		((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
		func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_word);
		goto common_load;
	case BPF_LD | BPF_ABS | BPF_H:
		func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_half);
		goto common_load;
	case BPF_LD | BPF_ABS | BPF_B:
		func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_byte);
		goto common_load;
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
		func = bpf_jit_load_word;
		goto common_load;
	case BPF_LD | BPF_IND | BPF_H:
		func = bpf_jit_load_half;
		goto common_load;

	case BPF_LD | BPF_IND | BPF_B:
		func = bpf_jit_load_byte;
	common_load:
		ctx->saw_ld_abs_ind = true;

		emit_reg_move(bpf2sparc[BPF_REG_6], O0, ctx);
		emit_loadimm(imm, O1, ctx);

		if (BPF_MODE(code) == BPF_IND)
			emit_alu(ADD, src, O1, ctx);

		emit_call(func, ctx);
		emit_alu_K(SRA, O1, 0, ctx);

		emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
		break;

	default:
		pr_err_once("unknown opcode %02x\n", code);
@@ -1583,12 +1509,11 @@ skip_init_ctx:
		build_epilogue(&ctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c%c]\n", pass,
			pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c]\n", pass,
				image_size - (ctx.idx * 4),
				ctx.tmp_1_used ? '1' : ' ',
				ctx.tmp_2_used ? '2' : ' ',
				ctx.tmp_3_used ? '3' : ' ',
				ctx.saw_ld_abs_ind ? 'L' : ' ',
				ctx.saw_frame_pointer ? 'F' : ' ',
				ctx.saw_call ? 'C' : ' ',
				ctx.saw_tail_call ? 'T' : ' ');