Commit ca349a6a authored by Luke Nelson, committed by Daniel Borkmann
Browse files

bpf, riscv: Optimize BPF_JMP BPF_K when imm == 0 on RV64



This patch adds an optimization to BPF_JMP (32- and 64-bit) BPF_K for
when the BPF immediate is zero.

When the immediate is zero, the code can directly use the RISC-V zero
register instead of loading a zero immediate to a temporary register
first.

Co-developed-by: Xi Wang <xi.wang@gmail.com>
Signed-off-by: Xi Wang <xi.wang@gmail.com>
Signed-off-by: Luke Nelson <luke.r.nels@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Björn Töpel <bjorn.topel@gmail.com>
Acked-by: Björn Töpel <bjorn.topel@gmail.com>
Link: https://lore.kernel.org/bpf/20200506000320.28965-4-luke.r.nels@gmail.com
parent 21a099ab
Loading
Loading
Loading
Loading
+10 −5
Original line number Diff line number Diff line
@@ -796,7 +796,13 @@ out_be:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		rvoff = rv_offset(i, off, ctx);
		s = ctx->ninsns;
		if (imm) {
			emit_imm(RV_REG_T1, imm, ctx);
			rs = RV_REG_T1;
		} else {
			/* If imm is 0, simply use zero register. */
			rs = RV_REG_ZERO;
		}
		if (!is64) {
			if (is_signed_bpf_cond(BPF_OP(code)))
				emit_sext_32_rd(&rd, ctx);
@@ -811,11 +817,10 @@ out_be:
		if (BPF_OP(code) == BPF_JSET) {
			/* Adjust for and */
			rvoff -= 4;
			emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx);
			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
				    ctx);
			emit(rv_and(rs, rd, rs), ctx);
			emit_branch(BPF_JNE, rs, RV_REG_ZERO, rvoff, ctx);
		} else {
			emit_branch(BPF_OP(code), rd, RV_REG_T1, rvoff, ctx);
			emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
		}
		break;