From: Yonghong Song <yhs@fb.com>
To: <bpf@vger.kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>,
	Andrii Nakryiko <andrii@kernel.org>, <bpf@ietf.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Fangrui Song <maskray@google.com>, <kernel-team@fb.com>
Subject: [PATCH bpf-next v3 07/17] bpf: Support new 32bit offset jmp instruction
Date: Wed, 19 Jul 2023 17:01:40 -0700
Message-ID: <20230720000140.105530-1-yhs@fb.com>
In-Reply-To: <20230720000103.99949-1-yhs@fb.com>

Add interpreter/jit/verifier support for the 32-bit offset jmp
instruction. If a conditional jmp instruction needs more than a 16-bit
offset, it can be simulated with a conditional jmp + a 32-bit jmp insn.
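
To make this concrete, here is a minimal sketch (illustration only, not
part of this patch) of the instruction pair such a rewrite could emit,
built with the BPF_JMP_IMM() and BPF_RAW_INSN() macros from
include/linux/filter.h. The array name, register, immediate and jump
distances below are made-up example values:

  #include <linux/filter.h>  /* struct bpf_insn, BPF_JMP_IMM(), BPF_RAW_INSN() */

  /* Hypothetical rewrite of "if r1 == 0x1234 goto +70000", whose target
   * does not fit in the 16-bit insn->off: invert the condition and hop
   * over a 32-bit unconditional jump (BPF_JMP32 | BPF_JA).
   */
  struct bpf_insn far_branch[] = {
  	/* skip the long jump when the condition does not hold */
  	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0x1234, 1),
  	/* 32-bit jump: the target offset lives in imm, not off; the jump
  	 * now sits one insn past the original branch, so the distance
  	 * shrinks from 70000 to 69999.
  	 */
  	BPF_RAW_INSN(BPF_JMP32 | BPF_JA, 0, 0, 0, 69999),
  };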

Signed-off-by: Yonghong Song <yhs@fb.com>
---
 arch/x86/net/bpf_jit_comp.c | 28 ++++++++++++++++++----------
 kernel/bpf/core.c           | 19 ++++++++++++++++---
 kernel/bpf/verifier.c       | 32 ++++++++++++++++++++++----------
 3 files changed, 56 insertions(+), 23 deletions(-)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a89b62eb2b40..a5930042139d 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1815,16 +1815,24 @@ st:			if (is_imm8(insn->off))
 			break;
 
 		case BPF_JMP | BPF_JA:
-			if (insn->off == -1)
-				/* -1 jmp instructions will always jump
-				 * backwards two bytes. Explicitly handling
-				 * this case avoids wasting too many passes
-				 * when there are long sequences of replaced
-				 * dead code.
-				 */
-				jmp_offset = -2;
-			else
-				jmp_offset = addrs[i + insn->off] - addrs[i];
+		case BPF_JMP32 | BPF_JA:
+			if (BPF_CLASS(insn->code) == BPF_JMP) {
+				if (insn->off == -1)
+					/* -1 jmp instructions will always jump
+					 * backwards two bytes. Explicitly handling
+					 * this case avoids wasting too many passes
+					 * when there are long sequences of replaced
+					 * dead code.
+					 */
+					jmp_offset = -2;
+				else
+					jmp_offset = addrs[i + insn->off] - addrs[i];
+			} else {
+				if (insn->imm == -1)
+					jmp_offset = -2;
+				else
+					jmp_offset = addrs[i + insn->imm] - addrs[i];
+			}
 
 			if (!jmp_offset) {
 				/*
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 646d2fe537be..db0b631908c2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -373,7 +373,12 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
 {
 	const s32 off_min = S16_MIN, off_max = S16_MAX;
 	s32 delta = end_new - end_old;
-	s32 off = insn->off;
+	s32 off;
+
+	if (insn->code == (BPF_JMP32 | BPF_JA))
+		off = insn->imm;
+	else
+		off = insn->off;
 
 	if (curr < pos && curr + off + 1 >= end_old)
 		off += delta;
@@ -381,8 +386,12 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
 		off -= delta;
 	if (off < off_min || off > off_max)
 		return -ERANGE;
-	if (!probe_pass)
-		insn->off = off;
+	if (!probe_pass) {
+		if (insn->code == (BPF_JMP32 | BPF_JA))
+			insn->imm = off;
+		else
+			insn->off = off;
+	}
 	return 0;
 }
 
@@ -1593,6 +1602,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
 	INSN_3(JMP, JSLE, K),			\
 	INSN_3(JMP, JSET, K),			\
 	INSN_2(JMP, JA),			\
+	INSN_2(JMP32, JA),			\
 	/* Store instructions. */		\
 	/*   Register based. */			\
 	INSN_3(STX, MEM,  B),			\
@@ -1989,6 +1999,9 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 	JMP_JA:
 		insn += insn->off;
 		CONT;
+	JMP32_JA:
+		insn += insn->imm;
+		CONT;
 	JMP_EXIT:
 		return BPF_R0;
 	/* JMP */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9436fa795f39..daf23c5da2ee 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2855,7 +2855,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
 			goto next;
 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
 			goto next;
-		off = i + insn[i].off + 1;
+		if (code == (BPF_JMP32 | BPF_JA))
+			off = i + insn[i].imm + 1;
+		else
+			off = i + insn[i].off + 1;
 		if (off < subprog_start || off >= subprog_end) {
 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
 			return -EINVAL;
@@ -2867,6 +2870,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
 			 * or unconditional jump back
 			 */
 			if (code != (BPF_JMP | BPF_EXIT) &&
+			    code != (BPF_JMP32 | BPF_JA) &&
 			    code != (BPF_JMP | BPF_JA)) {
 				verbose(env, "last insn is not an exit or jmp\n");
 				return -EINVAL;
@@ -14792,7 +14796,7 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
 static int visit_insn(int t, struct bpf_verifier_env *env)
 {
 	struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
-	int ret;
+	int ret, off;
 
 	if (bpf_pseudo_func(insn))
 		return visit_func_call_insn(t, insns, env, true);
@@ -14840,14 +14844,19 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
 		if (BPF_SRC(insn->code) != BPF_K)
 			return -EINVAL;
 
+		if (BPF_CLASS(insn->code) == BPF_JMP)
+			off = insn->off;
+		else
+			off = insn->imm;
+
 		/* unconditional jump with single edge */
-		ret = push_insn(t, t + insn->off + 1, FALLTHROUGH, env,
+		ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
 				true);
 		if (ret)
 			return ret;
 
-		mark_prune_point(env, t + insn->off + 1);
-		mark_jmp_point(env, t + insn->off + 1);
+		mark_prune_point(env, t + off + 1);
+		mark_jmp_point(env, t + off + 1);
 
 		return ret;
 
@@ -16643,15 +16652,18 @@ static int do_check(struct bpf_verifier_env *env)
 				mark_reg_scratched(env, BPF_REG_0);
 			} else if (opcode == BPF_JA) {
 				if (BPF_SRC(insn->code) != BPF_K ||
-				    insn->imm != 0 ||
 				    insn->src_reg != BPF_REG_0 ||
 				    insn->dst_reg != BPF_REG_0 ||
-				    class == BPF_JMP32) {
+				    (class == BPF_JMP && insn->imm != 0) ||
+				    (class == BPF_JMP32 && insn->off != 0)) {
 					verbose(env, "BPF_JA uses reserved fields\n");
 					return -EINVAL;
 				}
 
-				env->insn_idx += insn->off + 1;
+				if (class == BPF_JMP)
+					env->insn_idx += insn->off + 1;
+				else
+					env->insn_idx += insn->imm + 1;
 				continue;
 
 			} else if (opcode == BPF_EXIT) {
@@ -17498,13 +17510,13 @@ static bool insn_is_cond_jump(u8 code)
 {
 	u8 op;
 
+	op = BPF_OP(code);
 	if (BPF_CLASS(code) == BPF_JMP32)
-		return true;
+		return op != BPF_JA;
 
 	if (BPF_CLASS(code) != BPF_JMP)
 		return false;
 
-	op = BPF_OP(code);
 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
 }
 
-- 
2.34.1


Thread overview: 30+ messages in thread
2023-07-20  0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 01/17] bpf: Support new sign-extension load insns Yonghong Song
2023-07-20 20:33   ` Eduard Zingerman
2023-07-24  4:03     ` [Bpf] " Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 02/17] bpf: Support new sign-extension mov insns Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 03/17] bpf: Handle sign-extenstin ctx member accesses Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 04/17] bpf: Support new unconditional bswap instruction Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 05/17] bpf: Support new signed div/mod instructions Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 06/17] bpf: Fix jit blinding with new sdiv/smov insns Yonghong Song
2023-07-20  0:01 ` Yonghong Song [this message]
2023-07-20  0:01 ` [PATCH bpf-next v3 08/17] bpf: Add kernel/bpftool asm support for new instructions Yonghong Song
2023-07-21 14:36   ` Quentin Monnet
2023-07-20  0:01 ` [PATCH bpf-next v3 09/17] selftests/bpf: Fix a test_verifier failure Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 10/17] selftests/bpf: Add a cpuv4 test runner for cpu=v4 testing Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 11/17] selftests/bpf: Add unit tests for new sign-extension load insns Yonghong Song
2023-07-20  6:31   ` [Bpf] " Yonghong Song
2023-07-20  6:36     ` [Bpf] " Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 12/17] selftests/bpf: Add unit tests for new sign-extension mov insns Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 13/17] selftests/bpf: Add unit tests for new bswap insns Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 14/17] selftests/bpf: Add unit tests for new sdiv/smod insns Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 15/17] selftests/bpf: Add unit tests for new gotol insn Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 16/17] selftests/bpf: Test ldsx with more complex cases Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 17/17] docs/bpf: Add documentation for new instructions Yonghong Song
2023-07-20 20:35 ` [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Eduard Zingerman
