bpf.vger.kernel.org archive mirror
* [PATCH v2 bpf-next 0/3] bpf: Pointers beyond packet end.
@ 2020-11-11  3:12 Alexei Starovoitov
  2020-11-11  3:12 ` [PATCH v2 bpf-next 1/3] bpf: Support for pointers beyond pkt_end Alexei Starovoitov
                   ` (3 more replies)
  0 siblings, 4 replies; 14+ messages in thread
From: Alexei Starovoitov @ 2020-11-11  3:12 UTC (permalink / raw)
  To: davem; +Cc: daniel, netdev, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

v1->v2:
- removed set-but-unused variable.
- added Jiri's Tested-by.

In some cases LLVM uses the knowledge that a branch is taken to optimize the
code, which causes the verifier to reject valid programs.
Teach the verifier to recognize that
r1 = skb->data;
r1 += 10;
r2 = skb->data_end;
if (r1 > r2) {
  here r1 points beyond packet_end and subsequent
  if (r1 > r2) // always evaluates to "true".
}
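
Patch 2 adds a C selftest that triggers this pattern. As a rough sketch (not
the exact selftest; the header size and names here are made up, and the usual
<bpf/bpf_helpers.h> boilerplate is assumed), the problematic source shape is:

static __always_inline void *get_hdr(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

	/* bounds check against the full header size */
	return data + 54 > data_end ? 0 : data + 14;
}

SEC("classifier")
int example(struct __sk_buff *skb)
{
	unsigned char *hdr = get_hdr(skb);

	/* LLVM knows hdr is NULL exactly when the bounds check above failed,
	 * so it re-emits the same pkt vs pkt_end comparison here instead of
	 * a NULL check; the verifier now follows that reasoning and can prove
	 * the access below is in bounds.
	 */
	if (!hdr)
		return -1;
	return hdr[9];
}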

Alexei Starovoitov (3):
  bpf: Support for pointers beyond pkt_end.
  selftests/bpf: Add skb_pkt_end test
  selftests/bpf: Add asm tests for pkt vs pkt_end comparison.

 include/linux/bpf_verifier.h                  |   2 +-
 kernel/bpf/verifier.c                         | 129 +++++++++++++++---
 .../bpf/prog_tests/test_skb_pkt_end.c         |  41 ++++++
 .../testing/selftests/bpf/progs/skb_pkt_end.c |  54 ++++++++
 .../testing/selftests/bpf/verifier/ctx_skb.c  |  42 ++++++
 5 files changed, 245 insertions(+), 23 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
 create mode 100644 tools/testing/selftests/bpf/progs/skb_pkt_end.c

-- 
2.24.1



* [PATCH v2 bpf-next 1/3] bpf: Support for pointers beyond pkt_end.
  2020-11-11  3:12 [PATCH v2 bpf-next 0/3] bpf: Pointers beyond packet end Alexei Starovoitov
@ 2020-11-11  3:12 ` Alexei Starovoitov
  2020-11-12 19:16   ` John Fastabend
  2020-11-11  3:12 ` [PATCH v2 bpf-next 2/3] selftests/bpf: Add skb_pkt_end test Alexei Starovoitov
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 14+ messages in thread
From: Alexei Starovoitov @ 2020-11-11  3:12 UTC (permalink / raw)
  To: davem; +Cc: daniel, netdev, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

This patch adds verifier support for recognizing inlined branch conditions.
LLVM knows that the branch evaluates to the same value, but the verifier
could not track it, causing valid programs to be rejected.
The potential LLVM workaround (https://reviews.llvm.org/D87428)
can have undesired side effects, since LLVM doesn't know that
skb->data/data_end are being compared. LLVM would have to introduce an extra
boolean variable and use an inline_asm trick to force assembly that is
easier for the verifier.
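
A similar source-level workaround is in use in BPF programs today; roughly
(a sketch only, not the D87428 change and not any particular project's code)
it hides the comparison result from the optimizer behind an empty inline asm
so the two checks cannot be merged:

static __always_inline long bounds_ok(void *data, void *data_end,
				      unsigned long len)
{
	long ok = data + len <= data_end;

	/* opaque to LLVM: it cannot prove a later identical check redundant */
	asm volatile("" : "+r"(ok));
	return ok;
}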

Instead teach the verifier to recognize that
r1 = skb->data;
r1 += 10;
r2 = skb->data_end;
if (r1 > r2) {
  here r1 points beyond packet_end and
  subsequent
  if (r1 > r2) // always evaluates to "true".
}

Tested-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 include/linux/bpf_verifier.h |   2 +-
 kernel/bpf/verifier.c        | 129 +++++++++++++++++++++++++++++------
 2 files changed, 108 insertions(+), 23 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index e83ef6f6bf43..306869d4743b 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -45,7 +45,7 @@ struct bpf_reg_state {
 	enum bpf_reg_type type;
 	union {
 		/* valid when type == PTR_TO_PACKET */
-		u16 range;
+		int range;
 
 		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
 		 *   PTR_TO_MAP_VALUE_OR_NULL
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 10da26e55130..7b1f85aa9741 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2739,7 +2739,9 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
 			regno);
 		return -EACCES;
 	}
-	err = __check_mem_access(env, regno, off, size, reg->range,
+
+	err = reg->range < 0 ? -EINVAL :
+	      __check_mem_access(env, regno, off, size, reg->range,
 				 zero_size_allowed);
 	if (err) {
 		verbose(env, "R%d offset is outside of the packet\n", regno);
@@ -4697,6 +4699,32 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
 		__clear_all_pkt_pointers(env, vstate->frame[i]);
 }
 
+enum {
+	AT_PKT_END = -1,
+	BEYOND_PKT_END = -2,
+};
+
+static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
+{
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
+	struct bpf_reg_state *reg = &state->regs[regn];
+
+	if (reg->type != PTR_TO_PACKET)
+		/* PTR_TO_PACKET_META is not supported yet */
+		return;
+
+	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
+	 * How far beyond pkt_end it goes is unknown.
+	 * if (!range_open) it's the case of pkt >= pkt_end
+	 * if (range_open) it's the case of pkt > pkt_end
+	 * hence this pointer is at least 1 byte bigger than pkt_end
+	 */
+	if (range_open)
+		reg->range = BEYOND_PKT_END;
+	else
+		reg->range = AT_PKT_END;
+}
+
 static void release_reg_references(struct bpf_verifier_env *env,
 				   struct bpf_func_state *state,
 				   int ref_obj_id)
@@ -6708,7 +6736,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 
 static void __find_good_pkt_pointers(struct bpf_func_state *state,
 				     struct bpf_reg_state *dst_reg,
-				     enum bpf_reg_type type, u16 new_range)
+				     enum bpf_reg_type type, int new_range)
 {
 	struct bpf_reg_state *reg;
 	int i;
@@ -6733,8 +6761,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 				   enum bpf_reg_type type,
 				   bool range_right_open)
 {
-	u16 new_range;
-	int i;
+	int new_range, i;
 
 	if (dst_reg->off < 0 ||
 	    (dst_reg->off == 0 && range_right_open))
@@ -6985,6 +7012,67 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
 	return is_branch64_taken(reg, val, opcode);
 }
 
+static int flip_opcode(u32 opcode)
+{
+	/* How can we transform "a <op> b" into "b <op> a"? */
+	static const u8 opcode_flip[16] = {
+		/* these stay the same */
+		[BPF_JEQ  >> 4] = BPF_JEQ,
+		[BPF_JNE  >> 4] = BPF_JNE,
+		[BPF_JSET >> 4] = BPF_JSET,
+		/* these swap "lesser" and "greater" (L and G in the opcodes) */
+		[BPF_JGE  >> 4] = BPF_JLE,
+		[BPF_JGT  >> 4] = BPF_JLT,
+		[BPF_JLE  >> 4] = BPF_JGE,
+		[BPF_JLT  >> 4] = BPF_JGT,
+		[BPF_JSGE >> 4] = BPF_JSLE,
+		[BPF_JSGT >> 4] = BPF_JSLT,
+		[BPF_JSLE >> 4] = BPF_JSGE,
+		[BPF_JSLT >> 4] = BPF_JSGT
+	};
+	return opcode_flip[opcode >> 4];
+}
+
+static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
+				   struct bpf_reg_state *src_reg,
+				   u8 opcode)
+{
+	struct bpf_reg_state *pkt;
+
+	if (src_reg->type == PTR_TO_PACKET_END) {
+		pkt = dst_reg;
+	} else if (dst_reg->type == PTR_TO_PACKET_END) {
+		pkt = src_reg;
+		opcode = flip_opcode(opcode);
+	} else {
+		return -1;
+	}
+
+	if (pkt->range >= 0)
+		return -1;
+
+	switch (opcode) {
+	case BPF_JLE:
+		/* pkt <= pkt_end */
+		fallthrough;
+	case BPF_JGT:
+		/* pkt > pkt_end */
+		if (pkt->range == BEYOND_PKT_END)
+			/* pkt has at least one extra byte beyond pkt_end */
+			return opcode == BPF_JGT;
+		break;
+	case BPF_JLT:
+		/* pkt < pkt_end */
+		fallthrough;
+	case BPF_JGE:
+		/* pkt >= pkt_end */
+		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
+			return opcode == BPF_JGE;
+		break;
+	}
+	return -1;
+}
+
 /* Adjusts the register min/max values in the case that the dst_reg is the
  * variable register that we are working on, and src_reg is a constant or we're
  * simply doing a BPF_K check.
@@ -7148,23 +7236,7 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 				u64 val, u32 val32,
 				u8 opcode, bool is_jmp32)
 {
-	/* How can we transform "a <op> b" into "b <op> a"? */
-	static const u8 opcode_flip[16] = {
-		/* these stay the same */
-		[BPF_JEQ  >> 4] = BPF_JEQ,
-		[BPF_JNE  >> 4] = BPF_JNE,
-		[BPF_JSET >> 4] = BPF_JSET,
-		/* these swap "lesser" and "greater" (L and G in the opcodes) */
-		[BPF_JGE  >> 4] = BPF_JLE,
-		[BPF_JGT  >> 4] = BPF_JLT,
-		[BPF_JLE  >> 4] = BPF_JGE,
-		[BPF_JLT  >> 4] = BPF_JGT,
-		[BPF_JSGE >> 4] = BPF_JSLE,
-		[BPF_JSGT >> 4] = BPF_JSLT,
-		[BPF_JSLE >> 4] = BPF_JSGE,
-		[BPF_JSLT >> 4] = BPF_JSGT
-	};
-	opcode = opcode_flip[opcode >> 4];
+	opcode = flip_opcode(opcode);
 	/* This uses zero as "not present in table"; luckily the zero opcode,
 	 * BPF_JA, can't get here.
 	 */
@@ -7346,6 +7418,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
 			find_good_pkt_pointers(this_branch, dst_reg,
 					       dst_reg->type, false);
+			mark_pkt_end(other_branch, insn->dst_reg, true);
 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
 			    src_reg->type == PTR_TO_PACKET) ||
 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7353,6 +7426,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
 			find_good_pkt_pointers(other_branch, src_reg,
 					       src_reg->type, true);
+			mark_pkt_end(this_branch, insn->src_reg, false);
 		} else {
 			return false;
 		}
@@ -7365,6 +7439,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
 			find_good_pkt_pointers(other_branch, dst_reg,
 					       dst_reg->type, true);
+			mark_pkt_end(this_branch, insn->dst_reg, false);
 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
 			    src_reg->type == PTR_TO_PACKET) ||
 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7372,6 +7447,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_end < pkt_data', pkt_data > pkt_meta' */
 			find_good_pkt_pointers(this_branch, src_reg,
 					       src_reg->type, false);
+			mark_pkt_end(other_branch, insn->src_reg, true);
 		} else {
 			return false;
 		}
@@ -7384,6 +7460,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
 			find_good_pkt_pointers(this_branch, dst_reg,
 					       dst_reg->type, true);
+			mark_pkt_end(other_branch, insn->dst_reg, false);
 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
 			    src_reg->type == PTR_TO_PACKET) ||
 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7391,6 +7468,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
 			find_good_pkt_pointers(other_branch, src_reg,
 					       src_reg->type, false);
+			mark_pkt_end(this_branch, insn->src_reg, true);
 		} else {
 			return false;
 		}
@@ -7403,6 +7481,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
 			find_good_pkt_pointers(other_branch, dst_reg,
 					       dst_reg->type, false);
+			mark_pkt_end(this_branch, insn->dst_reg, true);
 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
 			    src_reg->type == PTR_TO_PACKET) ||
 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7410,6 +7489,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
 			find_good_pkt_pointers(this_branch, src_reg,
 					       src_reg->type, true);
+			mark_pkt_end(other_branch, insn->src_reg, false);
 		} else {
 			return false;
 		}
@@ -7509,6 +7589,10 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 				       src_reg->var_off.value,
 				       opcode,
 				       is_jmp32);
+	} else if (reg_is_pkt_pointer_any(dst_reg) &&
+		   reg_is_pkt_pointer_any(src_reg) &&
+		   !is_jmp32) {
+		pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
 	}
 
 	if (pred >= 0) {
@@ -7517,7 +7601,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		 */
 		if (!__is_pointer_value(false, dst_reg))
 			err = mark_chain_precision(env, insn->dst_reg);
-		if (BPF_SRC(insn->code) == BPF_X && !err)
+		if (BPF_SRC(insn->code) == BPF_X && !err &&
+		    !__is_pointer_value(false, src_reg))
 			err = mark_chain_precision(env, insn->src_reg);
 		if (err)
 			return err;
-- 
2.24.1



* [PATCH v2 bpf-next 2/3] selftests/bpf: Add skb_pkt_end test
  2020-11-11  3:12 [PATCH v2 bpf-next 0/3] bpf: Pointers beyond packet end Alexei Starovoitov
  2020-11-11  3:12 ` [PATCH v2 bpf-next 1/3] bpf: Support for pointers beyond pkt_end Alexei Starovoitov
@ 2020-11-11  3:12 ` Alexei Starovoitov
  2020-11-12 19:19   ` John Fastabend
  2020-11-11  3:12 ` [PATCH v2 bpf-next 3/3] selftests/bpf: Add asm tests for pkt vs pkt_end comparison Alexei Starovoitov
  2020-11-13  0:50 ` [PATCH v2 bpf-next 0/3] bpf: Pointers beyond packet end patchwork-bot+netdevbpf
  3 siblings, 1 reply; 14+ messages in thread
From: Alexei Starovoitov @ 2020-11-11  3:12 UTC (permalink / raw)
  To: davem; +Cc: daniel, netdev, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

Add a test that currently makes LLVM generate assembly code:

$ llvm-objdump -S skb_pkt_end.o
0000000000000000 <main_prog>:
; 	if (skb_shorter(skb, ETH_IPV4_TCP_SIZE))
       0:	61 12 50 00 00 00 00 00	r2 = *(u32 *)(r1 + 80)
       1:	61 14 4c 00 00 00 00 00	r4 = *(u32 *)(r1 + 76)
       2:	bf 43 00 00 00 00 00 00	r3 = r4
       3:	07 03 00 00 36 00 00 00	r3 += 54
       4:	b7 01 00 00 00 00 00 00	r1 = 0
       5:	2d 23 02 00 00 00 00 00	if r3 > r2 goto +2 <LBB0_2>
       6:	07 04 00 00 0e 00 00 00	r4 += 14
; 	if (skb_shorter(skb, ETH_IPV4_TCP_SIZE))
       7:	bf 41 00 00 00 00 00 00	r1 = r4
0000000000000040 <LBB0_2>:
       8:	b4 00 00 00 ff ff ff ff	w0 = -1
; 	if (!(ip = get_iphdr(skb)))
       9:	2d 23 05 00 00 00 00 00	if r3 > r2 goto +5 <LBB0_6>
; 	proto = ip->protocol;
      10:	71 12 09 00 00 00 00 00	r2 = *(u8 *)(r1 + 9)
; 	if (proto != IPPROTO_TCP)
      11:	56 02 03 00 06 00 00 00	if w2 != 6 goto +3 <LBB0_6>
; 	if (tcp->dest != 0)
      12:	69 12 16 00 00 00 00 00	r2 = *(u16 *)(r1 + 22)
      13:	56 02 01 00 00 00 00 00	if w2 != 0 goto +1 <LBB0_6>
; 	return tcp->urg_ptr;
      14:	69 10 26 00 00 00 00 00	r0 = *(u16 *)(r1 + 38)
0000000000000078 <LBB0_6>:
; }
      15:	95 00 00 00 00 00 00 00	exit

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 .../bpf/prog_tests/test_skb_pkt_end.c         | 41 ++++++++++++++
 .../testing/selftests/bpf/progs/skb_pkt_end.c | 54 +++++++++++++++++++
 2 files changed, 95 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
 create mode 100644 tools/testing/selftests/bpf/progs/skb_pkt_end.c

diff --git a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
new file mode 100644
index 000000000000..cf1215531920
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "skb_pkt_end.skel.h"
+
+static int sanity_run(struct bpf_program *prog)
+{
+	__u32 duration, retval;
+	int err, prog_fd;
+
+	prog_fd = bpf_program__fd(prog);
+	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+				NULL, NULL, &retval, &duration);
+	if (CHECK(err || retval != 123, "test_run",
+		  "err %d errno %d retval %d duration %d\n",
+		  err, errno, retval, duration))
+		return -1;
+	return 0;
+}
+
+void test_test_skb_pkt_end(void)
+{
+	struct skb_pkt_end *skb_pkt_end_skel = NULL;
+	__u32 duration = 0;
+	int err;
+
+	skb_pkt_end_skel = skb_pkt_end__open_and_load();
+	if (CHECK(!skb_pkt_end_skel, "skb_pkt_end_skel_load", "skb_pkt_end skeleton failed\n"))
+		goto cleanup;
+
+	err = skb_pkt_end__attach(skb_pkt_end_skel);
+	if (CHECK(err, "skb_pkt_end_attach", "skb_pkt_end attach failed: %d\n", err))
+		goto cleanup;
+
+	if (sanity_run(skb_pkt_end_skel->progs.main_prog))
+		goto cleanup;
+
+cleanup:
+	skb_pkt_end__destroy(skb_pkt_end_skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/skb_pkt_end.c b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
new file mode 100644
index 000000000000..cf6823f42e80
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_PRESERVE_ACCESS_INDEX
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+
+#define NULL 0
+#define INLINE __always_inline
+
+#define skb_shorter(skb, len) ((void *)(long)(skb)->data + (len) > (void *)(long)skb->data_end)
+
+#define ETH_IPV4_TCP_SIZE (14 + sizeof(struct iphdr) + sizeof(struct tcphdr))
+
+static INLINE struct iphdr *get_iphdr(struct __sk_buff *skb)
+{
+	struct iphdr *ip = NULL;
+	struct ethhdr *eth;
+
+	if (skb_shorter(skb, ETH_IPV4_TCP_SIZE))
+		goto out;
+
+	eth = (void *)(long)skb->data;
+	ip = (void *)(eth + 1);
+
+out:
+	return ip;
+}
+
+SEC("classifier/cls")
+int main_prog(struct __sk_buff *skb)
+{
+	struct iphdr *ip = NULL;
+	struct tcphdr *tcp;
+	__u8 proto = 0;
+
+	if (!(ip = get_iphdr(skb)))
+		goto out;
+
+	proto = ip->protocol;
+
+	if (proto != IPPROTO_TCP)
+		goto out;
+
+	tcp = (void*)(ip + 1);
+	if (tcp->dest != 0)
+		goto out;
+	if (!tcp)
+		goto out;
+
+	return tcp->urg_ptr;
+out:
+	return -1;
+}
+char _license[] SEC("license") = "GPL";
-- 
2.24.1



* [PATCH v2 bpf-next 3/3] selftests/bpf: Add asm tests for pkt vs pkt_end comparison.
  2020-11-11  3:12 [PATCH v2 bpf-next 0/3] bpf: Pointers beyond packet end Alexei Starovoitov
  2020-11-11  3:12 ` [PATCH v2 bpf-next 1/3] bpf: Support for pointers beyond pkt_end Alexei Starovoitov
  2020-11-11  3:12 ` [PATCH v2 bpf-next 2/3] selftests/bpf: Add skb_pkt_end test Alexei Starovoitov
@ 2020-11-11  3:12 ` Alexei Starovoitov
  2020-11-12 19:24   ` John Fastabend
  2021-01-20 20:51   ` Jiri Olsa
  2020-11-13  0:50 ` [PATCH v2 bpf-next 0/3] bpf: Pointers beyond packet end patchwork-bot+netdevbpf
  3 siblings, 2 replies; 14+ messages in thread
From: Alexei Starovoitov @ 2020-11-11  3:12 UTC (permalink / raw)
  To: davem; +Cc: daniel, netdev, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

Add a few assembly tests for packet comparison.

Tested-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 .../testing/selftests/bpf/verifier/ctx_skb.c  | 42 +++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
index 2e16b8e268f2..2022c0f2cd75 100644
--- a/tools/testing/selftests/bpf/verifier/ctx_skb.c
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -1089,3 +1089,45 @@
 	.errstr_unpriv = "R1 leaks addr",
 	.result = REJECT,
 },
+{
+       "pkt > pkt_end taken check",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,                //  0. r2 = *(u32 *)(r1 + data_end)
+                   offsetof(struct __sk_buff, data_end)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,                //  1. r4 = *(u32 *)(r1 + data)
+                   offsetof(struct __sk_buff, data)),
+       BPF_MOV64_REG(BPF_REG_3, BPF_REG_4),                    //  2. r3 = r4
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42),                  //  3. r3 += 42
+       BPF_MOV64_IMM(BPF_REG_1, 0),                            //  4. r1 = 0
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2),          //  5. if r3 > r2 goto 8
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),                  //  6. r4 += 14
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),                    //  7. r1 = r4
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1),          //  8. if r3 > r2 goto 10
+       BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),            //  9. r2 = *(u16 *)(r1 + 9)
+       BPF_MOV64_IMM(BPF_REG_0, 0),                            // 10. r0 = 0
+       BPF_EXIT_INSN(),                                        // 11. exit
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+       "pkt_end < pkt taken check",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,                //  0. r2 = *(u32 *)(r1 + data_end)
+                   offsetof(struct __sk_buff, data_end)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,                //  1. r4 = *(u32 *)(r1 + data)
+                   offsetof(struct __sk_buff, data)),
+       BPF_MOV64_REG(BPF_REG_3, BPF_REG_4),                    //  2. r3 = r4
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42),                  //  3. r3 += 42
+       BPF_MOV64_IMM(BPF_REG_1, 0),                            //  4. r1 = 0
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2),          //  5. if r3 > r2 goto 8
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),                  //  6. r4 += 14
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),                    //  7. r1 = r4
+       BPF_JMP_REG(BPF_JLT, BPF_REG_2, BPF_REG_3, 1),          //  8. if r2 < r3 goto 10
+       BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),            //  9. r2 = *(u16 *)(r1 + 9)
+       BPF_MOV64_IMM(BPF_REG_0, 0),                            // 10. r0 = 0
+       BPF_EXIT_INSN(),                                        // 11. exit
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
-- 
2.24.1



* RE: [PATCH v2 bpf-next 1/3] bpf: Support for pointers beyond pkt_end.
  2020-11-11  3:12 ` [PATCH v2 bpf-next 1/3] bpf: Support for pointers beyond pkt_end Alexei Starovoitov
@ 2020-11-12 19:16   ` John Fastabend
  2020-11-12 23:56     ` Daniel Borkmann
  2020-11-12 23:59     ` Alexei Starovoitov
  0 siblings, 2 replies; 14+ messages in thread
From: John Fastabend @ 2020-11-12 19:16 UTC (permalink / raw)
  To: Alexei Starovoitov, davem; +Cc: daniel, netdev, bpf, kernel-team

Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> This patch adds the verifier support to recognize inlined branch conditions.
> The LLVM knows that the branch evaluates to the same value, but the verifier
> couldn't track it. Hence causing valid programs to be rejected.
> The potential LLVM workaround: https://reviews.llvm.org/D87428
> can have undesired side effects, since LLVM doesn't know that
> skb->data/data_end are being compared. LLVM has to introduce extra boolean
> variable and use inline_asm trick to force easier for the verifier assembly.
> 
> Instead teach the verifier to recognize that
> r1 = skb->data;
> r1 += 10;
> r2 = skb->data_end;
> if (r1 > r2) {
>   here r1 points beyond packet_end and
>   subsequent
>   if (r1 > r2) // always evaluates to "true".
> }
> 
> Tested-by: Jiri Olsa <jolsa@redhat.com>
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> ---
>  include/linux/bpf_verifier.h |   2 +-
>  kernel/bpf/verifier.c        | 129 +++++++++++++++++++++++++++++------
>  2 files changed, 108 insertions(+), 23 deletions(-)
> 

Thanks, we can remove another set of inline asm logic.

Acked-by: John Fastabend <john.fastabend@gmail.com>
 
>  	if (pred >= 0) {
> @@ -7517,7 +7601,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
>  		 */
>  		if (!__is_pointer_value(false, dst_reg))
>  			err = mark_chain_precision(env, insn->dst_reg);
> -		if (BPF_SRC(insn->code) == BPF_X && !err)
> +		if (BPF_SRC(insn->code) == BPF_X && !err &&
> +		    !__is_pointer_value(false, src_reg))

This could have been more specific with !type_is_pkt_pointer(), correct? I
think it's fine as is though.

>  			err = mark_chain_precision(env, insn->src_reg);
>  		if (err)
>  			return err;
> -- 
> 2.24.1
> 




* RE: [PATCH v2 bpf-next 2/3] selftests/bpf: Add skb_pkt_end test
  2020-11-11  3:12 ` [PATCH v2 bpf-next 2/3] selftests/bpf: Add skb_pkt_end test Alexei Starovoitov
@ 2020-11-12 19:19   ` John Fastabend
  0 siblings, 0 replies; 14+ messages in thread
From: John Fastabend @ 2020-11-12 19:19 UTC (permalink / raw)
  To: Alexei Starovoitov, davem; +Cc: daniel, netdev, bpf, kernel-team

Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> Add a test that currently makes LLVM generate assembly code:
> 
> $ llvm-objdump -S skb_pkt_end.o
> 0000000000000000 <main_prog>:
> ; 	if (skb_shorter(skb, ETH_IPV4_TCP_SIZE))
>        0:	61 12 50 00 00 00 00 00	r2 = *(u32 *)(r1 + 80)
>        1:	61 14 4c 00 00 00 00 00	r4 = *(u32 *)(r1 + 76)
>        2:	bf 43 00 00 00 00 00 00	r3 = r4
>        3:	07 03 00 00 36 00 00 00	r3 += 54
>        4:	b7 01 00 00 00 00 00 00	r1 = 0
>        5:	2d 23 02 00 00 00 00 00	if r3 > r2 goto +2 <LBB0_2>
>        6:	07 04 00 00 0e 00 00 00	r4 += 14
> ; 	if (skb_shorter(skb, ETH_IPV4_TCP_SIZE))
>        7:	bf 41 00 00 00 00 00 00	r1 = r4
> 0000000000000040 <LBB0_2>:
>        8:	b4 00 00 00 ff ff ff ff	w0 = -1
> ; 	if (!(ip = get_iphdr(skb)))
>        9:	2d 23 05 00 00 00 00 00	if r3 > r2 goto +5 <LBB0_6>
> ; 	proto = ip->protocol;
>       10:	71 12 09 00 00 00 00 00	r2 = *(u8 *)(r1 + 9)
> ; 	if (proto != IPPROTO_TCP)
>       11:	56 02 03 00 06 00 00 00	if w2 != 6 goto +3 <LBB0_6>
> ; 	if (tcp->dest != 0)
>       12:	69 12 16 00 00 00 00 00	r2 = *(u16 *)(r1 + 22)
>       13:	56 02 01 00 00 00 00 00	if w2 != 0 goto +1 <LBB0_6>
> ; 	return tcp->urg_ptr;
>       14:	69 10 26 00 00 00 00 00	r0 = *(u16 *)(r1 + 38)
> 0000000000000078 <LBB0_6>:
> ; }
>       15:	95 00 00 00 00 00 00 00	exit
> 
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> ---

Acked-by: John Fastabend <john.fastabend@gmail.com>


* RE: [PATCH v2 bpf-next 3/3] selftests/bpf: Add asm tests for pkt vs pkt_end comparison.
  2020-11-11  3:12 ` [PATCH v2 bpf-next 3/3] selftests/bpf: Add asm tests for pkt vs pkt_end comparison Alexei Starovoitov
@ 2020-11-12 19:24   ` John Fastabend
  2021-01-20 20:51   ` Jiri Olsa
  1 sibling, 0 replies; 14+ messages in thread
From: John Fastabend @ 2020-11-12 19:24 UTC (permalink / raw)
  To: Alexei Starovoitov, davem; +Cc: daniel, netdev, bpf, kernel-team

Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> Add few assembly tests for packet comparison.
> 
> Tested-by: Jiri Olsa <jolsa@redhat.com>
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> ---
>  .../testing/selftests/bpf/verifier/ctx_skb.c  | 42 +++++++++++++++++++
>  1 file changed, 42 insertions(+)

Acked-by: John Fastabend <john.fastabend@gmail.com>


* Re: [PATCH v2 bpf-next 1/3] bpf: Support for pointers beyond pkt_end.
  2020-11-12 19:16   ` John Fastabend
@ 2020-11-12 23:56     ` Daniel Borkmann
  2020-11-13  0:09       ` Alexei Starovoitov
  2020-11-12 23:59     ` Alexei Starovoitov
  1 sibling, 1 reply; 14+ messages in thread
From: Daniel Borkmann @ 2020-11-12 23:56 UTC (permalink / raw)
  To: John Fastabend, Alexei Starovoitov, davem; +Cc: netdev, bpf, kernel-team

On 11/12/20 8:16 PM, John Fastabend wrote:
> Alexei Starovoitov wrote:
>> From: Alexei Starovoitov <ast@kernel.org>
>>
>> This patch adds the verifier support to recognize inlined branch conditions.
>> The LLVM knows that the branch evaluates to the same value, but the verifier
>> couldn't track it. Hence causing valid programs to be rejected.
>> The potential LLVM workaround: https://reviews.llvm.org/D87428
>> can have undesired side effects, since LLVM doesn't know that
>> skb->data/data_end are being compared. LLVM has to introduce extra boolean
>> variable and use inline_asm trick to force easier for the verifier assembly.
>>
>> Instead teach the verifier to recognize that
>> r1 = skb->data;
>> r1 += 10;
>> r2 = skb->data_end;
>> if (r1 > r2) {
>>    here r1 points beyond packet_end and
>>    subsequent
>>    if (r1 > r2) // always evaluates to "true".
>> }
>>
>> Tested-by: Jiri Olsa <jolsa@redhat.com>
>> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
>> ---
>>   include/linux/bpf_verifier.h |   2 +-
>>   kernel/bpf/verifier.c        | 129 +++++++++++++++++++++++++++++------
>>   2 files changed, 108 insertions(+), 23 deletions(-)
>>
> 
> Thanks, we can remove another set of inline asm logic.
> 
> Acked-by: John Fastabend <john.fastabend@gmail.com>
>   
>>   	if (pred >= 0) {
>> @@ -7517,7 +7601,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
>>   		 */
>>   		if (!__is_pointer_value(false, dst_reg))
>>   			err = mark_chain_precision(env, insn->dst_reg);
>> -		if (BPF_SRC(insn->code) == BPF_X && !err)
>> +		if (BPF_SRC(insn->code) == BPF_X && !err &&
>> +		    !__is_pointer_value(false, src_reg))
> 
> This could have been more specific with !type_is_pkt_pointer() correct? I
> think its fine as is though.
> 
>>   			err = mark_chain_precision(env, insn->src_reg);
>>   		if (err)
>>   			return err;

Given that reg->range can now be negative, I wonder whether we should add a
>= 0 sanity check to the regsafe() pruning logic before we attempt to test
rold->range > rcur->range?

Thanks,
Daniel


* Re: [PATCH v2 bpf-next 1/3] bpf: Support for pointers beyond pkt_end.
  2020-11-12 19:16   ` John Fastabend
  2020-11-12 23:56     ` Daniel Borkmann
@ 2020-11-12 23:59     ` Alexei Starovoitov
  2020-11-13  0:35       ` John Fastabend
  1 sibling, 1 reply; 14+ messages in thread
From: Alexei Starovoitov @ 2020-11-12 23:59 UTC (permalink / raw)
  To: John Fastabend; +Cc: davem, daniel, netdev, bpf, kernel-team

On Thu, Nov 12, 2020 at 11:16:11AM -0800, John Fastabend wrote:
> Alexei Starovoitov wrote:
> > From: Alexei Starovoitov <ast@kernel.org>
> > 
> > This patch adds the verifier support to recognize inlined branch conditions.
> > The LLVM knows that the branch evaluates to the same value, but the verifier
> > couldn't track it. Hence causing valid programs to be rejected.
> > The potential LLVM workaround: https://reviews.llvm.org/D87428
> > can have undesired side effects, since LLVM doesn't know that
> > skb->data/data_end are being compared. LLVM has to introduce extra boolean
> > variable and use inline_asm trick to force easier for the verifier assembly.
> > 
> > Instead teach the verifier to recognize that
> > r1 = skb->data;
> > r1 += 10;
> > r2 = skb->data_end;
> > if (r1 > r2) {
> >   here r1 points beyond packet_end and
> >   subsequent
> >   if (r1 > r2) // always evaluates to "true".
> > }
> > 
> > Tested-by: Jiri Olsa <jolsa@redhat.com>
> > Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> > ---
> >  include/linux/bpf_verifier.h |   2 +-
> >  kernel/bpf/verifier.c        | 129 +++++++++++++++++++++++++++++------
> >  2 files changed, 108 insertions(+), 23 deletions(-)
> > 
> 
> Thanks, we can remove another set of inline asm logic.

Awesome! Please contribute your C examples to selftests when possible.

> Acked-by: John Fastabend <john.fastabend@gmail.com>
>  
> >  	if (pred >= 0) {
> > @@ -7517,7 +7601,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
> >  		 */
> >  		if (!__is_pointer_value(false, dst_reg))
> >  			err = mark_chain_precision(env, insn->dst_reg);
> > -		if (BPF_SRC(insn->code) == BPF_X && !err)
> > +		if (BPF_SRC(insn->code) == BPF_X && !err &&
> > +		    !__is_pointer_value(false, src_reg))
> 
> This could have been more specific with !type_is_pkt_pointer() correct? I
> think its fine as is though.

I actually meant to use __is_pointer_value() here for two reasons:
1. to match the dst_reg check just a few lines above.
2. mark_chain_precision() is for scalars only. If, in the future,
  is_*_branch_taken() supports other kinds of pointers, the more
  precise !type_is_pkt_pointer() check would need to be modified.
  That would be unnecessary code churn.

Thanks for the review!


* Re: [PATCH v2 bpf-next 1/3] bpf: Support for pointers beyond pkt_end.
  2020-11-12 23:56     ` Daniel Borkmann
@ 2020-11-13  0:09       ` Alexei Starovoitov
  2020-11-13  0:50         ` Daniel Borkmann
  0 siblings, 1 reply; 14+ messages in thread
From: Alexei Starovoitov @ 2020-11-13  0:09 UTC (permalink / raw)
  To: Daniel Borkmann; +Cc: John Fastabend, davem, netdev, bpf, kernel-team

On Fri, Nov 13, 2020 at 12:56:52AM +0100, Daniel Borkmann wrote:
> On 11/12/20 8:16 PM, John Fastabend wrote:
> > Alexei Starovoitov wrote:
> > > From: Alexei Starovoitov <ast@kernel.org>
> > > 
> > > This patch adds the verifier support to recognize inlined branch conditions.
> > > The LLVM knows that the branch evaluates to the same value, but the verifier
> > > couldn't track it. Hence causing valid programs to be rejected.
> > > The potential LLVM workaround: https://reviews.llvm.org/D87428
> > > can have undesired side effects, since LLVM doesn't know that
> > > skb->data/data_end are being compared. LLVM has to introduce extra boolean
> > > variable and use inline_asm trick to force easier for the verifier assembly.
> > > 
> > > Instead teach the verifier to recognize that
> > > r1 = skb->data;
> > > r1 += 10;
> > > r2 = skb->data_end;
> > > if (r1 > r2) {
> > >    here r1 points beyond packet_end and
> > >    subsequent
> > >    if (r1 > r2) // always evaluates to "true".
> > > }
> > > 
> > > Tested-by: Jiri Olsa <jolsa@redhat.com>
> > > Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> > > ---
> > >   include/linux/bpf_verifier.h |   2 +-
> > >   kernel/bpf/verifier.c        | 129 +++++++++++++++++++++++++++++------
> > >   2 files changed, 108 insertions(+), 23 deletions(-)
> > > 
> > 
> > Thanks, we can remove another set of inline asm logic.
> > 
> > Acked-by: John Fastabend <john.fastabend@gmail.com>
> > >   	if (pred >= 0) {
> > > @@ -7517,7 +7601,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
> > >   		 */
> > >   		if (!__is_pointer_value(false, dst_reg))
> > >   			err = mark_chain_precision(env, insn->dst_reg);
> > > -		if (BPF_SRC(insn->code) == BPF_X && !err)
> > > +		if (BPF_SRC(insn->code) == BPF_X && !err &&
> > > +		    !__is_pointer_value(false, src_reg))
> > 
> > This could have been more specific with !type_is_pkt_pointer() correct? I
> > think its fine as is though.
> > 
> > >   			err = mark_chain_precision(env, insn->src_reg);
> > >   		if (err)
> > >   			return err;
> 
> Given the reg->range could now be negative, I wonder whether for the regsafe()
> pruning logic we should now better add a >=0 sanity check in there before we
> attempt to test on rold->range > rcur->range?

I thought about it and specifically picked negative range values to keep
the regsafe() check as-is.
The check is this:
                if (rold->range > rcur->range)
                        return false;
rold is the one that was safe in the past.
If rold was positive and the current one is negative we fail here,
which is ok. State pruning is conservative.

If rold was negative it means the previous state was safe even though that pointer
was pointing beyond the packet end. So it's ok for rcur->range to be anything.
Whether rcur is positive or negative doesn't matter. Everything is still ok.
If rold->range == -1 and rcur->range == -2 we fail here.
It's a minor annoyance. State pruning is a tiny bit more conservative than necessary.

So I think no extra checks in regsafe() are needed.
Does it make sense?
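
To make the cases concrete, here is a standalone sketch of just that
comparison with the sentinels from patch 1 (not the actual regsafe() code):

enum {
	AT_PKT_END = -1,
	BEYOND_PKT_END = -2,
};

static int range_still_safe(int rold_range, int rcur_range)
{
	/* same test as in regsafe() */
	if (rold_range > rcur_range)
		return 0;	/* not safe to prune */
	return 1;
}

/*
 * range_still_safe(10, AT_PKT_END)             -> 0: conservative, no pruning
 * range_still_safe(AT_PKT_END, 10)             -> 1: old state was already safe
 * range_still_safe(AT_PKT_END, BEYOND_PKT_END) -> 0: slightly conservative
 */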


* Re: [PATCH v2 bpf-next 1/3] bpf: Support for pointers beyond pkt_end.
  2020-11-12 23:59     ` Alexei Starovoitov
@ 2020-11-13  0:35       ` John Fastabend
  0 siblings, 0 replies; 14+ messages in thread
From: John Fastabend @ 2020-11-13  0:35 UTC (permalink / raw)
  To: Alexei Starovoitov, John Fastabend
  Cc: davem, daniel, netdev, bpf, kernel-team

Alexei Starovoitov wrote:
> On Thu, Nov 12, 2020 at 11:16:11AM -0800, John Fastabend wrote:
> > Alexei Starovoitov wrote:
> > > From: Alexei Starovoitov <ast@kernel.org>
> > > 
> > > This patch adds the verifier support to recognize inlined branch conditions.
> > > The LLVM knows that the branch evaluates to the same value, but the verifier
> > > couldn't track it. Hence causing valid programs to be rejected.
> > > The potential LLVM workaround: https://reviews.llvm.org/D87428
> > > can have undesired side effects, since LLVM doesn't know that
> > > skb->data/data_end are being compared. LLVM has to introduce extra boolean
> > > variable and use inline_asm trick to force easier for the verifier assembly.
> > > 
> > > Instead teach the verifier to recognize that
> > > r1 = skb->data;
> > > r1 += 10;
> > > r2 = skb->data_end;
> > > if (r1 > r2) {
> > >   here r1 points beyond packet_end and
> > >   subsequent
> > >   if (r1 > r2) // always evaluates to "true".
> > > }
> > > 
> > > Tested-by: Jiri Olsa <jolsa@redhat.com>
> > > Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> > > ---
> > >  include/linux/bpf_verifier.h |   2 +-
> > >  kernel/bpf/verifier.c        | 129 +++++++++++++++++++++++++++++------
> > >  2 files changed, 108 insertions(+), 23 deletions(-)
> > > 
> > 
> > Thanks, we can remove another set of inline asm logic.
> 
> Awesome! Please contribute your C examples to selftests when possible.

Sure, will do; it's just some mundane header parsing IIRC.

> 
> > Acked-by: John Fastabend <john.fastabend@gmail.com>
> >  
> > >  	if (pred >= 0) {
> > > @@ -7517,7 +7601,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
> > >  		 */
> > >  		if (!__is_pointer_value(false, dst_reg))
> > >  			err = mark_chain_precision(env, insn->dst_reg);
> > > -		if (BPF_SRC(insn->code) == BPF_X && !err)
> > > +		if (BPF_SRC(insn->code) == BPF_X && !err &&
> > > +		    !__is_pointer_value(false, src_reg))
> > 
> > This could have been more specific with !type_is_pkt_pointer() correct? I
> > think its fine as is though.
> 
> I actually meant to use __is_pointer_value() here for two reasons:
> 1. to match dst_reg check just few lines above.

Agree.

> 2. mark_chain_precision() is for scalars only. If in the future
>   is_*_branch_taken() will support other kinds of pointers the more
>   precise !type_is_pkt_pointer() check would need to be modified.
>   That would be unnecessary code churn.

Agree.


* Re: [PATCH v2 bpf-next 1/3] bpf: Support for pointers beyond pkt_end.
  2020-11-13  0:09       ` Alexei Starovoitov
@ 2020-11-13  0:50         ` Daniel Borkmann
  0 siblings, 0 replies; 14+ messages in thread
From: Daniel Borkmann @ 2020-11-13  0:50 UTC (permalink / raw)
  To: Alexei Starovoitov; +Cc: John Fastabend, davem, netdev, bpf, kernel-team

On 11/13/20 1:09 AM, Alexei Starovoitov wrote:
> On Fri, Nov 13, 2020 at 12:56:52AM +0100, Daniel Borkmann wrote:
>> On 11/12/20 8:16 PM, John Fastabend wrote:
>>> Alexei Starovoitov wrote:
>>>> From: Alexei Starovoitov <ast@kernel.org>
>>>>
>>>> This patch adds the verifier support to recognize inlined branch conditions.
>>>> The LLVM knows that the branch evaluates to the same value, but the verifier
>>>> couldn't track it. Hence causing valid programs to be rejected.
>>>> The potential LLVM workaround: https://reviews.llvm.org/D87428
>>>> can have undesired side effects, since LLVM doesn't know that
>>>> skb->data/data_end are being compared. LLVM has to introduce extra boolean
>>>> variable and use inline_asm trick to force easier for the verifier assembly.
>>>>
>>>> Instead teach the verifier to recognize that
>>>> r1 = skb->data;
>>>> r1 += 10;
>>>> r2 = skb->data_end;
>>>> if (r1 > r2) {
>>>>     here r1 points beyond packet_end and
>>>>     subsequent
>>>>     if (r1 > r2) // always evaluates to "true".
>>>> }
>>>>
>>>> Tested-by: Jiri Olsa <jolsa@redhat.com>
>>>> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
>>>> ---
>>>>    include/linux/bpf_verifier.h |   2 +-
>>>>    kernel/bpf/verifier.c        | 129 +++++++++++++++++++++++++++++------
>>>>    2 files changed, 108 insertions(+), 23 deletions(-)
>>>
>>> Thanks, we can remove another set of inline asm logic.
>>>
>>> Acked-by: John Fastabend <john.fastabend@gmail.com>
>>>>    	if (pred >= 0) {
>>>> @@ -7517,7 +7601,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
>>>>    		 */
>>>>    		if (!__is_pointer_value(false, dst_reg))
>>>>    			err = mark_chain_precision(env, insn->dst_reg);
>>>> -		if (BPF_SRC(insn->code) == BPF_X && !err)
>>>> +		if (BPF_SRC(insn->code) == BPF_X && !err &&
>>>> +		    !__is_pointer_value(false, src_reg))
>>>
>>> This could have been more specific with !type_is_pkt_pointer() correct? I
>>> think its fine as is though.
>>>
>>>>    			err = mark_chain_precision(env, insn->src_reg);
>>>>    		if (err)
>>>>    			return err;
>>
>> Given the reg->range could now be negative, I wonder whether for the regsafe()
>> pruning logic we should now better add a >=0 sanity check in there before we
>> attempt to test on rold->range > rcur->range?
> 
> I thought about it and specifically picked negative range value to keep
> regsafe() check as-is.
> The check is this:
>                  if (rold->range > rcur->range)
>                          return false;
> rold is the one that was safe in the past.
> If rold was positive and the current is negative we fail here
> which is ok. State pruning is conservative.
> 
> If rold was negative it means the previous state was safe even though that pointer
> was pointing beyond packet end. So it's ok for rcur->range to be anything.
> Whether rcur is positive or negative doesn't matter. Everything is still ok.
> If rold->range == -1 and rcur->range == -2 we fail here.
> It's minor annoyance. State pruning is tiny bit more conservative than necessary.
> 
> So I think no extra checks in regsafe() are neeeded.
> Does it make sense?

Yeah, same conclusion here. We still might want to add more BPF asm-based tests
for this in general, but either way the logic LGTM, so applied, thanks.


* Re: [PATCH v2 bpf-next 0/3] bpf: Pointers beyond packet end.
  2020-11-11  3:12 [PATCH v2 bpf-next 0/3] bpf: Pointers beyond packet end Alexei Starovoitov
                   ` (2 preceding siblings ...)
  2020-11-11  3:12 ` [PATCH v2 bpf-next 3/3] selftests/bpf: Add asm tests for pkt vs pkt_end comparison Alexei Starovoitov
@ 2020-11-13  0:50 ` patchwork-bot+netdevbpf
  3 siblings, 0 replies; 14+ messages in thread
From: patchwork-bot+netdevbpf @ 2020-11-13  0:50 UTC (permalink / raw)
  To: Alexei Starovoitov; +Cc: davem, daniel, netdev, bpf, kernel-team

Hello:

This series was applied to bpf/bpf-next.git (refs/heads/master):

On Tue, 10 Nov 2020 19:12:10 -0800 you wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> v1->v2:
> - removed set-but-unused variable.
> - added Jiri's Tested-by.
> 
> In some cases LLVM uses the knowledge that branch is taken to optimze the code
> which causes the verifier to reject valid programs.
> Teach the verifier to recognize that
> r1 = skb->data;
> r1 += 10;
> r2 = skb->data_end;
> if (r1 > r2) {
>   here r1 points beyond packet_end and subsequent
>   if (r1 > r2) // always evaluates to "true".
> }
> 
> [...]

Here is the summary with links:
  - [v2,bpf-next,1/3] bpf: Support for pointers beyond pkt_end.
    https://git.kernel.org/bpf/bpf-next/c/6d94e741a8ff
  - [v2,bpf-next,2/3] selftests/bpf: Add skb_pkt_end test
    https://git.kernel.org/bpf/bpf-next/c/9cc873e85800
  - [v2,bpf-next,3/3] selftests/bpf: Add asm tests for pkt vs pkt_end comparison.
    https://git.kernel.org/bpf/bpf-next/c/cb62d34019d9

You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html




* Re: [PATCH v2 bpf-next 3/3] selftests/bpf: Add asm tests for pkt vs pkt_end comparison.
  2020-11-11  3:12 ` [PATCH v2 bpf-next 3/3] selftests/bpf: Add asm tests for pkt vs pkt_end comparison Alexei Starovoitov
  2020-11-12 19:24   ` John Fastabend
@ 2021-01-20 20:51   ` Jiri Olsa
  1 sibling, 0 replies; 14+ messages in thread
From: Jiri Olsa @ 2021-01-20 20:51 UTC (permalink / raw)
  To: Alexei Starovoitov; +Cc: davem, daniel, netdev, bpf, kernel-team

On Tue, Nov 10, 2020 at 07:12:13PM -0800, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> Add few assembly tests for packet comparison.
> 
> Tested-by: Jiri Olsa <jolsa@redhat.com>
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>

hi,
I'm now getting an error when running this test:

#347/p pkt_end < pkt taken check Did not run the program (not supported) OK
Summary: 1 PASSED, 0 SKIPPED, 0 FAILED

it looks like my kernel does not have prog->aux->ops->test_run
defined for BPF_PROG_TYPE_SK_SKB for some reason

Am I missing some config option? I recall running this
back in November, so I'm confused ;-)

thanks,
jirka

> ---
>  .../testing/selftests/bpf/verifier/ctx_skb.c  | 42 +++++++++++++++++++
>  1 file changed, 42 insertions(+)
> 
> diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
> index 2e16b8e268f2..2022c0f2cd75 100644
> --- a/tools/testing/selftests/bpf/verifier/ctx_skb.c
> +++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
> @@ -1089,3 +1089,45 @@
>  	.errstr_unpriv = "R1 leaks addr",
>  	.result = REJECT,
>  },
> +{
> +       "pkt > pkt_end taken check",
> +       .insns = {
> +       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,                //  0. r2 = *(u32 *)(r1 + data_end)
> +                   offsetof(struct __sk_buff, data_end)),
> +       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,                //  1. r4 = *(u32 *)(r1 + data)
> +                   offsetof(struct __sk_buff, data)),
> +       BPF_MOV64_REG(BPF_REG_3, BPF_REG_4),                    //  2. r3 = r4
> +       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42),                  //  3. r3 += 42
> +       BPF_MOV64_IMM(BPF_REG_1, 0),                            //  4. r1 = 0
> +       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2),          //  5. if r3 > r2 goto 8
> +       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),                  //  6. r4 += 14
> +       BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),                    //  7. r1 = r4
> +       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1),          //  8. if r3 > r2 goto 10
> +       BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),            //  9. r2 = *(u8 *)(r1 + 9)
> +       BPF_MOV64_IMM(BPF_REG_0, 0),                            // 10. r0 = 0
> +       BPF_EXIT_INSN(),                                        // 11. exit
> +       },
> +       .result = ACCEPT,
> +       .prog_type = BPF_PROG_TYPE_SK_SKB,
> +},
> +{
> +       "pkt_end < pkt taken check",
> +       .insns = {
> +       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,                //  0. r2 = *(u32 *)(r1 + data_end)
> +                   offsetof(struct __sk_buff, data_end)),
> +       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,                //  1. r4 = *(u32 *)(r1 + data)
> +                   offsetof(struct __sk_buff, data)),
> +       BPF_MOV64_REG(BPF_REG_3, BPF_REG_4),                    //  2. r3 = r4
> +       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42),                  //  3. r3 += 42
> +       BPF_MOV64_IMM(BPF_REG_1, 0),                            //  4. r1 = 0
> +       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2),          //  5. if r3 > r2 goto 8
> +       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),                  //  6. r4 += 14
> +       BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),                    //  7. r1 = r4
> +       BPF_JMP_REG(BPF_JLT, BPF_REG_2, BPF_REG_3, 1),          //  8. if r2 < r3 goto 10
> +       BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),            //  9. r2 = *(u8 *)(r1 + 9)
> +       BPF_MOV64_IMM(BPF_REG_0, 0),                            // 10. r0 = 0
> +       BPF_EXIT_INSN(),                                        // 11. exit
> +       },
> +       .result = ACCEPT,
> +       .prog_type = BPF_PROG_TYPE_SK_SKB,
> +},
> -- 
> 2.24.1
> 



