bpf.vger.kernel.org archive mirror
* [PATCH bpf-next 0/3] bpf: Pointers beyond packet end.
@ 2020-10-21 18:20 Alexei Starovoitov
  2020-10-21 18:20 ` [PATCH bpf-next 1/3] bpf: Support for pointers beyond pkt_end Alexei Starovoitov
                   ` (3 more replies)
  0 siblings, 4 replies; 6+ messages in thread
From: Alexei Starovoitov @ 2020-10-21 18:20 UTC (permalink / raw)
  To: davem; +Cc: daniel, john.fastabend, jolsa, netdev, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

In some cases LLVM uses the knowledge that a branch is taken to optimize the
code, which causes the verifier to reject valid programs.
Teach the verifier to recognize the following pattern:
r1 = skb->data;
r1 += 10;
r2 = skb->data_end;
if (r1 > r2) {
  // here r1 points beyond packet_end, so a subsequent
  if (r1 > r2) // always evaluates to "true".
}
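
For context, here is a minimal C sketch of the kind of BPF program where LLVM
emits this shape (an illustration only; it mirrors the skb_pkt_end selftest
added in patch 2, and the helper name and size constant are assumptions rather
than code quoted from this cover letter):

/* assumes the usual selftest includes: vmlinux.h, bpf_helpers.h */
#define ETH_IPV4_TCP_SIZE (14 + sizeof(struct iphdr) + sizeof(struct tcphdr))

static __always_inline struct iphdr *get_iphdr(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

	/* bounds check; LLVM may keep the "data + N > data_end" result in a
	 * register and branch on it again in the caller instead of testing
	 * the returned pointer for NULL, producing the second if (r1 > r2)
	 * shown above
	 */
	if (data + ETH_IPV4_TCP_SIZE > data_end)
		return NULL;
	return data + sizeof(struct ethhdr);
}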

Alexei Starovoitov (3):
  bpf: Support for pointers beyond pkt_end.
  selftests/bpf: Add skb_pkt_end test
  selftests/bpf: Add asm tests for pkt vs pkt_end comparison.

 include/linux/bpf_verifier.h                  |   2 +-
 kernel/bpf/verifier.c                         | 131 +++++++++++++++---
 .../bpf/prog_tests/test_skb_pkt_end.c         |  41 ++++++
 .../testing/selftests/bpf/progs/skb_pkt_end.c |  54 ++++++++
 .../testing/selftests/bpf/verifier/ctx_skb.c  |  42 ++++++
 5 files changed, 247 insertions(+), 23 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
 create mode 100644 tools/testing/selftests/bpf/progs/skb_pkt_end.c

-- 
2.23.0



* [PATCH bpf-next 1/3] bpf: Support for pointers beyond pkt_end.
  2020-10-21 18:20 [PATCH bpf-next 0/3] bpf: Pointers beyond packet end Alexei Starovoitov
@ 2020-10-21 18:20 ` Alexei Starovoitov
  2020-10-21 22:07   ` kernel test robot
  2020-10-21 18:20 ` [PATCH bpf-next 2/3] selftests/bpf: Add skb_pkt_end test Alexei Starovoitov
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 6+ messages in thread
From: Alexei Starovoitov @ 2020-10-21 18:20 UTC (permalink / raw)
  To: davem; +Cc: daniel, john.fastabend, jolsa, netdev, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

This patch adds verifier support for recognizing inlined branch conditions.
LLVM knows that the branch evaluates to the same value, but the verifier
could not track it, causing valid programs to be rejected.
The potential LLVM workaround: https://reviews.llvm.org/D87428
can have undesired side effects, since LLVM doesn't know that
skb->data/data_end are being compared. LLVM would have to introduce an extra
boolean variable and use an inline_asm trick to force assembly that is easier
for the verifier.

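For reference, the sort of source-level workaround that would otherwise be
needed is an empty inline-asm barrier that hides the pointer from the
optimizer, so the comparison has to be re-materialized (a hedged sketch, not
code from this series or from D87428; the size constant is illustrative):

	void *p = (void *)(long)skb->data + ETH_IPV4_TCP_SIZE;

	/* "+r" makes LLVM treat p as unknown past this point, so a later
	 * "p > data_end" test cannot be folded into an earlier comparison
	 */
	asm volatile("" : "+r"(p));

	if (p > (void *)(long)skb->data_end)
		return -1;
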
Instead, teach the verifier to recognize the following pattern:
r1 = skb->data;
r1 += 10;
r2 = skb->data_end;
if (r1 > r2) {
  // here r1 points beyond packet_end, so a subsequent
  if (r1 > r2) // always evaluates to "true".
}

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 include/linux/bpf_verifier.h |   2 +-
 kernel/bpf/verifier.c        | 131 +++++++++++++++++++++++++++++------
 2 files changed, 110 insertions(+), 23 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index e83ef6f6bf43..306869d4743b 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -45,7 +45,7 @@ struct bpf_reg_state {
 	enum bpf_reg_type type;
 	union {
 		/* valid when type == PTR_TO_PACKET */
-		u16 range;
+		int range;
 
 		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
 		 *   PTR_TO_MAP_VALUE_OR_NULL
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 39d7f44e7c92..303c5fc1701b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2739,7 +2739,9 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
 			regno);
 		return -EACCES;
 	}
-	err = __check_mem_access(env, regno, off, size, reg->range,
+
+	err = reg->range < 0 ? -EINVAL :
+	      __check_mem_access(env, regno, off, size, reg->range,
 				 zero_size_allowed);
 	if (err) {
 		verbose(env, "R%d offset is outside of the packet\n", regno);
@@ -4687,6 +4689,32 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
 		__clear_all_pkt_pointers(env, vstate->frame[i]);
 }
 
+enum {
+	AT_PKT_END = -1,
+	BEYOND_PKT_END = -2,
+};
+
+static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
+{
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
+	struct bpf_reg_state *reg = &state->regs[regn];
+
+	if (reg->type != PTR_TO_PACKET)
+		/* PTR_TO_PACKET_META is not supported yet */
+		return;
+
+	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
+	 * How far beyond pkt_end it goes is unknown.
+	 * if (!range_open) it's the case of pkt >= pkt_end
+	 * if (range_open) it's the case of pkt > pkt_end
+	 * hence this pointer is at least 1 byte bigger than pkt_end
+	 */
+	if (range_open)
+		reg->range = BEYOND_PKT_END;
+	else
+		reg->range = AT_PKT_END;
+}
+
 static void release_reg_references(struct bpf_verifier_env *env,
 				   struct bpf_func_state *state,
 				   int ref_obj_id)
@@ -6697,7 +6725,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 
 static void __find_good_pkt_pointers(struct bpf_func_state *state,
 				     struct bpf_reg_state *dst_reg,
-				     enum bpf_reg_type type, u16 new_range)
+				     enum bpf_reg_type type, int new_range)
 {
 	struct bpf_reg_state *reg;
 	int i;
@@ -6722,8 +6750,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 				   enum bpf_reg_type type,
 				   bool range_right_open)
 {
-	u16 new_range;
-	int i;
+	int new_range, i;
 
 	if (dst_reg->off < 0 ||
 	    (dst_reg->off == 0 && range_right_open))
@@ -6974,6 +7001,69 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
 	return is_branch64_taken(reg, val, opcode);
 }
 
+static int flip_opcode(u32 opcode)
+{
+	/* How can we transform "a <op> b" into "b <op> a"? */
+	static const u8 opcode_flip[16] = {
+		/* these stay the same */
+		[BPF_JEQ  >> 4] = BPF_JEQ,
+		[BPF_JNE  >> 4] = BPF_JNE,
+		[BPF_JSET >> 4] = BPF_JSET,
+		/* these swap "lesser" and "greater" (L and G in the opcodes) */
+		[BPF_JGE  >> 4] = BPF_JLE,
+		[BPF_JGT  >> 4] = BPF_JLT,
+		[BPF_JLE  >> 4] = BPF_JGE,
+		[BPF_JLT  >> 4] = BPF_JGT,
+		[BPF_JSGE >> 4] = BPF_JSLE,
+		[BPF_JSGT >> 4] = BPF_JSLT,
+		[BPF_JSLE >> 4] = BPF_JSGE,
+		[BPF_JSLT >> 4] = BPF_JSGT
+	};
+	return opcode_flip[opcode >> 4];
+}
+
+static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
+				   struct bpf_reg_state *src_reg,
+				   u8 opcode)
+{
+	struct bpf_reg_state *pkt_end, *pkt;
+
+	if (src_reg->type == PTR_TO_PACKET_END) {
+		pkt_end = src_reg;
+		pkt = dst_reg;
+	} else if (dst_reg->type == PTR_TO_PACKET_END) {
+		pkt_end = dst_reg;
+		pkt = src_reg;
+		opcode = flip_opcode(opcode);
+	} else {
+		return -1;
+	}
+
+	if (pkt->range >= 0)
+		return -1;
+
+	switch (opcode) {
+	case BPF_JLE:
+		/* pkt <= pkt_end */
+		fallthrough;
+	case BPF_JGT:
+		/* pkt > pkt_end */
+		if (pkt->range == BEYOND_PKT_END)
+			/* pkt has at least one extra byte beyond pkt_end */
+			return opcode == BPF_JGT;
+		break;
+	case BPF_JLT:
+		/* pkt < pkt_end */
+		fallthrough;
+	case BPF_JGE:
+		/* pkt >= pkt_end */
+		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
+			return opcode == BPF_JGE;
+		break;
+	}
+	return -1;
+}
+
 /* Adjusts the register min/max values in the case that the dst_reg is the
  * variable register that we are working on, and src_reg is a constant or we're
  * simply doing a BPF_K check.
@@ -7137,23 +7227,7 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 				u64 val, u32 val32,
 				u8 opcode, bool is_jmp32)
 {
-	/* How can we transform "a <op> b" into "b <op> a"? */
-	static const u8 opcode_flip[16] = {
-		/* these stay the same */
-		[BPF_JEQ  >> 4] = BPF_JEQ,
-		[BPF_JNE  >> 4] = BPF_JNE,
-		[BPF_JSET >> 4] = BPF_JSET,
-		/* these swap "lesser" and "greater" (L and G in the opcodes) */
-		[BPF_JGE  >> 4] = BPF_JLE,
-		[BPF_JGT  >> 4] = BPF_JLT,
-		[BPF_JLE  >> 4] = BPF_JGE,
-		[BPF_JLT  >> 4] = BPF_JGT,
-		[BPF_JSGE >> 4] = BPF_JSLE,
-		[BPF_JSGT >> 4] = BPF_JSLT,
-		[BPF_JSLE >> 4] = BPF_JSGE,
-		[BPF_JSLT >> 4] = BPF_JSGT
-	};
-	opcode = opcode_flip[opcode >> 4];
+	opcode = flip_opcode(opcode);
 	/* This uses zero as "not present in table"; luckily the zero opcode,
 	 * BPF_JA, can't get here.
 	 */
@@ -7334,6 +7408,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
 			find_good_pkt_pointers(this_branch, dst_reg,
 					       dst_reg->type, false);
+			mark_pkt_end(other_branch, insn->dst_reg, true);
 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
 			    src_reg->type == PTR_TO_PACKET) ||
 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7341,6 +7416,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
 			find_good_pkt_pointers(other_branch, src_reg,
 					       src_reg->type, true);
+			mark_pkt_end(this_branch, insn->src_reg, false);
 		} else {
 			return false;
 		}
@@ -7353,6 +7429,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
 			find_good_pkt_pointers(other_branch, dst_reg,
 					       dst_reg->type, true);
+			mark_pkt_end(this_branch, insn->dst_reg, false);
 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
 			    src_reg->type == PTR_TO_PACKET) ||
 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7360,6 +7437,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_end < pkt_data', pkt_data > pkt_meta' */
 			find_good_pkt_pointers(this_branch, src_reg,
 					       src_reg->type, false);
+			mark_pkt_end(other_branch, insn->src_reg, true);
 		} else {
 			return false;
 		}
@@ -7372,6 +7450,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
 			find_good_pkt_pointers(this_branch, dst_reg,
 					       dst_reg->type, true);
+			mark_pkt_end(other_branch, insn->dst_reg, false);
 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
 			    src_reg->type == PTR_TO_PACKET) ||
 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7379,6 +7458,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
 			find_good_pkt_pointers(other_branch, src_reg,
 					       src_reg->type, false);
+			mark_pkt_end(this_branch, insn->src_reg, true);
 		} else {
 			return false;
 		}
@@ -7391,6 +7471,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
 			find_good_pkt_pointers(other_branch, dst_reg,
 					       dst_reg->type, false);
+			mark_pkt_end(this_branch, insn->dst_reg, true);
 		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
 			    src_reg->type == PTR_TO_PACKET) ||
 			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7398,6 +7479,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
 			find_good_pkt_pointers(this_branch, src_reg,
 					       src_reg->type, true);
+			mark_pkt_end(other_branch, insn->src_reg, false);
 		} else {
 			return false;
 		}
@@ -7497,6 +7579,10 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 				       src_reg->var_off.value,
 				       opcode,
 				       is_jmp32);
+	} else if (reg_is_pkt_pointer_any(dst_reg) &&
+		   reg_is_pkt_pointer_any(src_reg) &&
+		   !is_jmp32) {
+		pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
 	}
 
 	if (pred >= 0) {
@@ -7505,7 +7591,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		 */
 		if (!__is_pointer_value(false, dst_reg))
 			err = mark_chain_precision(env, insn->dst_reg);
-		if (BPF_SRC(insn->code) == BPF_X && !err)
+		if (BPF_SRC(insn->code) == BPF_X && !err &&
+		    !__is_pointer_value(false, src_reg))
 			err = mark_chain_precision(env, insn->src_reg);
 		if (err)
 			return err;
-- 
2.23.0



* [PATCH bpf-next 2/3] selftests/bpf: Add skb_pkt_end test
  2020-10-21 18:20 [PATCH bpf-next 0/3] bpf: Pointers beyond packet end Alexei Starovoitov
  2020-10-21 18:20 ` [PATCH bpf-next 1/3] bpf: Support for pointers beyond pkt_end Alexei Starovoitov
@ 2020-10-21 18:20 ` Alexei Starovoitov
  2020-10-21 18:20 ` [PATCH bpf-next 3/3] selftests/bpf: Add asm tests for pkt vs pkt_end comparison Alexei Starovoitov
  2020-10-22  9:47 ` [PATCH bpf-next 0/3] bpf: Pointers beyond packet end Jiri Olsa
  3 siblings, 0 replies; 6+ messages in thread
From: Alexei Starovoitov @ 2020-10-21 18:20 UTC (permalink / raw)
  To: davem; +Cc: daniel, john.fastabend, jolsa, netdev, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

Add a test that currently makes LLVM generate the following assembly code:

$ llvm-objdump -S skb_pkt_end.o
0000000000000000 <main_prog>:
; 	if (skb_shorter(skb, ETH_IPV4_TCP_SIZE))
       0:	61 12 50 00 00 00 00 00	r2 = *(u32 *)(r1 + 80)
       1:	61 14 4c 00 00 00 00 00	r4 = *(u32 *)(r1 + 76)
       2:	bf 43 00 00 00 00 00 00	r3 = r4
       3:	07 03 00 00 36 00 00 00	r3 += 54
       4:	b7 01 00 00 00 00 00 00	r1 = 0
       5:	2d 23 02 00 00 00 00 00	if r3 > r2 goto +2 <LBB0_2>
       6:	07 04 00 00 0e 00 00 00	r4 += 14
; 	if (skb_shorter(skb, ETH_IPV4_TCP_SIZE))
       7:	bf 41 00 00 00 00 00 00	r1 = r4
0000000000000040 <LBB0_2>:
       8:	b4 00 00 00 ff ff ff ff	w0 = -1
; 	if (!(ip = get_iphdr(skb)))
       9:	2d 23 05 00 00 00 00 00	if r3 > r2 goto +5 <LBB0_6>
; 	proto = ip->protocol;
      10:	71 12 09 00 00 00 00 00	r2 = *(u8 *)(r1 + 9)
; 	if (proto != IPPROTO_TCP)
      11:	56 02 03 00 06 00 00 00	if w2 != 6 goto +3 <LBB0_6>
; 	if (tcp->dest != 0)
      12:	69 12 16 00 00 00 00 00	r2 = *(u16 *)(r1 + 22)
      13:	56 02 01 00 00 00 00 00	if w2 != 0 goto +1 <LBB0_6>
; 	return tcp->urg_ptr;
      14:	69 10 26 00 00 00 00 00	r0 = *(u16 *)(r1 + 38)
0000000000000078 <LBB0_6>:
; }
      15:	95 00 00 00 00 00 00 00	exit

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 .../bpf/prog_tests/test_skb_pkt_end.c         | 41 ++++++++++++++
 .../testing/selftests/bpf/progs/skb_pkt_end.c | 54 +++++++++++++++++++
 2 files changed, 95 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
 create mode 100644 tools/testing/selftests/bpf/progs/skb_pkt_end.c

diff --git a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
new file mode 100644
index 000000000000..cf1215531920
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "skb_pkt_end.skel.h"
+
+static int sanity_run(struct bpf_program *prog)
+{
+	__u32 duration, retval;
+	int err, prog_fd;
+
+	prog_fd = bpf_program__fd(prog);
+	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+				NULL, NULL, &retval, &duration);
+	if (CHECK(err || retval != 123, "test_run",
+		  "err %d errno %d retval %d duration %d\n",
+		  err, errno, retval, duration))
+		return -1;
+	return 0;
+}
+
+void test_test_skb_pkt_end(void)
+{
+	struct skb_pkt_end *skb_pkt_end_skel = NULL;
+	__u32 duration = 0;
+	int err;
+
+	skb_pkt_end_skel = skb_pkt_end__open_and_load();
+	if (CHECK(!skb_pkt_end_skel, "skb_pkt_end_skel_load", "skb_pkt_end skeleton failed\n"))
+		goto cleanup;
+
+	err = skb_pkt_end__attach(skb_pkt_end_skel);
+	if (CHECK(err, "skb_pkt_end_attach", "skb_pkt_end attach failed: %d\n", err))
+		goto cleanup;
+
+	if (sanity_run(skb_pkt_end_skel->progs.main_prog))
+		goto cleanup;
+
+cleanup:
+	skb_pkt_end__destroy(skb_pkt_end_skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/skb_pkt_end.c b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
new file mode 100644
index 000000000000..cf6823f42e80
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/skb_pkt_end.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_PRESERVE_ACCESS_INDEX
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+
+#define NULL 0
+#define INLINE __always_inline
+
+#define skb_shorter(skb, len) ((void *)(long)(skb)->data + (len) > (void *)(long)skb->data_end)
+
+#define ETH_IPV4_TCP_SIZE (14 + sizeof(struct iphdr) + sizeof(struct tcphdr))
+
+static INLINE struct iphdr *get_iphdr(struct __sk_buff *skb)
+{
+	struct iphdr *ip = NULL;
+	struct ethhdr *eth;
+
+	if (skb_shorter(skb, ETH_IPV4_TCP_SIZE))
+		goto out;
+
+	eth = (void *)(long)skb->data;
+	ip = (void *)(eth + 1);
+
+out:
+	return ip;
+}
+
+SEC("classifier/cls")
+int main_prog(struct __sk_buff *skb)
+{
+	struct iphdr *ip = NULL;
+	struct tcphdr *tcp;
+	__u8 proto = 0;
+
+	if (!(ip = get_iphdr(skb)))
+		goto out;
+
+	proto = ip->protocol;
+
+	if (proto != IPPROTO_TCP)
+		goto out;
+
+	tcp = (void*)(ip + 1);
+	if (tcp->dest != 0)
+		goto out;
+	if (!tcp)
+		goto out;
+
+	return tcp->urg_ptr;
+out:
+	return -1;
+}
+char _license[] SEC("license") = "GPL";
-- 
2.23.0



* [PATCH bpf-next 3/3] selftests/bpf: Add asm tests for pkt vs pkt_end comparison.
  2020-10-21 18:20 [PATCH bpf-next 0/3] bpf: Pointers beyond packet end Alexei Starovoitov
  2020-10-21 18:20 ` [PATCH bpf-next 1/3] bpf: Support for pointers beyond pkt_end Alexei Starovoitov
  2020-10-21 18:20 ` [PATCH bpf-next 2/3] selftests/bpf: Add skb_pkt_end test Alexei Starovoitov
@ 2020-10-21 18:20 ` Alexei Starovoitov
  2020-10-22  9:47 ` [PATCH bpf-next 0/3] bpf: Pointers beyond packet end Jiri Olsa
  3 siblings, 0 replies; 6+ messages in thread
From: Alexei Starovoitov @ 2020-10-21 18:20 UTC (permalink / raw)
  To: davem; +Cc: daniel, john.fastabend, jolsa, netdev, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

Add a few assembly tests for pkt vs pkt_end comparison.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 .../testing/selftests/bpf/verifier/ctx_skb.c  | 42 +++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
index 2e16b8e268f2..2022c0f2cd75 100644
--- a/tools/testing/selftests/bpf/verifier/ctx_skb.c
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -1089,3 +1089,45 @@
 	.errstr_unpriv = "R1 leaks addr",
 	.result = REJECT,
 },
+{
+       "pkt > pkt_end taken check",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,                //  0. r2 = *(u32 *)(r1 + data_end)
+                   offsetof(struct __sk_buff, data_end)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,                //  1. r4 = *(u32 *)(r1 + data)
+                   offsetof(struct __sk_buff, data)),
+       BPF_MOV64_REG(BPF_REG_3, BPF_REG_4),                    //  2. r3 = r4
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42),                  //  3. r3 += 42
+       BPF_MOV64_IMM(BPF_REG_1, 0),                            //  4. r1 = 0
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2),          //  5. if r3 > r2 goto 8
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),                  //  6. r4 += 14
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),                    //  7. r1 = r4
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1),          //  8. if r3 > r2 goto 10
+       BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),            //  9. r2 = *(u16 *)(r1 + 9)
+       BPF_MOV64_IMM(BPF_REG_0, 0),                            // 10. r0 = 0
+       BPF_EXIT_INSN(),                                        // 11. exit
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+       "pkt_end < pkt taken check",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,                //  0. r2 = *(u32 *)(r1 + data_end)
+                   offsetof(struct __sk_buff, data_end)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,                //  1. r4 = *(u32 *)(r1 + data)
+                   offsetof(struct __sk_buff, data)),
+       BPF_MOV64_REG(BPF_REG_3, BPF_REG_4),                    //  2. r3 = r4
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42),                  //  3. r3 += 42
+       BPF_MOV64_IMM(BPF_REG_1, 0),                            //  4. r1 = 0
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2),          //  5. if r3 > r2 goto 8
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),                  //  6. r4 += 14
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),                    //  7. r1 = r4
+       BPF_JMP_REG(BPF_JLT, BPF_REG_2, BPF_REG_3, 1),          //  8. if r2 < r3 goto 10
+       BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),            //  9. r2 = *(u16 *)(r1 + 9)
+       BPF_MOV64_IMM(BPF_REG_0, 0),                            // 10. r0 = 0
+       BPF_EXIT_INSN(),                                        // 11. exit
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
-- 
2.23.0



* Re: [PATCH bpf-next 1/3] bpf: Support for pointers beyond pkt_end.
  2020-10-21 18:20 ` [PATCH bpf-next 1/3] bpf: Support for pointers beyond pkt_end Alexei Starovoitov
@ 2020-10-21 22:07   ` kernel test robot
  0 siblings, 0 replies; 6+ messages in thread
From: kernel test robot @ 2020-10-21 22:07 UTC (permalink / raw)
  To: Alexei Starovoitov, davem
  Cc: kbuild-all, daniel, john.fastabend, jolsa, netdev, bpf, kernel-team


Hi Alexei,

I love your patch! Perhaps something to improve:

[auto build test WARNING on bpf-next/master]

url:    https://github.com/0day-ci/linux/commits/Alexei-Starovoitov/bpf-Pointers-beyond-packet-end/20201022-022139
base:   https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
config: i386-randconfig-r026-20201021 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/1aa4c81ae1dbbc3b6de7416f0384cde6cc8739b1
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Alexei-Starovoitov/bpf-Pointers-beyond-packet-end/20201022-022139
        git checkout 1aa4c81ae1dbbc3b6de7416f0384cde6cc8739b1
        # save the attached .config to linux build tree
        make W=1 ARCH=i386 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   kernel/bpf/verifier.c: In function 'is_pkt_ptr_branch_taken':
>> kernel/bpf/verifier.c:7029:24: warning: variable 'pkt_end' set but not used [-Wunused-but-set-variable]
    7029 |  struct bpf_reg_state *pkt_end, *pkt;
         |                        ^~~~~~~
   In file included from include/linux/bpf_verifier.h:8,
                    from kernel/bpf/verifier.c:12:
   kernel/bpf/verifier.c: In function 'jit_subprogs':
   include/linux/filter.h:345:4: warning: cast between incompatible function types from 'unsigned int (*)(const void *, const struct bpf_insn *)' to 'u64 (*)(u64,  u64,  u64,  u64,  u64)' {aka 'long long unsigned int (*)(long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int)'} [-Wcast-function-type]
     345 |   ((u64 (*)(u64, u64, u64, u64, u64))(x))
         |    ^
   kernel/bpf/verifier.c:10793:16: note: in expansion of macro 'BPF_CAST_CALL'
   10793 |    insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
         |                ^~~~~~~~~~~~~
   kernel/bpf/verifier.c: In function 'fixup_bpf_calls':
   include/linux/filter.h:345:4: warning: cast between incompatible function types from 'void * (* const)(struct bpf_map *, void *)' to 'u64 (*)(u64,  u64,  u64,  u64,  u64)' {aka 'long long unsigned int (*)(long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int)'} [-Wcast-function-type]
     345 |   ((u64 (*)(u64, u64, u64, u64, u64))(x))
         |    ^
   kernel/bpf/verifier.c:11186:17: note: in expansion of macro 'BPF_CAST_CALL'
   11186 |     insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
         |                 ^~~~~~~~~~~~~
   include/linux/filter.h:345:4: warning: cast between incompatible function types from 'int (* const)(struct bpf_map *, void *, void *, u64)' {aka 'int (* const)(struct bpf_map *, void *, void *, long long unsigned int)'} to 'u64 (*)(u64,  u64,  u64,  u64,  u64)' {aka 'long long unsigned int (*)(long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int)'} [-Wcast-function-type]
     345 |   ((u64 (*)(u64, u64, u64, u64, u64))(x))
         |    ^
   kernel/bpf/verifier.c:11190:17: note: in expansion of macro 'BPF_CAST_CALL'
   11190 |     insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
         |                 ^~~~~~~~~~~~~
   include/linux/filter.h:345:4: warning: cast between incompatible function types from 'int (* const)(struct bpf_map *, void *)' to 'u64 (*)(u64,  u64,  u64,  u64,  u64)' {aka 'long long unsigned int (*)(long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int)'} [-Wcast-function-type]
     345 |   ((u64 (*)(u64, u64, u64, u64, u64))(x))
         |    ^
   kernel/bpf/verifier.c:11194:17: note: in expansion of macro 'BPF_CAST_CALL'
   11194 |     insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
         |                 ^~~~~~~~~~~~~
   include/linux/filter.h:345:4: warning: cast between incompatible function types from 'int (* const)(struct bpf_map *, void *, u64)' {aka 'int (* const)(struct bpf_map *, void *, long long unsigned int)'} to 'u64 (*)(u64,  u64,  u64,  u64,  u64)' {aka 'long long unsigned int (*)(long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int)'} [-Wcast-function-type]
     345 |   ((u64 (*)(u64, u64, u64, u64, u64))(x))
         |    ^
   kernel/bpf/verifier.c:11198:17: note: in expansion of macro 'BPF_CAST_CALL'
   11198 |     insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
         |                 ^~~~~~~~~~~~~
   include/linux/filter.h:345:4: warning: cast between incompatible function types from 'int (* const)(struct bpf_map *, void *)' to 'u64 (*)(u64,  u64,  u64,  u64,  u64)' {aka 'long long unsigned int (*)(long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int)'} [-Wcast-function-type]
     345 |   ((u64 (*)(u64, u64, u64, u64, u64))(x))
         |    ^
   kernel/bpf/verifier.c:11202:17: note: in expansion of macro 'BPF_CAST_CALL'
   11202 |     insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
         |                 ^~~~~~~~~~~~~
   include/linux/filter.h:345:4: warning: cast between incompatible function types from 'int (* const)(struct bpf_map *, void *)' to 'u64 (*)(u64,  u64,  u64,  u64,  u64)' {aka 'long long unsigned int (*)(long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int,  long long unsigned int)'} [-Wcast-function-type]
     345 |   ((u64 (*)(u64, u64, u64, u64, u64))(x))
         |    ^
   kernel/bpf/verifier.c:11206:17: note: in expansion of macro 'BPF_CAST_CALL'
   11206 |     insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
         |                 ^~~~~~~~~~~~~

vim +/pkt_end +7029 kernel/bpf/verifier.c

  7024	
  7025	static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
  7026					   struct bpf_reg_state *src_reg,
  7027					   u8 opcode)
  7028	{
> 7029		struct bpf_reg_state *pkt_end, *pkt;
  7030	
  7031		if (src_reg->type == PTR_TO_PACKET_END) {
  7032			pkt_end = src_reg;
  7033			pkt = dst_reg;
  7034		} else if (dst_reg->type == PTR_TO_PACKET_END) {
  7035			pkt_end = dst_reg;
  7036			pkt = src_reg;
  7037			opcode = flip_opcode(opcode);
  7038		} else {
  7039			return -1;
  7040		}
  7041	
  7042		if (pkt->range >= 0)
  7043			return -1;
  7044	
  7045		switch (opcode) {
  7046		case BPF_JLE:
  7047			/* pkt <= pkt_end */
  7048			fallthrough;
  7049		case BPF_JGT:
  7050			/* pkt > pkt_end */
  7051			if (pkt->range == BEYOND_PKT_END)
  7052			/* pkt has at least one extra byte beyond pkt_end */
  7053				return opcode == BPF_JGT;
  7054			break;
  7055		case BPF_JLT:
  7056			/* pkt < pkt_end */
  7057			fallthrough;
  7058		case BPF_JGE:
  7059			/* pkt >= pkt_end */
  7060			if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
  7061				return opcode == BPF_JGE;
  7062			break;
  7063		}
  7064		return -1;
  7065	}
  7066	
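
One way to address the warning (a sketch only; it may not match how the
series is actually respun) is to drop the write-only pkt_end variable and
keep only the pkt pointer, for example:

	/* kernel/bpf/verifier.c -- same helper as above, minus the unused
	 * variable; relies on flip_opcode() and the AT_PKT_END/BEYOND_PKT_END
	 * range markers introduced earlier in this patch
	 */
	static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
					   struct bpf_reg_state *src_reg,
					   u8 opcode)
	{
		struct bpf_reg_state *pkt;

		if (src_reg->type == PTR_TO_PACKET_END) {
			pkt = dst_reg;
		} else if (dst_reg->type == PTR_TO_PACKET_END) {
			pkt = src_reg;
			opcode = flip_opcode(opcode);
		} else {
			return -1;
		}

		if (pkt->range >= 0)
			return -1;

		switch (opcode) {
		case BPF_JLE:
			/* pkt <= pkt_end */
			fallthrough;
		case BPF_JGT:
			/* pkt > pkt_end */
			if (pkt->range == BEYOND_PKT_END)
				/* pkt has at least one extra byte beyond pkt_end */
				return opcode == BPF_JGT;
			break;
		case BPF_JLT:
			/* pkt < pkt_end */
			fallthrough;
		case BPF_JGE:
			/* pkt >= pkt_end */
			if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
				return opcode == BPF_JGE;
			break;
		}
		return -1;
	}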

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org



* Re: [PATCH bpf-next 0/3] bpf: Pointers beyond packet end.
  2020-10-21 18:20 [PATCH bpf-next 0/3] bpf: Pointers beyond packet end Alexei Starovoitov
                   ` (2 preceding siblings ...)
  2020-10-21 18:20 ` [PATCH bpf-next 3/3] selftests/bpf: Add asm tests for pkt vs pkt_end comparison Alexei Starovoitov
@ 2020-10-22  9:47 ` Jiri Olsa
  3 siblings, 0 replies; 6+ messages in thread
From: Jiri Olsa @ 2020-10-22  9:47 UTC (permalink / raw)
  To: Alexei Starovoitov
  Cc: davem, daniel, john.fastabend, jolsa, netdev, bpf, kernel-team

On Wed, Oct 21, 2020 at 11:20:12AM -0700, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> In some cases LLVM uses the knowledge that branch is taken to optimze the code
> which causes the verifier to reject valid programs.
> Teach the verifier to recognize that
> r1 = skb->data;
> r1 += 10;
> r2 = skb->data_end;
> if (r1 > r2) {
>   here r1 points beyond packet_end and subsequent
>   if (r1 > r2) // always evaluates to "true".
> }
> 
> Alexei Starovoitov (3):
>   bpf: Support for pointers beyond pkt_end.
>   selftests/bpf: Add skb_pkt_end test
>   selftests/bpf: Add asm tests for pkt vs pkt_end comparison.

Tested-by: Jiri Olsa <jolsa@redhat.com>

thanks,
jirka

> 
>  include/linux/bpf_verifier.h                  |   2 +-
>  kernel/bpf/verifier.c                         | 131 +++++++++++++++---
>  .../bpf/prog_tests/test_skb_pkt_end.c         |  41 ++++++
>  .../testing/selftests/bpf/progs/skb_pkt_end.c |  54 ++++++++
>  .../testing/selftests/bpf/verifier/ctx_skb.c  |  42 ++++++
>  5 files changed, 247 insertions(+), 23 deletions(-)
>  create mode 100644 tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c
>  create mode 100644 tools/testing/selftests/bpf/progs/skb_pkt_end.c
> 
> -- 
> 2.23.0
> 


