All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH bpf-next 0/2] bpf: Narrow loads for bpf_sock_addr.user_port
@ 2020-05-14  1:50 Andrey Ignatov
  2020-05-14  1:50 ` [PATCH bpf-next 1/2] bpf: Support narrow loads from bpf_sock_addr.user_port Andrey Ignatov
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Andrey Ignatov @ 2020-05-14  1:50 UTC (permalink / raw)
  To: bpf; +Cc: Andrey Ignatov, ast, daniel, kernel-team

This patch set adds support for narrow loads from bpf_sock_addr.user_port
in BPF_PROG_TYPE_CGROUP_SOCK_ADDR programs.

Patch 1 adds narrow loads support for user_port.
Patch 2 tests it.


Andrey Ignatov (2):
  bpf: Support narrow loads from bpf_sock_addr.user_port
  selftests/bpf: Test narrow loads for bpf_sock_addr.user_port

 include/uapi/linux/bpf.h                     |  2 +-
 net/core/filter.c                            | 15 ++++----
 tools/include/uapi/linux/bpf.h               |  2 +-
 tools/testing/selftests/bpf/test_sock_addr.c | 38 ++++++++++++++------
 4 files changed, 37 insertions(+), 20 deletions(-)

-- 
2.24.1


^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH bpf-next 1/2] bpf: Support narrow loads from bpf_sock_addr.user_port
  2020-05-14  1:50 [PATCH bpf-next 0/2] bpf: Narrow loads for bpf_sock_addr.user_port Andrey Ignatov
@ 2020-05-14  1:50 ` Andrey Ignatov
  2020-05-14 16:52   ` Yonghong Song
  2020-05-14  1:50 ` [PATCH bpf-next 2/2] selftests/bpf: Test narrow loads for bpf_sock_addr.user_port Andrey Ignatov
  2020-05-15  1:37 ` [PATCH bpf-next 0/2] bpf: Narrow " Alexei Starovoitov
  2 siblings, 1 reply; 6+ messages in thread
From: Andrey Ignatov @ 2020-05-14  1:50 UTC (permalink / raw)
  To: bpf; +Cc: Andrey Ignatov, ast, daniel, kernel-team

bpf_sock_addr.user_port supports only 4-byte load and it leads to ugly
code in BPF programs, like:

	volatile __u32 user_port = ctx->user_port;
	__u16 port = bpf_ntohs(user_port);

Since otherwise clang may optimize the load to be 2-byte and it's
rejected by verifier.

Add support for 1- and 2-byte loads same way as it's supported for other
fields in bpf_sock_addr like user_ip4, msg_src_ip4, etc.

Signed-off-by: Andrey Ignatov <rdna@fb.com>
---
 include/uapi/linux/bpf.h       |  2 +-
 net/core/filter.c              | 15 +++++++--------
 tools/include/uapi/linux/bpf.h |  2 +-
 3 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index bfb31c1be219..85cfdffde182 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3728,7 +3728,7 @@ struct bpf_sock_addr {
 	__u32 user_ip6[4];	/* Allows 1,2,4,8-byte read and 4,8-byte write.
 				 * Stored in network byte order.
 				 */
-	__u32 user_port;	/* Allows 4-byte read and write.
+	__u32 user_port;	/* Allows 1,2,4-byte read and 4-byte write.
 				 * Stored in network byte order
 				 */
 	__u32 family;		/* Allows 4-byte read, but no write */
diff --git a/net/core/filter.c b/net/core/filter.c
index da0634979f53..1fe8c0c2d408 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7029,6 +7029,7 @@ static bool sock_addr_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
 	case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
 				msg_src_ip6[3]):
+	case bpf_ctx_range(struct bpf_sock_addr, user_port):
 		if (type == BPF_READ) {
 			bpf_ctx_record_field_size(info, size_default);
 
@@ -7059,10 +7060,6 @@ static bool sock_addr_is_valid_access(int off, int size,
 				return false;
 		}
 		break;
-	case bpf_ctx_range(struct bpf_sock_addr, user_port):
-		if (size != size_default)
-			return false;
-		break;
 	case offsetof(struct bpf_sock_addr, sk):
 		if (type != BPF_READ)
 			return false;
@@ -7958,8 +7955,8 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
 					struct bpf_insn *insn_buf,
 					struct bpf_prog *prog, u32 *target_size)
 {
+	int off, port_size = sizeof_field(struct sockaddr_in6, sin6_port);
 	struct bpf_insn *insn = insn_buf;
-	int off;
 
 	switch (si->off) {
 	case offsetof(struct bpf_sock_addr, user_family):
@@ -7994,9 +7991,11 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
 			     offsetof(struct sockaddr_in6, sin6_port));
 		BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) !=
 			     sizeof_field(struct sockaddr_in6, sin6_port));
-		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
-						     struct sockaddr_in6, uaddr,
-						     sin6_port, tmp_reg);
+		/* Account for sin6_port being smaller than user_port. */
+		port_size = min(port_size, BPF_LDST_BYTES(si));
+		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
+			struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
+			sin6_port, bytes_to_bpf_size(port_size), 0, tmp_reg);
 		break;
 
 	case offsetof(struct bpf_sock_addr, family):
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index bfb31c1be219..85cfdffde182 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -3728,7 +3728,7 @@ struct bpf_sock_addr {
 	__u32 user_ip6[4];	/* Allows 1,2,4,8-byte read and 4,8-byte write.
 				 * Stored in network byte order.
 				 */
-	__u32 user_port;	/* Allows 4-byte read and write.
+	__u32 user_port;	/* Allows 1,2,4-byte read and 4-byte write.
 				 * Stored in network byte order
 				 */
 	__u32 family;		/* Allows 4-byte read, but no write */
-- 
2.24.1


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH bpf-next 2/2] selftests/bpf: Test narrow loads for bpf_sock_addr.user_port
  2020-05-14  1:50 [PATCH bpf-next 0/2] bpf: Narrow loads for bpf_sock_addr.user_port Andrey Ignatov
  2020-05-14  1:50 ` [PATCH bpf-next 1/2] bpf: Support narrow loads from bpf_sock_addr.user_port Andrey Ignatov
@ 2020-05-14  1:50 ` Andrey Ignatov
  2020-05-14 17:08   ` Yonghong Song
  2020-05-15  1:37 ` [PATCH bpf-next 0/2] bpf: Narrow " Alexei Starovoitov
  2 siblings, 1 reply; 6+ messages in thread
From: Andrey Ignatov @ 2020-05-14  1:50 UTC (permalink / raw)
  To: bpf; +Cc: Andrey Ignatov, ast, daniel, kernel-team

Test 1,2,4-byte loads from bpf_sock_addr.user_port in sock_addr
programs.

Signed-off-by: Andrey Ignatov <rdna@fb.com>
---
 tools/testing/selftests/bpf/test_sock_addr.c | 38 ++++++++++++++------
 1 file changed, 28 insertions(+), 10 deletions(-)

diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index 61fd95b89af8..0358814c67dc 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -677,7 +677,7 @@ static int bind4_prog_load(const struct sock_addr_test *test)
 		uint8_t u4_addr8[4];
 		uint16_t u4_addr16[2];
 		uint32_t u4_addr32;
-	} ip4;
+	} ip4, port;
 	struct sockaddr_in addr4_rw;
 
 	if (inet_pton(AF_INET, SERV4_IP, (void *)&ip4) != 1) {
@@ -685,6 +685,8 @@ static int bind4_prog_load(const struct sock_addr_test *test)
 		return -1;
 	}
 
+	port.u4_addr32 = htons(SERV4_PORT);
+
 	if (mk_sockaddr(AF_INET, SERV4_REWRITE_IP, SERV4_REWRITE_PORT,
 			(struct sockaddr *)&addr4_rw, sizeof(addr4_rw)) == -1)
 		return -1;
@@ -696,49 +698,65 @@ static int bind4_prog_load(const struct sock_addr_test *test)
 		/* if (sk.family == AF_INET && */
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, family)),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 24),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 32),
 
 		/*     (sk.type == SOCK_DGRAM || sk.type == SOCK_STREAM) && */
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, type)),
 		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_DGRAM, 1),
 		BPF_JMP_A(1),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_STREAM, 20),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_STREAM, 28),
 
 		/*     1st_byte_of_user_ip4 == expected && */
 		BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, user_ip4)),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[0], 18),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[0], 26),
 
 		/*     2nd_byte_of_user_ip4 == expected && */
 		BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, user_ip4) + 1),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[1], 16),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[1], 24),
 
 		/*     3rd_byte_of_user_ip4 == expected && */
 		BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, user_ip4) + 2),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[2], 14),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[2], 22),
 
 		/*     4th_byte_of_user_ip4 == expected && */
 		BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, user_ip4) + 3),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[3], 12),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[3], 20),
 
 		/*     1st_half_of_user_ip4 == expected && */
 		BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, user_ip4)),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[0], 10),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[0], 18),
 
 		/*     2nd_half_of_user_ip4 == expected && */
 		BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, user_ip4) + 2),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[1], 8),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[1], 16),
 
-		/*     whole_user_ip4 == expected) { */
+		/*     whole_user_ip4 == expected && */
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock_addr, user_ip4)),
 		BPF_LD_IMM64(BPF_REG_8, ip4.u4_addr32), /* See [2]. */
+		BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_8, 12),
+
+		/*     1st_byte_of_user_port == expected && */
+		BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
+			    offsetof(struct bpf_sock_addr, user_port)),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, port.u4_addr8[0], 10),
+
+		/*     1st_half_of_user_port == expected && */
+		BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_6,
+			    offsetof(struct bpf_sock_addr, user_port)),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, port.u4_addr16[0], 8),
+
+		/*     user_port == expected) { */
+		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+			    offsetof(struct bpf_sock_addr, user_port)),
+		BPF_LD_IMM64(BPF_REG_8, port.u4_addr32), /* See [2]. */
 		BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_8, 4),
 
 		/*      user_ip4 = addr4_rw.sin_addr */
-- 
2.24.1


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH bpf-next 1/2] bpf: Support narrow loads from bpf_sock_addr.user_port
  2020-05-14  1:50 ` [PATCH bpf-next 1/2] bpf: Support narrow loads from bpf_sock_addr.user_port Andrey Ignatov
@ 2020-05-14 16:52   ` Yonghong Song
  0 siblings, 0 replies; 6+ messages in thread
From: Yonghong Song @ 2020-05-14 16:52 UTC (permalink / raw)
  To: Andrey Ignatov, bpf; +Cc: ast, daniel, kernel-team



On 5/13/20 6:50 PM, Andrey Ignatov wrote:
> bpf_sock_addr.user_port supports only 4-byte load and it leads to ugly
> code in BPF programs, like:
> 
> 	volatile __u32 user_port = ctx->user_port;
> 	__u16 port = bpf_ntohs(user_port);
> 
> Since otherwise clang may optimize the load to be 2-byte and it's
> rejected by verifier.
> 
> Add support for 1- and 2-byte loads same way as it's supported for other
> fields in bpf_sock_addr like user_ip4, msg_src_ip4, etc.
> 
> Signed-off-by: Andrey Ignatov <rdna@fb.com>
Acked-by: Yonghong Song <yhs@fb.com>

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH bpf-next 2/2] selftests/bpf: Test narrow loads for bpf_sock_addr.user_port
  2020-05-14  1:50 ` [PATCH bpf-next 2/2] selftests/bpf: Test narrow loads for bpf_sock_addr.user_port Andrey Ignatov
@ 2020-05-14 17:08   ` Yonghong Song
  0 siblings, 0 replies; 6+ messages in thread
From: Yonghong Song @ 2020-05-14 17:08 UTC (permalink / raw)
  To: Andrey Ignatov, bpf; +Cc: ast, daniel, kernel-team



On 5/13/20 6:50 PM, Andrey Ignatov wrote:
> Test 1,2,4-byte loads from bpf_sock_addr.user_port in sock_addr
> programs.
> 
> Signed-off-by: Andrey Ignatov <rdna@fb.com>
Acked-by: Yonghong Song <yhs@fb.com>

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH bpf-next 0/2] bpf: Narrow loads for bpf_sock_addr.user_port
  2020-05-14  1:50 [PATCH bpf-next 0/2] bpf: Narrow loads for bpf_sock_addr.user_port Andrey Ignatov
  2020-05-14  1:50 ` [PATCH bpf-next 1/2] bpf: Support narrow loads from bpf_sock_addr.user_port Andrey Ignatov
  2020-05-14  1:50 ` [PATCH bpf-next 2/2] selftests/bpf: Test narrow loads for bpf_sock_addr.user_port Andrey Ignatov
@ 2020-05-15  1:37 ` Alexei Starovoitov
  2 siblings, 0 replies; 6+ messages in thread
From: Alexei Starovoitov @ 2020-05-15  1:37 UTC (permalink / raw)
  To: Andrey Ignatov; +Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Kernel Team

On Wed, May 13, 2020 at 6:50 PM Andrey Ignatov <rdna@fb.com> wrote:
>
> This patch set adds support for narrow loads from bpf_sock_addr.user_port
> in BPF_PROG_TYPE_CGROUP_SOCK_ADDR programs.
>
> Patch 1 adds narrow loads support for user_port.
> Patch 2 tests it.

Applied. Thanks

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2020-05-15  1:37 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-05-14  1:50 [PATCH bpf-next 0/2] bpf: Narrow loads for bpf_sock_addr.user_port Andrey Ignatov
2020-05-14  1:50 ` [PATCH bpf-next 1/2] bpf: Support narrow loads from bpf_sock_addr.user_port Andrey Ignatov
2020-05-14 16:52   ` Yonghong Song
2020-05-14  1:50 ` [PATCH bpf-next 2/2] selftests/bpf: Test narrow loads for bpf_sock_addr.user_port Andrey Ignatov
2020-05-14 17:08   ` Yonghong Song
2020-05-15  1:37 ` [PATCH bpf-next 0/2] bpf: Narrow " Alexei Starovoitov

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.