* [PATCH v1 bpf 0/4] bpf: sysctl: Fix data-races around net.core.bpf_XXX.
@ 2022-08-18  4:23 Kuniyuki Iwashima
  2022-08-18  4:23 ` [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable Kuniyuki Iwashima
                   ` (4 more replies)
  0 siblings, 5 replies; 15+ messages in thread
From: Kuniyuki Iwashima @ 2022-08-18  4:23 UTC (permalink / raw)
  To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
  Cc: Kuniyuki Iwashima, Kuniyuki Iwashima, bpf, netdev

This series, split from [0], fixes data-races around 4 bpf knobs
in net_core_table.

[0]: https://lore.kernel.org/netdev/20220818035227.81567-1-kuniyu@amazon.com/


Kuniyuki Iwashima (4):
  bpf: Fix data-races around bpf_jit_enable.
  bpf: Fix data-races around bpf_jit_harden.
  bpf: Fix data-races around bpf_jit_kallsyms.
  bpf: Fix a data-race around bpf_jit_limit.

 arch/arm/net/bpf_jit_32.c        |  2 +-
 arch/arm64/net/bpf_jit_comp.c    |  2 +-
 arch/mips/net/bpf_jit_comp.c     |  2 +-
 arch/powerpc/net/bpf_jit_comp.c  |  5 +++--
 arch/riscv/net/bpf_jit_core.c    |  2 +-
 arch/s390/net/bpf_jit_comp.c     |  2 +-
 arch/sparc/net/bpf_jit_comp_32.c |  5 +++--
 arch/sparc/net/bpf_jit_comp_64.c |  5 +++--
 arch/x86/net/bpf_jit_comp.c      |  2 +-
 arch/x86/net/bpf_jit_comp32.c    |  2 +-
 include/linux/filter.h           | 16 ++++++++++------
 kernel/bpf/core.c                |  2 +-
 net/core/sysctl_net_core.c       |  4 ++--
 13 files changed, 29 insertions(+), 22 deletions(-)

-- 
2.30.2



* [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable.
  2022-08-18  4:23 [PATCH v1 bpf 0/4] bpf: sysctl: Fix data-races around net.core.bpf_XXX Kuniyuki Iwashima
@ 2022-08-18  4:23 ` Kuniyuki Iwashima
  2022-08-18 22:49   ` Alexei Starovoitov
  2022-08-18  4:23 ` [PATCH v1 bpf 2/4] bpf: Fix data-races around bpf_jit_harden Kuniyuki Iwashima
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 15+ messages in thread
From: Kuniyuki Iwashima @ 2022-08-18  4:23 UTC (permalink / raw)
  To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
  Cc: Kuniyuki Iwashima, Kuniyuki Iwashima, bpf, netdev

A sysctl variable bpf_jit_enable is accessed concurrently, and there is
always a chance of data-race.  So, all readers and a writer need some
basic protection to avoid load/store-tearing.

Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
---
 arch/arm/net/bpf_jit_32.c        | 2 +-
 arch/arm64/net/bpf_jit_comp.c    | 2 +-
 arch/mips/net/bpf_jit_comp.c     | 2 +-
 arch/powerpc/net/bpf_jit_comp.c  | 5 +++--
 arch/riscv/net/bpf_jit_core.c    | 2 +-
 arch/s390/net/bpf_jit_comp.c     | 2 +-
 arch/sparc/net/bpf_jit_comp_32.c | 5 +++--
 arch/sparc/net/bpf_jit_comp_64.c | 5 +++--
 arch/x86/net/bpf_jit_comp.c      | 2 +-
 arch/x86/net/bpf_jit_comp32.c    | 2 +-
 include/linux/filter.h           | 2 +-
 net/core/sysctl_net_core.c       | 4 ++--
 12 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6a1c9fca5260..4b6b62a6fdd4 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1999,7 +1999,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}
 	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
 
-	if (bpf_jit_enable > 1)
+	if (READ_ONCE(bpf_jit_enable) > 1)
 		/* there are 2 passes here */
 		bpf_jit_dump(prog->len, image_size, 2, ctx.target);
 
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 389623ae5a91..03bb40352d2c 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1568,7 +1568,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}
 
 	/* And we're done. */
-	if (bpf_jit_enable > 1)
+	if (READ_ONCE(bpf_jit_enable) > 1)
 		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
 
 	bpf_flush_icache(header, ctx.image + ctx.idx);
diff --git a/arch/mips/net/bpf_jit_comp.c b/arch/mips/net/bpf_jit_comp.c
index b17130d510d4..1e623ae7eadf 100644
--- a/arch/mips/net/bpf_jit_comp.c
+++ b/arch/mips/net/bpf_jit_comp.c
@@ -1012,7 +1012,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	flush_icache_range((unsigned long)header,
 			   (unsigned long)&ctx.target[ctx.jit_index]);
 
-	if (bpf_jit_enable > 1)
+	if (READ_ONCE(bpf_jit_enable) > 1)
 		bpf_jit_dump(prog->len, image_size, 2, ctx.target);
 
 	prog->bpf_func = (void *)ctx.target;
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 43e634126514..c71d1e94ee7e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -122,6 +122,7 @@ bool bpf_jit_needs_zext(void)
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
+	int jit_enable = READ_ONCE(bpf_jit_enable);
 	u32 proglen;
 	u32 alloclen;
 	u8 *image = NULL;
@@ -263,13 +264,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		}
 		bpf_jit_build_epilogue(code_base, &cgctx);
 
-		if (bpf_jit_enable > 1)
+		if (jit_enable > 1)
 			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
 				proglen - (cgctx.idx * 4), cgctx.seen);
 	}
 
 skip_codegen_passes:
-	if (bpf_jit_enable > 1)
+	if (jit_enable > 1)
 		/*
 		 * Note that we output the base address of the code_base
 		 * rather than image, since opcodes are in code_base.
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index 737baf8715da..603b5b66379b 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -151,7 +151,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}
 	bpf_jit_build_epilogue(ctx);
 
-	if (bpf_jit_enable > 1)
+	if (READ_ONCE(bpf_jit_enable) > 1)
 		bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);
 
 	prog->bpf_func = (void *)ctx->insns;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index af35052d06ed..06897a4e9c62 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1831,7 +1831,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		fp = orig_fp;
 		goto free_addrs;
 	}
-	if (bpf_jit_enable > 1) {
+	if (READ_ONCE(bpf_jit_enable) > 1) {
 		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
 		print_fn_code(jit.prg_buf, jit.size_prg);
 	}
diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index b1dbf2fa8c0a..7c454b920250 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -326,13 +326,14 @@ do {	*prog++ = BR_OPC | WDISP22(OFF);		\
 void bpf_jit_compile(struct bpf_prog *fp)
 {
 	unsigned int cleanup_addr, proglen, oldproglen = 0;
+	int jit_enable = READ_ONCE(bpf_jit_enable);
 	u32 temp[8], *prog, *func, seen = 0, pass;
 	const struct sock_filter *filter = fp->insns;
 	int i, flen = fp->len, pc_ret0 = -1;
 	unsigned int *addrs;
 	void *image;
 
-	if (!bpf_jit_enable)
+	if (!jit_enable)
 		return;
 
 	addrs = kmalloc_array(flen, sizeof(*addrs), GFP_KERNEL);
@@ -743,7 +744,7 @@ cond_branch:			f_offset = addrs[i + filter[i].jf];
 		oldproglen = proglen;
 	}
 
-	if (bpf_jit_enable > 1)
+	if (jit_enable > 1)
 		bpf_jit_dump(flen, proglen, pass + 1, image);
 
 	if (image) {
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index fa0759bfe498..74cc1fa1f97f 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1479,6 +1479,7 @@ struct sparc64_jit_data {
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
+	int jit_enable = READ_ONCE(bpf_jit_enable);
 	struct bpf_prog *tmp, *orig_prog = prog;
 	struct sparc64_jit_data *jit_data;
 	struct bpf_binary_header *header;
@@ -1549,7 +1550,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		}
 		build_epilogue(&ctx);
 
-		if (bpf_jit_enable > 1)
+		if (jit_enable > 1)
 			pr_info("Pass %d: size = %u, seen = [%c%c%c%c%c%c]\n", pass,
 				ctx.idx * 4,
 				ctx.tmp_1_used ? '1' : ' ',
@@ -1596,7 +1597,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		goto out_off;
 	}
 
-	if (bpf_jit_enable > 1)
+	if (jit_enable > 1)
 		bpf_jit_dump(prog->len, image_size, pass, ctx.image);
 
 	bpf_flush_icache(header, (u8 *)header + header->size);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index c1f6c1c51d99..a5c7df7cab2a 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -2439,7 +2439,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		cond_resched();
 	}
 
-	if (bpf_jit_enable > 1)
+	if (READ_ONCE(bpf_jit_enable) > 1)
 		bpf_jit_dump(prog->len, proglen, pass + 1, image);
 
 	if (image) {
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 429a89c5468b..745f15a29dd3 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -2597,7 +2597,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		cond_resched();
 	}
 
-	if (bpf_jit_enable > 1)
+	if (READ_ONCE(bpf_jit_enable) > 1)
 		bpf_jit_dump(prog->len, proglen, pass + 1, image);
 
 	if (image) {
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a5f21dc3c432..ce8072626ccf 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1080,7 +1080,7 @@ static inline bool bpf_jit_is_ebpf(void)
 
 static inline bool ebpf_jit_enabled(void)
 {
-	return bpf_jit_enable && bpf_jit_is_ebpf();
+	return READ_ONCE(bpf_jit_enable) && bpf_jit_is_ebpf();
 }
 
 static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 71a13596ea2b..2ddd153a6ff4 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -263,7 +263,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
 					   void *buffer, size_t *lenp,
 					   loff_t *ppos)
 {
-	int ret, jit_enable = *(int *)table->data;
+	int ret, jit_enable = READ_ONCE(*(int *)table->data);
 	int min = *(int *)table->extra1;
 	int max = *(int *)table->extra2;
 	struct ctl_table tmp = *table;
@@ -276,7 +276,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
 	if (write && !ret) {
 		if (jit_enable < 2 ||
 		    (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
-			*(int *)table->data = jit_enable;
+			WRITE_ONCE(*(int *)table->data, jit_enable);
 			if (jit_enable == 2)
 				pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
 		} else {
-- 
2.30.2



* [PATCH v1 bpf 2/4] bpf: Fix data-races around bpf_jit_harden.
  2022-08-18  4:23 [PATCH v1 bpf 0/4] bpf: sysctl: Fix data-races around net.core.bpf_XXX Kuniyuki Iwashima
  2022-08-18  4:23 ` [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable Kuniyuki Iwashima
@ 2022-08-18  4:23 ` Kuniyuki Iwashima
  2022-08-18  4:23 ` [PATCH v1 bpf 3/4] bpf: Fix data-races around bpf_jit_kallsyms Kuniyuki Iwashima
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 15+ messages in thread
From: Kuniyuki Iwashima @ 2022-08-18  4:23 UTC (permalink / raw)
  To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
  Cc: Kuniyuki Iwashima, Kuniyuki Iwashima, bpf, netdev

While reading bpf_jit_harden, it can be changed concurrently.
Thus, we need to add READ_ONCE() to its readers.

Fixes: 4f3446bb809f ("bpf: add generic constant blinding for use in jits")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
---
 include/linux/filter.h | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index ce8072626ccf..09566ad211bd 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1090,6 +1090,8 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
 
 static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
 {
+	int jit_harden = READ_ONCE(bpf_jit_harden);
+
 	/* These are the prerequisites, should someone ever have the
 	 * idea to call blinding outside of them, we make sure to
 	 * bail out.
@@ -1098,9 +1100,9 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
 		return false;
 	if (!prog->jit_requested)
 		return false;
-	if (!bpf_jit_harden)
+	if (!jit_harden)
 		return false;
-	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
+	if (jit_harden == 1 && capable(CAP_SYS_ADMIN))
 		return false;
 
 	return true;
@@ -1111,7 +1113,7 @@ static inline bool bpf_jit_kallsyms_enabled(void)
 	/* There are a couple of corner cases where kallsyms should
 	 * not be enabled f.e. on hardening.
 	 */
-	if (bpf_jit_harden)
+	if (READ_ONCE(bpf_jit_harden))
 		return false;
 	if (!bpf_jit_kallsyms)
 		return false;
-- 
2.30.2



* [PATCH v1 bpf 3/4] bpf: Fix data-races around bpf_jit_kallsyms.
  2022-08-18  4:23 [PATCH v1 bpf 0/4] bpf: sysctl: Fix data-races around net.core.bpf_XXX Kuniyuki Iwashima
  2022-08-18  4:23 ` [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable Kuniyuki Iwashima
  2022-08-18  4:23 ` [PATCH v1 bpf 2/4] bpf: Fix data-races around bpf_jit_harden Kuniyuki Iwashima
@ 2022-08-18  4:23 ` Kuniyuki Iwashima
  2022-08-18  4:23 ` [PATCH v1 bpf 4/4] bpf: Fix a data-race around bpf_jit_limit Kuniyuki Iwashima
  2022-08-24  1:10 ` [PATCH v1 bpf 0/4] bpf: sysctl: Fix data-races around net.core.bpf_XXX dongdwdw
  4 siblings, 0 replies; 15+ messages in thread
From: Kuniyuki Iwashima @ 2022-08-18  4:23 UTC (permalink / raw)
  To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
  Cc: Kuniyuki Iwashima, Kuniyuki Iwashima, bpf, netdev

While reading bpf_jit_kallsyms, it can be changed concurrently.
Thus, we need to add READ_ONCE() to its readers.

Fixes: 74451e66d516 ("bpf: make jited programs visible in traces")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
---
 include/linux/filter.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 09566ad211bd..35881fccce05 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1110,14 +1110,16 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
 
 static inline bool bpf_jit_kallsyms_enabled(void)
 {
+	int jit_kallsyms = READ_ONCE(bpf_jit_kallsyms);
+
 	/* There are a couple of corner cases where kallsyms should
 	 * not be enabled f.e. on hardening.
 	 */
 	if (READ_ONCE(bpf_jit_harden))
 		return false;
-	if (!bpf_jit_kallsyms)
+	if (!jit_kallsyms)
 		return false;
-	if (bpf_jit_kallsyms == 1)
+	if (jit_kallsyms == 1)
 		return true;
 
 	return false;
-- 
2.30.2



* [PATCH v1 bpf 4/4] bpf: Fix a data-race around bpf_jit_limit.
  2022-08-18  4:23 [PATCH v1 bpf 0/4] bpf: sysctl: Fix data-races around net.core.bpf_XXX Kuniyuki Iwashima
                   ` (2 preceding siblings ...)
  2022-08-18  4:23 ` [PATCH v1 bpf 3/4] bpf: Fix data-races around bpf_jit_kallsyms Kuniyuki Iwashima
@ 2022-08-18  4:23 ` Kuniyuki Iwashima
  2022-08-24  1:10 ` [PATCH v1 bpf 0/4] bpf: sysctl: Fix data-races around net.core.bpf_XXX dongdwdw
  4 siblings, 0 replies; 15+ messages in thread
From: Kuniyuki Iwashima @ 2022-08-18  4:23 UTC (permalink / raw)
  To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
  Cc: Kuniyuki Iwashima, Kuniyuki Iwashima, bpf, netdev

While reading bpf_jit_limit, it can be changed concurrently.
Thus, we need to add READ_ONCE() to its reader.

Fixes: ede95a63b5e8 ("bpf: add bpf_jit_limit knob to restrict unpriv allocations")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
---
 kernel/bpf/core.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index c1e10d088dbb..3d9eb3ae334c 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -971,7 +971,7 @@ pure_initcall(bpf_jit_charge_init);
 
 int bpf_jit_charge_modmem(u32 size)
 {
-	if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) {
+	if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
 		if (!bpf_capable()) {
 			atomic_long_sub(size, &bpf_jit_current);
 			return -EPERM;
-- 
2.30.2



* Re: [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable.
  2022-08-18  4:23 ` [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable Kuniyuki Iwashima
@ 2022-08-18 22:49   ` Alexei Starovoitov
  2022-08-19  0:06     ` Kuniyuki Iwashima
  0 siblings, 1 reply; 15+ messages in thread
From: Alexei Starovoitov @ 2022-08-18 22:49 UTC (permalink / raw)
  To: Kuniyuki Iwashima
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Kuniyuki Iwashima, bpf, Network Development

On Wed, Aug 17, 2022 at 9:24 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
>
> A sysctl variable bpf_jit_enable is accessed concurrently, and there is
> always a chance of data-race.  So, all readers and a writer need some
> basic protection to avoid load/store-tearing.
>
> Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
> Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> ---
>  arch/arm/net/bpf_jit_32.c        | 2 +-
>  arch/arm64/net/bpf_jit_comp.c    | 2 +-
>  arch/mips/net/bpf_jit_comp.c     | 2 +-
>  arch/powerpc/net/bpf_jit_comp.c  | 5 +++--
>  arch/riscv/net/bpf_jit_core.c    | 2 +-
>  arch/s390/net/bpf_jit_comp.c     | 2 +-
>  arch/sparc/net/bpf_jit_comp_32.c | 5 +++--
>  arch/sparc/net/bpf_jit_comp_64.c | 5 +++--
>  arch/x86/net/bpf_jit_comp.c      | 2 +-
>  arch/x86/net/bpf_jit_comp32.c    | 2 +-
>  include/linux/filter.h           | 2 +-
>  net/core/sysctl_net_core.c       | 4 ++--
>  12 files changed, 19 insertions(+), 16 deletions(-)
>
> diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
> index 6a1c9fca5260..4b6b62a6fdd4 100644
> --- a/arch/arm/net/bpf_jit_32.c
> +++ b/arch/arm/net/bpf_jit_32.c
> @@ -1999,7 +1999,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
>         }
>         flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
>
> -       if (bpf_jit_enable > 1)
> +       if (READ_ONCE(bpf_jit_enable) > 1)

Nack.
Even if the compiler decides to use single byte loads for some
odd reason there is no issue here.


* Re: [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable.
  2022-08-18 22:49   ` Alexei Starovoitov
@ 2022-08-19  0:06     ` Kuniyuki Iwashima
  2022-08-19  0:13       ` Alexei Starovoitov
  0 siblings, 1 reply; 15+ messages in thread
From: Kuniyuki Iwashima @ 2022-08-19  0:06 UTC (permalink / raw)
  To: alexei.starovoitov; +Cc: andrii, ast, bpf, daniel, kuni1840, kuniyu, netdev

From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
Date:   Thu, 18 Aug 2022 15:49:46 -0700
> On Wed, Aug 17, 2022 at 9:24 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> >
> > A sysctl variable bpf_jit_enable is accessed concurrently, and there is
> > always a chance of data-race.  So, all readers and a writer need some
> > basic protection to avoid load/store-tearing.
> >
> > Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
> > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> > ---
> >  arch/arm/net/bpf_jit_32.c        | 2 +-
> >  arch/arm64/net/bpf_jit_comp.c    | 2 +-
> >  arch/mips/net/bpf_jit_comp.c     | 2 +-
> >  arch/powerpc/net/bpf_jit_comp.c  | 5 +++--
> >  arch/riscv/net/bpf_jit_core.c    | 2 +-
> >  arch/s390/net/bpf_jit_comp.c     | 2 +-
> >  arch/sparc/net/bpf_jit_comp_32.c | 5 +++--
> >  arch/sparc/net/bpf_jit_comp_64.c | 5 +++--
> >  arch/x86/net/bpf_jit_comp.c      | 2 +-
> >  arch/x86/net/bpf_jit_comp32.c    | 2 +-
> >  include/linux/filter.h           | 2 +-
> >  net/core/sysctl_net_core.c       | 4 ++--
> >  12 files changed, 19 insertions(+), 16 deletions(-)
> >
> > diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
> > index 6a1c9fca5260..4b6b62a6fdd4 100644
> > --- a/arch/arm/net/bpf_jit_32.c
> > +++ b/arch/arm/net/bpf_jit_32.c
> > @@ -1999,7 +1999,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
> >         }
> >         flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
> >
> > -       if (bpf_jit_enable > 1)
> > +       if (READ_ONCE(bpf_jit_enable) > 1)
> 
> Nack.
> Even if the compiler decides to use single byte loads for some
> odd reason there is no issue here.

I see, and same for 2nd/3rd patches, right?

Then how about this part?
It's not data-race nor problematic in practice, but should the value be
consistent in the same function?
The 2nd/3rd patches also have this kind of part.

---8<---
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 43e634126514..c71d1e94ee7e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -122,6 +122,7 @@ bool bpf_jit_needs_zext(void)
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
+	int jit_enable = READ_ONCE(bpf_jit_enable);
 	u32 proglen;
 	u32 alloclen;
 	u8 *image = NULL;
@@ -263,13 +264,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		}
 		bpf_jit_build_epilogue(code_base, &cgctx);
 
-		if (bpf_jit_enable > 1)
+		if (jit_enable > 1)
 			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
 				proglen - (cgctx.idx * 4), cgctx.seen);
 	}
 
 skip_codegen_passes:
-	if (bpf_jit_enable > 1)
+	if (jit_enable > 1)
 		/*
 		 * Note that we output the base address of the code_base
 		 * rather than image, since opcodes are in code_base.
---8<---


* Re: [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable.
  2022-08-19  0:06     ` Kuniyuki Iwashima
@ 2022-08-19  0:13       ` Alexei Starovoitov
  2022-08-19  0:55         ` Kuniyuki Iwashima
  0 siblings, 1 reply; 15+ messages in thread
From: Alexei Starovoitov @ 2022-08-19  0:13 UTC (permalink / raw)
  To: Kuniyuki Iwashima
  Cc: Andrii Nakryiko, Alexei Starovoitov, bpf, Daniel Borkmann,
	Kuniyuki Iwashima, Network Development

On Thu, Aug 18, 2022 at 5:07 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
>
> From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> Date:   Thu, 18 Aug 2022 15:49:46 -0700
> > On Wed, Aug 17, 2022 at 9:24 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > >
> > > A sysctl variable bpf_jit_enable is accessed concurrently, and there is
> > > always a chance of data-race.  So, all readers and a writer need some
> > > basic protection to avoid load/store-tearing.
> > >
> > > Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
> > > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> > > ---
> > >  arch/arm/net/bpf_jit_32.c        | 2 +-
> > >  arch/arm64/net/bpf_jit_comp.c    | 2 +-
> > >  arch/mips/net/bpf_jit_comp.c     | 2 +-
> > >  arch/powerpc/net/bpf_jit_comp.c  | 5 +++--
> > >  arch/riscv/net/bpf_jit_core.c    | 2 +-
> > >  arch/s390/net/bpf_jit_comp.c     | 2 +-
> > >  arch/sparc/net/bpf_jit_comp_32.c | 5 +++--
> > >  arch/sparc/net/bpf_jit_comp_64.c | 5 +++--
> > >  arch/x86/net/bpf_jit_comp.c      | 2 +-
> > >  arch/x86/net/bpf_jit_comp32.c    | 2 +-
> > >  include/linux/filter.h           | 2 +-
> > >  net/core/sysctl_net_core.c       | 4 ++--
> > >  12 files changed, 19 insertions(+), 16 deletions(-)
> > >
> > > diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
> > > index 6a1c9fca5260..4b6b62a6fdd4 100644
> > > --- a/arch/arm/net/bpf_jit_32.c
> > > +++ b/arch/arm/net/bpf_jit_32.c
> > > @@ -1999,7 +1999,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
> > >         }
> > >         flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
> > >
> > > -       if (bpf_jit_enable > 1)
> > > +       if (READ_ONCE(bpf_jit_enable) > 1)
> >
> > Nack.
> > Even if the compiler decides to use single byte loads for some
> > odd reason there is no issue here.
>
> I see, and same for 2nd/3rd patches, right?
>
> Then how about this part?
> It's not data-race nor problematic in practice, but should the value be
> consistent in the same function?
> The 2nd/3rd patches also have this kind of part.

The bpf_jit_enable > 1 is unsupported and buggy.
It will be removed eventually.

Why are you doing these changes if they're not fixing any bugs ?
Just to shut up some race sanitizer?

> ---8<---
> diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
> index 43e634126514..c71d1e94ee7e 100644
> --- a/arch/powerpc/net/bpf_jit_comp.c
> +++ b/arch/powerpc/net/bpf_jit_comp.c
> @@ -122,6 +122,7 @@ bool bpf_jit_needs_zext(void)
>
>  struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
>  {
> +       int jit_enable = READ_ONCE(bpf_jit_enable);
>         u32 proglen;
>         u32 alloclen;
>         u8 *image = NULL;
> @@ -263,13 +264,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
>                 }
>                 bpf_jit_build_epilogue(code_base, &cgctx);
>
> -               if (bpf_jit_enable > 1)
> +               if (jit_enable > 1)
>                         pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
>                                 proglen - (cgctx.idx * 4), cgctx.seen);
>         }
>
>  skip_codegen_passes:
> -       if (bpf_jit_enable > 1)
> +       if (jit_enable > 1)
>                 /*
>                  * Note that we output the base address of the code_base
>                  * rather than image, since opcodes are in code_base.
> ---8<---


* Re: [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable.
  2022-08-19  0:13       ` Alexei Starovoitov
@ 2022-08-19  0:55         ` Kuniyuki Iwashima
  2022-08-19  1:05           ` Alexei Starovoitov
  0 siblings, 1 reply; 15+ messages in thread
From: Kuniyuki Iwashima @ 2022-08-19  0:55 UTC (permalink / raw)
  To: alexei.starovoitov; +Cc: andrii, ast, bpf, daniel, kuni1840, kuniyu, netdev

From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
Date:   Thu, 18 Aug 2022 17:13:25 -0700
> On Thu, Aug 18, 2022 at 5:07 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> >
> > From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> > Date:   Thu, 18 Aug 2022 15:49:46 -0700
> > > On Wed, Aug 17, 2022 at 9:24 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > > >
> > > > A sysctl variable bpf_jit_enable is accessed concurrently, and there is
> > > > always a chance of data-race.  So, all readers and a writer need some
> > > > basic protection to avoid load/store-tearing.
> > > >
> > > > Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
> > > > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> > > > ---
> > > >  arch/arm/net/bpf_jit_32.c        | 2 +-
> > > >  arch/arm64/net/bpf_jit_comp.c    | 2 +-
> > > >  arch/mips/net/bpf_jit_comp.c     | 2 +-
> > > >  arch/powerpc/net/bpf_jit_comp.c  | 5 +++--
> > > >  arch/riscv/net/bpf_jit_core.c    | 2 +-
> > > >  arch/s390/net/bpf_jit_comp.c     | 2 +-
> > > >  arch/sparc/net/bpf_jit_comp_32.c | 5 +++--
> > > >  arch/sparc/net/bpf_jit_comp_64.c | 5 +++--
> > > >  arch/x86/net/bpf_jit_comp.c      | 2 +-
> > > >  arch/x86/net/bpf_jit_comp32.c    | 2 +-
> > > >  include/linux/filter.h           | 2 +-
> > > >  net/core/sysctl_net_core.c       | 4 ++--
> > > >  12 files changed, 19 insertions(+), 16 deletions(-)
> > > >
> > > > diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
> > > > index 6a1c9fca5260..4b6b62a6fdd4 100644
> > > > --- a/arch/arm/net/bpf_jit_32.c
> > > > +++ b/arch/arm/net/bpf_jit_32.c
> > > > @@ -1999,7 +1999,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
> > > >         }
> > > >         flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
> > > >
> > > > -       if (bpf_jit_enable > 1)
> > > > +       if (READ_ONCE(bpf_jit_enable) > 1)
> > >
> > > Nack.
> > > Even if the compiler decides to use single byte loads for some
> > > odd reason there is no issue here.
> >
> > I see, and same for 2nd/3rd patches, right?
> >
> > Then how about this part?
> > It's not data-race nor problematic in practice, but should the value be
> > consistent in the same function?
> > The 2nd/3rd patches also have this kind of part.
> 
> The bof_jit_enable > 1 is unsupported and buggy.
> It will be removed eventually.

Ok, then I'm fine with no change.

> 
> Why are you doing these changes if they're not fixing any bugs ?
> Just to shut up some race sanitizer?

For the data-race, that's one of the reasons.  I should have made sure
the change fixes an actual bug, my apologies.

For the two reads, it feels buggy that an inconsistent snapshot is
possible.  e.g.) in the 2nd patch, bpf_jit_blinding_enabled() could
return true even though bpf_jit_harden is already 0.  If the previous
value was 1, it can be explained as a timing issue, but it is not
intuitive.
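
To make the concern concrete, here is a minimal sketch (simplified from
the helper touched in the 2nd patch; the function names and the
interleaving below are illustrative only):

	/* current pattern: bpf_jit_harden is read twice */
	static bool blinding_enabled_two_reads(void)
	{
		if (!bpf_jit_harden)		/* 1st read observes 1 */
			return false;
		/* the sysctl writer changes bpf_jit_harden from 1 to 0 here */
		if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
			return false;		/* 2nd read observes 0 */

		/* reached by a CAP_SYS_ADMIN caller, although neither
		 * snapshot (1 with CAP_SYS_ADMIN, or 0) would enable
		 * blinding */
		return true;
	}

	/* pattern after the 2nd patch: one snapshot for both checks */
	static bool blinding_enabled_one_read(void)
	{
		int jit_harden = READ_ONCE(bpf_jit_harden);

		if (!jit_harden)
			return false;
		if (jit_harden == 1 && capable(CAP_SYS_ADMIN))
			return false;

		return true;
	}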


* Re: [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable.
  2022-08-19  0:55         ` Kuniyuki Iwashima
@ 2022-08-19  1:05           ` Alexei Starovoitov
  2022-08-19  1:15             ` Kuniyuki Iwashima
  0 siblings, 1 reply; 15+ messages in thread
From: Alexei Starovoitov @ 2022-08-19  1:05 UTC (permalink / raw)
  To: Kuniyuki Iwashima
  Cc: Andrii Nakryiko, Alexei Starovoitov, bpf, Daniel Borkmann,
	Kuniyuki Iwashima, Network Development

On Thu, Aug 18, 2022 at 5:56 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
>
> From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> Date:   Thu, 18 Aug 2022 17:13:25 -0700
> > On Thu, Aug 18, 2022 at 5:07 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > >
> > > From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> > > Date:   Thu, 18 Aug 2022 15:49:46 -0700
> > > > On Wed, Aug 17, 2022 at 9:24 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > > > >
> > > > > A sysctl variable bpf_jit_enable is accessed concurrently, and there is
> > > > > always a chance of data-race.  So, all readers and a writer need some
> > > > > basic protection to avoid load/store-tearing.
> > > > >
> > > > > Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
> > > > > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> > > > > ---
> > > > >  arch/arm/net/bpf_jit_32.c        | 2 +-
> > > > >  arch/arm64/net/bpf_jit_comp.c    | 2 +-
> > > > >  arch/mips/net/bpf_jit_comp.c     | 2 +-
> > > > >  arch/powerpc/net/bpf_jit_comp.c  | 5 +++--
> > > > >  arch/riscv/net/bpf_jit_core.c    | 2 +-
> > > > >  arch/s390/net/bpf_jit_comp.c     | 2 +-
> > > > >  arch/sparc/net/bpf_jit_comp_32.c | 5 +++--
> > > > >  arch/sparc/net/bpf_jit_comp_64.c | 5 +++--
> > > > >  arch/x86/net/bpf_jit_comp.c      | 2 +-
> > > > >  arch/x86/net/bpf_jit_comp32.c    | 2 +-
> > > > >  include/linux/filter.h           | 2 +-
> > > > >  net/core/sysctl_net_core.c       | 4 ++--
> > > > >  12 files changed, 19 insertions(+), 16 deletions(-)
> > > > >
> > > > > diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
> > > > > index 6a1c9fca5260..4b6b62a6fdd4 100644
> > > > > --- a/arch/arm/net/bpf_jit_32.c
> > > > > +++ b/arch/arm/net/bpf_jit_32.c
> > > > > @@ -1999,7 +1999,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
> > > > >         }
> > > > >         flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
> > > > >
> > > > > -       if (bpf_jit_enable > 1)
> > > > > +       if (READ_ONCE(bpf_jit_enable) > 1)
> > > >
> > > > Nack.
> > > > Even if the compiler decides to use single byte loads for some
> > > > odd reason there is no issue here.
> > >
> > > I see, and same for 2nd/3rd patches, right?
> > >
> > > Then how about this part?
> > > It's not data-race nor problematic in practice, but should the value be
> > > consistent in the same function?
> > > The 2nd/3rd patches also have this kind of part.
> >
> > The bof_jit_enable > 1 is unsupported and buggy.
> > It will be removed eventually.
>
> Ok, then I'm fine with no change.
>
> >
> > Why are you doing these changes if they're not fixing any bugs ?
> > Just to shut up some race sanitizer?
>
> For data-race, it's one of reason.  I should have made sure the change fixes
> an actual bug, my apologies.
>
> For two reads, I feel buggy that there's an inconsitent snapshot.
> e.g.) in the 2nd patch, bpf_jit_harden == 0 in bpf_jit_blinding_enabled()
> could return true.  Thinking the previous value was 1, it seems to be timing
> issue, but not intuitive.

it's also used in bpf_jit_kallsyms_enabled.
So the patch 2 doesn't make anything 'intuitive'.


* Re: [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable.
  2022-08-19  1:05           ` Alexei Starovoitov
@ 2022-08-19  1:15             ` Kuniyuki Iwashima
  2022-08-19  3:27               ` Alexei Starovoitov
  0 siblings, 1 reply; 15+ messages in thread
From: Kuniyuki Iwashima @ 2022-08-19  1:15 UTC (permalink / raw)
  To: alexei.starovoitov; +Cc: andrii, ast, bpf, daniel, kuni1840, kuniyu, netdev

From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
Date:   Thu, 18 Aug 2022 18:05:44 -0700
> On Thu, Aug 18, 2022 at 5:56 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> >
> > From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> > Date:   Thu, 18 Aug 2022 17:13:25 -0700
> > > On Thu, Aug 18, 2022 at 5:07 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > > >
> > > > From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> > > > Date:   Thu, 18 Aug 2022 15:49:46 -0700
> > > > > On Wed, Aug 17, 2022 at 9:24 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > > > > >
> > > > > > A sysctl variable bpf_jit_enable is accessed concurrently, and there is
> > > > > > always a chance of data-race.  So, all readers and a writer need some
> > > > > > basic protection to avoid load/store-tearing.
> > > > > >
> > > > > > Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
> > > > > > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> > > > > > ---
> > > > > >  arch/arm/net/bpf_jit_32.c        | 2 +-
> > > > > >  arch/arm64/net/bpf_jit_comp.c    | 2 +-
> > > > > >  arch/mips/net/bpf_jit_comp.c     | 2 +-
> > > > > >  arch/powerpc/net/bpf_jit_comp.c  | 5 +++--
> > > > > >  arch/riscv/net/bpf_jit_core.c    | 2 +-
> > > > > >  arch/s390/net/bpf_jit_comp.c     | 2 +-
> > > > > >  arch/sparc/net/bpf_jit_comp_32.c | 5 +++--
> > > > > >  arch/sparc/net/bpf_jit_comp_64.c | 5 +++--
> > > > > >  arch/x86/net/bpf_jit_comp.c      | 2 +-
> > > > > >  arch/x86/net/bpf_jit_comp32.c    | 2 +-
> > > > > >  include/linux/filter.h           | 2 +-
> > > > > >  net/core/sysctl_net_core.c       | 4 ++--
> > > > > >  12 files changed, 19 insertions(+), 16 deletions(-)
> > > > > >
> > > > > > diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
> > > > > > index 6a1c9fca5260..4b6b62a6fdd4 100644
> > > > > > --- a/arch/arm/net/bpf_jit_32.c
> > > > > > +++ b/arch/arm/net/bpf_jit_32.c
> > > > > > @@ -1999,7 +1999,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
> > > > > >         }
> > > > > >         flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
> > > > > >
> > > > > > -       if (bpf_jit_enable > 1)
> > > > > > +       if (READ_ONCE(bpf_jit_enable) > 1)
> > > > >
> > > > > Nack.
> > > > > Even if the compiler decides to use single byte loads for some
> > > > > odd reason there is no issue here.
> > > >
> > > > I see, and same for 2nd/3rd patches, right?
> > > >
> > > > Then how about this part?
> > > > It's not data-race nor problematic in practice, but should the value be
> > > > consistent in the same function?
> > > > The 2nd/3rd patches also have this kind of part.
> > >
> > > The bof_jit_enable > 1 is unsupported and buggy.
> > > It will be removed eventually.
> >
> > Ok, then I'm fine with no change.
> >
> > >
> > > Why are you doing these changes if they're not fixing any bugs ?
> > > Just to shut up some race sanitizer?
> >
> > For data-race, it's one of reason.  I should have made sure the change fixes
> > an actual bug, my apologies.
> >
> > For two reads, I feel buggy that there's an inconsitent snapshot.
> > e.g.) in the 2nd patch, bpf_jit_harden == 0 in bpf_jit_blinding_enabled()
> > could return true.  Thinking the previous value was 1, it seems to be timing
> > issue, but not intuitive.
> 
> it's also used in bpf_jit_kallsyms_enabled.
> So the patch 2 doesn't make anything 'intuitive'.

Exactly...

So finally, should I repost 4th patch or drop it?


* Re: [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable.
  2022-08-19  1:15             ` Kuniyuki Iwashima
@ 2022-08-19  3:27               ` Alexei Starovoitov
  2022-08-19  3:46                 ` Kuniyuki Iwashima
  0 siblings, 1 reply; 15+ messages in thread
From: Alexei Starovoitov @ 2022-08-19  3:27 UTC (permalink / raw)
  To: Kuniyuki Iwashima
  Cc: Andrii Nakryiko, Alexei Starovoitov, bpf, Daniel Borkmann,
	Kuniyuki Iwashima, Network Development

On Thu, Aug 18, 2022 at 6:15 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
>
> From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> Date:   Thu, 18 Aug 2022 18:05:44 -0700
> > On Thu, Aug 18, 2022 at 5:56 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > >
> > > From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> > > Date:   Thu, 18 Aug 2022 17:13:25 -0700
> > > > On Thu, Aug 18, 2022 at 5:07 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > > > >
> > > > > From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> > > > > Date:   Thu, 18 Aug 2022 15:49:46 -0700
> > > > > > On Wed, Aug 17, 2022 at 9:24 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > > > > > >
> > > > > > > A sysctl variable bpf_jit_enable is accessed concurrently, and there is
> > > > > > > always a chance of data-race.  So, all readers and a writer need some
> > > > > > > basic protection to avoid load/store-tearing.
> > > > > > >
> > > > > > > Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
> > > > > > > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> > > > > > > ---
> > > > > > >  arch/arm/net/bpf_jit_32.c        | 2 +-
> > > > > > >  arch/arm64/net/bpf_jit_comp.c    | 2 +-
> > > > > > >  arch/mips/net/bpf_jit_comp.c     | 2 +-
> > > > > > >  arch/powerpc/net/bpf_jit_comp.c  | 5 +++--
> > > > > > >  arch/riscv/net/bpf_jit_core.c    | 2 +-
> > > > > > >  arch/s390/net/bpf_jit_comp.c     | 2 +-
> > > > > > >  arch/sparc/net/bpf_jit_comp_32.c | 5 +++--
> > > > > > >  arch/sparc/net/bpf_jit_comp_64.c | 5 +++--
> > > > > > >  arch/x86/net/bpf_jit_comp.c      | 2 +-
> > > > > > >  arch/x86/net/bpf_jit_comp32.c    | 2 +-
> > > > > > >  include/linux/filter.h           | 2 +-
> > > > > > >  net/core/sysctl_net_core.c       | 4 ++--
> > > > > > >  12 files changed, 19 insertions(+), 16 deletions(-)
> > > > > > >
> > > > > > > diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
> > > > > > > index 6a1c9fca5260..4b6b62a6fdd4 100644
> > > > > > > --- a/arch/arm/net/bpf_jit_32.c
> > > > > > > +++ b/arch/arm/net/bpf_jit_32.c
> > > > > > > @@ -1999,7 +1999,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
> > > > > > >         }
> > > > > > >         flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
> > > > > > >
> > > > > > > -       if (bpf_jit_enable > 1)
> > > > > > > +       if (READ_ONCE(bpf_jit_enable) > 1)
> > > > > >
> > > > > > Nack.
> > > > > > Even if the compiler decides to use single byte loads for some
> > > > > > odd reason there is no issue here.
> > > > >
> > > > > I see, and same for 2nd/3rd patches, right?
> > > > >
> > > > > Then how about this part?
> > > > > It's not data-race nor problematic in practice, but should the value be
> > > > > consistent in the same function?
> > > > > The 2nd/3rd patches also have this kind of part.
> > > >
> > > > The bof_jit_enable > 1 is unsupported and buggy.
> > > > It will be removed eventually.
> > >
> > > Ok, then I'm fine with no change.
> > >
> > > >
> > > > Why are you doing these changes if they're not fixing any bugs ?
> > > > Just to shut up some race sanitizer?
> > >
> > > For data-race, it's one of reason.  I should have made sure the change fixes
> > > an actual bug, my apologies.
> > >
> > > For two reads, I feel buggy that there's an inconsitent snapshot.
> > > e.g.) in the 2nd patch, bpf_jit_harden == 0 in bpf_jit_blinding_enabled()
> > > could return true.  Thinking the previous value was 1, it seems to be timing
> > > issue, but not intuitive.
> >
> > it's also used in bpf_jit_kallsyms_enabled.
> > So the patch 2 doesn't make anything 'intuitive'.
>
> Exactly...
>
> So finally, should I repost 4th patch or drop it?

This?
-       if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) {
+       if (atomic_long_add_return(size, &bpf_jit_current) >
READ_ONCE(bpf_jit_limit)) {

same question. What does it fix?


* Re: [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable.
  2022-08-19  3:27               ` Alexei Starovoitov
@ 2022-08-19  3:46                 ` Kuniyuki Iwashima
  2022-08-19 22:56                   ` Alexei Starovoitov
  0 siblings, 1 reply; 15+ messages in thread
From: Kuniyuki Iwashima @ 2022-08-19  3:46 UTC (permalink / raw)
  To: alexei.starovoitov; +Cc: andrii, ast, bpf, daniel, kuni1840, kuniyu, netdev

From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
Date:   Thu, 18 Aug 2022 20:27:49 -0700
> On Thu, Aug 18, 2022 at 6:15 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> >
> > From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> > Date:   Thu, 18 Aug 2022 18:05:44 -0700
> > > On Thu, Aug 18, 2022 at 5:56 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > > >
> > > > From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> > > > Date:   Thu, 18 Aug 2022 17:13:25 -0700
> > > > > On Thu, Aug 18, 2022 at 5:07 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > > > > >
> > > > > > From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> > > > > > Date:   Thu, 18 Aug 2022 15:49:46 -0700
> > > > > > > On Wed, Aug 17, 2022 at 9:24 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > > > > > > >
> > > > > > > > A sysctl variable bpf_jit_enable is accessed concurrently, and there is
> > > > > > > > always a chance of data-race.  So, all readers and a writer need some
> > > > > > > > basic protection to avoid load/store-tearing.
> > > > > > > >
> > > > > > > > Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
> > > > > > > > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> > > > > > > > ---
> > > > > > > >  arch/arm/net/bpf_jit_32.c        | 2 +-
> > > > > > > >  arch/arm64/net/bpf_jit_comp.c    | 2 +-
> > > > > > > >  arch/mips/net/bpf_jit_comp.c     | 2 +-
> > > > > > > >  arch/powerpc/net/bpf_jit_comp.c  | 5 +++--
> > > > > > > >  arch/riscv/net/bpf_jit_core.c    | 2 +-
> > > > > > > >  arch/s390/net/bpf_jit_comp.c     | 2 +-
> > > > > > > >  arch/sparc/net/bpf_jit_comp_32.c | 5 +++--
> > > > > > > >  arch/sparc/net/bpf_jit_comp_64.c | 5 +++--
> > > > > > > >  arch/x86/net/bpf_jit_comp.c      | 2 +-
> > > > > > > >  arch/x86/net/bpf_jit_comp32.c    | 2 +-
> > > > > > > >  include/linux/filter.h           | 2 +-
> > > > > > > >  net/core/sysctl_net_core.c       | 4 ++--
> > > > > > > >  12 files changed, 19 insertions(+), 16 deletions(-)
> > > > > > > >
> > > > > > > > diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
> > > > > > > > index 6a1c9fca5260..4b6b62a6fdd4 100644
> > > > > > > > --- a/arch/arm/net/bpf_jit_32.c
> > > > > > > > +++ b/arch/arm/net/bpf_jit_32.c
> > > > > > > > @@ -1999,7 +1999,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
> > > > > > > >         }
> > > > > > > >         flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
> > > > > > > >
> > > > > > > > -       if (bpf_jit_enable > 1)
> > > > > > > > +       if (READ_ONCE(bpf_jit_enable) > 1)
> > > > > > >
> > > > > > > Nack.
> > > > > > > Even if the compiler decides to use single byte loads for some
> > > > > > > odd reason there is no issue here.
> > > > > >
> > > > > > I see, and same for 2nd/3rd patches, right?
> > > > > >
> > > > > > Then how about this part?
> > > > > > It's not data-race nor problematic in practice, but should the value be
> > > > > > consistent in the same function?
> > > > > > The 2nd/3rd patches also have this kind of part.
> > > > >
> > > > > The bof_jit_enable > 1 is unsupported and buggy.
> > > > > It will be removed eventually.
> > > >
> > > > Ok, then I'm fine with no change.
> > > >
> > > > >
> > > > > Why are you doing these changes if they're not fixing any bugs ?
> > > > > Just to shut up some race sanitizer?
> > > >
> > > > For data-race, it's one of reason.  I should have made sure the change fixes
> > > > an actual bug, my apologies.
> > > >
> > > > For two reads, I feel buggy that there's an inconsitent snapshot.
> > > > e.g.) in the 2nd patch, bpf_jit_harden == 0 in bpf_jit_blinding_enabled()
> > > > could return true.  Thinking the previous value was 1, it seems to be timing
> > > > issue, but not intuitive.
> > >
> > > it's also used in bpf_jit_kallsyms_enabled.
> > > So the patch 2 doesn't make anything 'intuitive'.
> >
> > Exactly...
> >
> > So finally, should I repost 4th patch or drop it?
> 
> This?
> -       if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) {
> +       if (atomic_long_add_return(size, &bpf_jit_current) >
> READ_ONCE(bpf_jit_limit)) {
> 
> same question. What does it fix?

Its size is long, and load tearing [0] could occur due to compiler
optimisation.  So, concurrent writes and a torn read could yield
a bigger limit than intended.

        write 0xFFFFFFFF00000000
    torn read 0xFFFFFFFF
        write 0x00000000FFFFFFFF
    torn read 0xFFFFFFFFFFFFFFFF

[0]: https://lwn.net/Articles/793253/#Load%20Tearing
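
A sketch of the two access patterns (the wrapper functions below are
illustrative only; the actual change is the one-liner in the 4th patch):

	/* plain load: the compiler is in principle free to split the
	 * read of the long into smaller loads, so the comparison can
	 * observe half-old/half-new bits as in the example above */
	static bool over_limit_plain(long charged)
	{
		return charged > bpf_jit_limit;
	}

	/* READ_ONCE() forces a single, full-width load of the limit */
	static bool over_limit_once(long charged)
	{
		return charged > READ_ONCE(bpf_jit_limit);
	}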


* Re: [PATCH v1 bpf 1/4] bpf: Fix data-races around bpf_jit_enable.
  2022-08-19  3:46                 ` Kuniyuki Iwashima
@ 2022-08-19 22:56                   ` Alexei Starovoitov
  0 siblings, 0 replies; 15+ messages in thread
From: Alexei Starovoitov @ 2022-08-19 22:56 UTC (permalink / raw)
  To: Kuniyuki Iwashima
  Cc: Andrii Nakryiko, Alexei Starovoitov, bpf, Daniel Borkmann,
	Kuniyuki Iwashima, Network Development

On Thu, Aug 18, 2022 at 8:46 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
>
> From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> Date:   Thu, 18 Aug 2022 20:27:49 -0700
> > On Thu, Aug 18, 2022 at 6:15 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > >
> > > From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> > > Date:   Thu, 18 Aug 2022 18:05:44 -0700
> > > > On Thu, Aug 18, 2022 at 5:56 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > > > >
> > > > > From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> > > > > Date:   Thu, 18 Aug 2022 17:13:25 -0700
> > > > > > On Thu, Aug 18, 2022 at 5:07 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > > > > > >
> > > > > > > From:   Alexei Starovoitov <alexei.starovoitov@gmail.com>
> > > > > > > Date:   Thu, 18 Aug 2022 15:49:46 -0700
> > > > > > > > On Wed, Aug 17, 2022 at 9:24 PM Kuniyuki Iwashima <kuniyu@amazon.com> wrote:
> > > > > > > > >
> > > > > > > > > A sysctl variable bpf_jit_enable is accessed concurrently, and there is
> > > > > > > > > always a chance of data-race.  So, all readers and a writer need some
> > > > > > > > > basic protection to avoid load/store-tearing.
> > > > > > > > >
> > > > > > > > > Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
> > > > > > > > > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> > > > > > > > > ---
> > > > > > > > >  arch/arm/net/bpf_jit_32.c        | 2 +-
> > > > > > > > >  arch/arm64/net/bpf_jit_comp.c    | 2 +-
> > > > > > > > >  arch/mips/net/bpf_jit_comp.c     | 2 +-
> > > > > > > > >  arch/powerpc/net/bpf_jit_comp.c  | 5 +++--
> > > > > > > > >  arch/riscv/net/bpf_jit_core.c    | 2 +-
> > > > > > > > >  arch/s390/net/bpf_jit_comp.c     | 2 +-
> > > > > > > > >  arch/sparc/net/bpf_jit_comp_32.c | 5 +++--
> > > > > > > > >  arch/sparc/net/bpf_jit_comp_64.c | 5 +++--
> > > > > > > > >  arch/x86/net/bpf_jit_comp.c      | 2 +-
> > > > > > > > >  arch/x86/net/bpf_jit_comp32.c    | 2 +-
> > > > > > > > >  include/linux/filter.h           | 2 +-
> > > > > > > > >  net/core/sysctl_net_core.c       | 4 ++--
> > > > > > > > >  12 files changed, 19 insertions(+), 16 deletions(-)
> > > > > > > > >
> > > > > > > > > diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
> > > > > > > > > index 6a1c9fca5260..4b6b62a6fdd4 100644
> > > > > > > > > --- a/arch/arm/net/bpf_jit_32.c
> > > > > > > > > +++ b/arch/arm/net/bpf_jit_32.c
> > > > > > > > > @@ -1999,7 +1999,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
> > > > > > > > >         }
> > > > > > > > >         flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
> > > > > > > > >
> > > > > > > > > -       if (bpf_jit_enable > 1)
> > > > > > > > > +       if (READ_ONCE(bpf_jit_enable) > 1)
> > > > > > > >
> > > > > > > > Nack.
> > > > > > > > Even if the compiler decides to use single byte loads for some
> > > > > > > > odd reason there is no issue here.
> > > > > > >
> > > > > > > I see, and same for 2nd/3rd patches, right?
> > > > > > >
> > > > > > > Then how about this part?
> > > > > > > It's not data-race nor problematic in practice, but should the value be
> > > > > > > consistent in the same function?
> > > > > > > The 2nd/3rd patches also have this kind of part.
> > > > > >
> > > > > > The bof_jit_enable > 1 is unsupported and buggy.
> > > > > > It will be removed eventually.
> > > > >
> > > > > Ok, then I'm fine with no change.
> > > > >
> > > > > >
> > > > > > Why are you doing these changes if they're not fixing any bugs ?
> > > > > > Just to shut up some race sanitizer?
> > > > >
> > > > > For data-race, it's one of reason.  I should have made sure the change fixes
> > > > > an actual bug, my apologies.
> > > > >
> > > > > For two reads, I feel buggy that there's an inconsitent snapshot.
> > > > > e.g.) in the 2nd patch, bpf_jit_harden == 0 in bpf_jit_blinding_enabled()
> > > > > could return true.  Thinking the previous value was 1, it seems to be timing
> > > > > issue, but not intuitive.
> > > >
> > > > it's also used in bpf_jit_kallsyms_enabled.
> > > > So the patch 2 doesn't make anything 'intuitive'.
> > >
> > > Exactly...
> > >
> > > So finally, should I repost 4th patch or drop it?
> >
> > This?
> > -       if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) {
> > +       if (atomic_long_add_return(size, &bpf_jit_current) >
> > READ_ONCE(bpf_jit_limit)) {
> >
> > same question. What does it fix?
>
> Its size is long, and load tearing [0] could occur by compiler
> optimisation.  So, concurrent writes & a teared-read could get
> a bigger limit than intended.

'long' indeed.  Still improbable, but sure.  Let's read_once it.


* Re: [PATCH v1 bpf 0/4] bpf: sysctl: Fix data-races around net.core.bpf_XXX.
  2022-08-18  4:23 [PATCH v1 bpf 0/4] bpf: sysctl: Fix data-races around net.core.bpf_XXX Kuniyuki Iwashima
                   ` (3 preceding siblings ...)
  2022-08-18  4:23 ` [PATCH v1 bpf 4/4] bpf: Fix a data-race around bpf_jit_limit Kuniyuki Iwashima
@ 2022-08-24  1:10 ` dongdwdw
  4 siblings, 0 replies; 15+ messages in thread
From: dongdwdw @ 2022-08-24  1:10 UTC (permalink / raw)
  To: Kuniyuki Iwashima, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
  Cc: Kuniyuki Iwashima, bpf, netdev

This patch set got my attention because I found some issues around
bpf_jit_harden.
But after reading your patches, I did not understand what problem you
are trying to resolve and what is improved.
You said there is a data race; what is it?  Can you please show how it
happens and what its impact is?
Would you please share this background information?

On 2022/8/18 12:23, Kuniyuki Iwashima wrote:
> This series split from [0] fixes data-races around 4 bpf knobs
> in net_core_table.
>
> [0]: https://lore.kernel.org/netdev/20220818035227.81567-1-kuniyu@amazon.com/
>
>
> Kuniyuki Iwashima (4):
>    bpf: Fix data-races around bpf_jit_enable.
>    bpf: Fix data-races around bpf_jit_harden.
>    bpf: Fix data-races around bpf_jit_kallsyms.
>    bpf: Fix a data-race around bpf_jit_limit.
>
>   arch/arm/net/bpf_jit_32.c        |  2 +-
>   arch/arm64/net/bpf_jit_comp.c    |  2 +-
>   arch/mips/net/bpf_jit_comp.c     |  2 +-
>   arch/powerpc/net/bpf_jit_comp.c  |  5 +++--
>   arch/riscv/net/bpf_jit_core.c    |  2 +-
>   arch/s390/net/bpf_jit_comp.c     |  2 +-
>   arch/sparc/net/bpf_jit_comp_32.c |  5 +++--
>   arch/sparc/net/bpf_jit_comp_64.c |  5 +++--
>   arch/x86/net/bpf_jit_comp.c      |  2 +-
>   arch/x86/net/bpf_jit_comp32.c    |  2 +-
>   include/linux/filter.h           | 16 ++++++++++------
>   kernel/bpf/core.c                |  2 +-
>   net/core/sysctl_net_core.c       |  4 ++--
>   13 files changed, 29 insertions(+), 22 deletions(-)
>

