bpf.vger.kernel.org archive mirror
* [PATCH v4 bpf-next 0/9] bpf: Misc improvements
@ 2021-02-10  3:36 Alexei Starovoitov
  2021-02-10  3:36 ` [PATCH v4 bpf-next 1/9] bpf: Optimize program stats Alexei Starovoitov
                   ` (9 more replies)
  0 siblings, 10 replies; 14+ messages in thread
From: Alexei Starovoitov @ 2021-02-10  3:36 UTC (permalink / raw)
  To: davem; +Cc: daniel, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

v4:
- split migrate_disable into separate patch

v3:
- address review comments
- improve recursion selftest

Several bpf improvements:
- optimize prog stats
- compute stats for sleepable progs
- prevent recursion in fentry/fexit and sleepable progs
- allow map-in-map and per-cpu maps in sleepable progs

Alexei Starovoitov (9):
  bpf: Optimize program stats
  bpf: Run sleepable programs with migration disabled
  bpf: Compute program stats for sleepable programs
  bpf: Add per-program recursion prevention mechanism
  selftest/bpf: Add a recursion test
  bpf: Count the number of times recursion was prevented
  selftests/bpf: Improve recursion selftest
  bpf: Allows per-cpu maps and map-in-map in sleepable programs
  selftests/bpf: Add a test for map-in-map and per-cpu maps in sleepable
    progs

 arch/x86/net/bpf_jit_comp.c                   | 46 ++++++-----
 include/linux/bpf.h                           | 16 +---
 include/linux/filter.h                        | 16 +++-
 include/uapi/linux/bpf.h                      |  1 +
 kernel/bpf/core.c                             | 16 +++-
 kernel/bpf/hashtab.c                          |  4 +-
 kernel/bpf/syscall.c                          | 16 ++--
 kernel/bpf/trampoline.c                       | 77 +++++++++++++++----
 kernel/bpf/verifier.c                         |  9 ++-
 tools/bpf/bpftool/prog.c                      |  4 +
 tools/include/uapi/linux/bpf.h                |  1 +
 .../selftests/bpf/prog_tests/fexit_stress.c   |  4 +-
 .../selftests/bpf/prog_tests/recursion.c      | 41 ++++++++++
 .../bpf/prog_tests/trampoline_count.c         |  4 +-
 tools/testing/selftests/bpf/progs/lsm.c       | 69 +++++++++++++++++
 tools/testing/selftests/bpf/progs/recursion.c | 46 +++++++++++
 16 files changed, 303 insertions(+), 67 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/recursion.c
 create mode 100644 tools/testing/selftests/bpf/progs/recursion.c

-- 
2.24.1


^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH v4 bpf-next 1/9] bpf: Optimize program stats
  2021-02-10  3:36 [PATCH v4 bpf-next 0/9] bpf: Misc improvements Alexei Starovoitov
@ 2021-02-10  3:36 ` Alexei Starovoitov
  2021-02-12  3:26   ` Ilya Leoshkevich
  2021-02-10  3:36 ` [PATCH v4 bpf-next 2/9] bpf: Run sleepable programs with migration disabled Alexei Starovoitov
                   ` (8 subsequent siblings)
  9 siblings, 1 reply; 14+ messages in thread
From: Alexei Starovoitov @ 2021-02-10  3:36 UTC (permalink / raw)
  To: davem; +Cc: daniel, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

Move bpf_prog_stats from prog->aux into prog to avoid one extra load
in the critical path of program execution.
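
The win is one less pointer chase on every program run; roughly:

	/* before: two dependent loads per run */
	stats = this_cpu_ptr(prog->aux->stats);

	/* after: a single load */
	stats = this_cpu_ptr(prog->stats);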

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
---
 include/linux/bpf.h     |  8 --------
 include/linux/filter.h  | 14 +++++++++++---
 kernel/bpf/core.c       |  8 ++++----
 kernel/bpf/syscall.c    |  2 +-
 kernel/bpf/trampoline.c |  2 +-
 kernel/bpf/verifier.c   |  2 +-
 6 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 321966fc35db..026fa8873c5d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -14,7 +14,6 @@
 #include <linux/numa.h>
 #include <linux/mm_types.h>
 #include <linux/wait.h>
-#include <linux/u64_stats_sync.h>
 #include <linux/refcount.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
@@ -507,12 +506,6 @@ enum bpf_cgroup_storage_type {
  */
 #define MAX_BPF_FUNC_ARGS 12
 
-struct bpf_prog_stats {
-	u64 cnt;
-	u64 nsecs;
-	struct u64_stats_sync syncp;
-} __aligned(2 * sizeof(u64));
-
 struct btf_func_model {
 	u8 ret_size;
 	u8 nr_args;
@@ -845,7 +838,6 @@ struct bpf_prog_aux {
 	u32 linfo_idx;
 	u32 num_exentries;
 	struct exception_table_entry *extable;
-	struct bpf_prog_stats __percpu *stats;
 	union {
 		struct work_struct work;
 		struct rcu_head	rcu;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 5b3137d7b690..cecb03c9d251 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -22,6 +22,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sockptr.h>
 #include <crypto/sha1.h>
+#include <linux/u64_stats_sync.h>
 
 #include <net/sch_generic.h>
 
@@ -539,6 +540,12 @@ struct bpf_binary_header {
 	u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
 };
 
+struct bpf_prog_stats {
+	u64 cnt;
+	u64 nsecs;
+	struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
+
 struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
 	u16			jited:1,	/* Is our filter JIT'ed? */
@@ -557,10 +564,11 @@ struct bpf_prog {
 	u32			len;		/* Number of filter blocks */
 	u32			jited_len;	/* Size of jited insns in bytes */
 	u8			tag[BPF_TAG_SIZE];
-	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
-	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
+	struct bpf_prog_stats __percpu *stats;
 	unsigned int		(*bpf_func)(const void *ctx,
 					    const struct bpf_insn *insn);
+	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
+	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
 	/* Instructions for interpreter */
 	struct sock_filter	insns[0];
 	struct bpf_insn		insnsi[];
@@ -581,7 +589,7 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 		struct bpf_prog_stats *__stats;				\
 		u64 __start = sched_clock();				\
 		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);	\
-		__stats = this_cpu_ptr(prog->aux->stats);		\
+		__stats = this_cpu_ptr(prog->stats);			\
 		u64_stats_update_begin(&__stats->syncp);		\
 		__stats->cnt++;						\
 		__stats->nsecs += sched_clock() - __start;		\
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5bbd4884ff7a..2cf71fd39c22 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -114,8 +114,8 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 	if (!prog)
 		return NULL;
 
-	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
-	if (!prog->aux->stats) {
+	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
+	if (!prog->stats) {
 		kfree(prog->aux);
 		vfree(prog);
 		return NULL;
@@ -124,7 +124,7 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 	for_each_possible_cpu(cpu) {
 		struct bpf_prog_stats *pstats;
 
-		pstats = per_cpu_ptr(prog->aux->stats, cpu);
+		pstats = per_cpu_ptr(prog->stats, cpu);
 		u64_stats_init(&pstats->syncp);
 	}
 	return prog;
@@ -249,10 +249,10 @@ void __bpf_prog_free(struct bpf_prog *fp)
 	if (fp->aux) {
 		mutex_destroy(&fp->aux->used_maps_mutex);
 		mutex_destroy(&fp->aux->dst_mutex);
-		free_percpu(fp->aux->stats);
 		kfree(fp->aux->poke_tab);
 		kfree(fp->aux);
 	}
+	free_percpu(fp->stats);
 	vfree(fp);
 }
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index e5999d86c76e..f7df56a704de 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1739,7 +1739,7 @@ static void bpf_prog_get_stats(const struct bpf_prog *prog,
 		unsigned int start;
 		u64 tnsecs, tcnt;
 
-		st = per_cpu_ptr(prog->aux->stats, cpu);
+		st = per_cpu_ptr(prog->stats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&st->syncp);
 			tnsecs = st->nsecs;
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 35c5887d82ff..5be3beeedd74 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -412,7 +412,7 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
 	     * Hence check that 'start' is not zero.
 	     */
 	    start) {
-		stats = this_cpu_ptr(prog->aux->stats);
+		stats = this_cpu_ptr(prog->stats);
 		u64_stats_update_begin(&stats->syncp);
 		stats->cnt++;
 		stats->nsecs += sched_clock() - start;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 15694246f854..4189edb41b73 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -10889,7 +10889,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		/* BPF_PROG_RUN doesn't call subprogs directly,
 		 * hence main prog stats include the runtime of subprogs.
 		 * subprogs don't have IDs and not reachable via prog_get_next_id
-		 * func[i]->aux->stats will never be accessed and stays NULL
+		 * func[i]->stats will never be accessed and stays NULL
 		 */
 		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
 		if (!func[i])
-- 
2.24.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v4 bpf-next 2/9] bpf: Run sleepable programs with migration disabled
  2021-02-10  3:36 [PATCH v4 bpf-next 0/9] bpf: Misc improvements Alexei Starovoitov
  2021-02-10  3:36 ` [PATCH v4 bpf-next 1/9] bpf: Optimize program stats Alexei Starovoitov
@ 2021-02-10  3:36 ` Alexei Starovoitov
  2021-02-10  3:36 ` [PATCH v4 bpf-next 3/9] bpf: Compute program stats for sleepable programs Alexei Starovoitov
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 14+ messages in thread
From: Alexei Starovoitov @ 2021-02-10  3:36 UTC (permalink / raw)
  To: davem; +Cc: daniel, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

In older non-RT kernels migrate_disable() was the same as preempt_disable().
Since commit 74d862b682f5 ("sched: Make migrate_disable/enable() independent of RT")
migrate_disable() is real and doesn't prevent sleeping.
Running sleepable programs with migration disabled makes it possible to add
support for program stats and per-cpu maps later.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: KP Singh <kpsingh@kernel.org>
---
 kernel/bpf/trampoline.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 5be3beeedd74..89fc849ba271 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -425,11 +425,13 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
 void notrace __bpf_prog_enter_sleepable(void)
 {
 	rcu_read_lock_trace();
+	migrate_disable();
 	might_fault();
 }
 
 void notrace __bpf_prog_exit_sleepable(void)
 {
+	migrate_enable();
 	rcu_read_unlock_trace();
 }
 
-- 
2.24.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v4 bpf-next 3/9] bpf: Compute program stats for sleepable programs
  2021-02-10  3:36 [PATCH v4 bpf-next 0/9] bpf: Misc improvements Alexei Starovoitov
  2021-02-10  3:36 ` [PATCH v4 bpf-next 1/9] bpf: Optimize program stats Alexei Starovoitov
  2021-02-10  3:36 ` [PATCH v4 bpf-next 2/9] bpf: Run sleepable programs with migration disabled Alexei Starovoitov
@ 2021-02-10  3:36 ` Alexei Starovoitov
  2021-02-10  3:36 ` [PATCH v4 bpf-next 4/9] bpf: Add per-program recursion prevention mechanism Alexei Starovoitov
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 14+ messages in thread
From: Alexei Starovoitov @ 2021-02-10  3:36 UTC (permalink / raw)
  To: davem; +Cc: daniel, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

Since sleepable programs don't migrate from the CPU, the execution stats can be
computed for them as well. Reuse the same infrastructure for both sleepable and
non-sleepable programs.
run_cnt     -> the number of times the program was executed.
run_time_ns -> the program execution time in nanoseconds including the
               off-cpu time when the program was sleeping.
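
For reference, a minimal userspace sketch (not part of this patch) that reads
the two counters through BPF_OBJ_GET_INFO_BY_FD; prog_fd is assumed to be a
valid fd of an already loaded program and error handling is omitted:

	#include <stdio.h>
	#include <bpf/bpf.h>

	static void print_prog_stats(int prog_fd)
	{
		struct bpf_prog_info info = {};
		__u32 len = sizeof(info);

		/* fills run_cnt and run_time_ns among other fields */
		if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
			return;
		printf("run_cnt=%llu run_time_ns=%llu\n",
		       (unsigned long long)info.run_cnt,
		       (unsigned long long)info.run_time_ns);
	}

Both counters only accumulate while BPF stats collection is enabled
(the bpf_stats_enabled static key, i.e. the kernel.bpf_stats_enabled sysctl).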

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: KP Singh <kpsingh@kernel.org>
---
 arch/x86/net/bpf_jit_comp.c | 31 +++++++++++----------------
 include/linux/bpf.h         |  4 ++--
 kernel/bpf/trampoline.c     | 42 ++++++++++++++++++++++++-------------
 3 files changed, 42 insertions(+), 35 deletions(-)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a3dc3bd154ac..d11b9bcebbea 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1742,15 +1742,12 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	u8 *prog = *pprog;
 	int cnt = 0;
 
-	if (p->aux->sleepable) {
-		if (emit_call(&prog, __bpf_prog_enter_sleepable, prog))
+	if (emit_call(&prog,
+		      p->aux->sleepable ? __bpf_prog_enter_sleepable :
+		      __bpf_prog_enter, prog))
 			return -EINVAL;
-	} else {
-		if (emit_call(&prog, __bpf_prog_enter, prog))
-			return -EINVAL;
-		/* remember prog start time returned by __bpf_prog_enter */
-		emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
-	}
+	/* remember prog start time returned by __bpf_prog_enter */
+	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
 
 	/* arg1: lea rdi, [rbp - stack_size] */
 	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
@@ -1770,18 +1767,14 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	if (mod_ret)
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 
-	if (p->aux->sleepable) {
-		if (emit_call(&prog, __bpf_prog_exit_sleepable, prog))
+	/* arg1: mov rdi, progs[i] */
+	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
+	/* arg2: mov rsi, rbx <- start time in nsec */
+	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
+	if (emit_call(&prog,
+		      p->aux->sleepable ? __bpf_prog_exit_sleepable :
+		      __bpf_prog_exit, prog))
 			return -EINVAL;
-	} else {
-		/* arg1: mov rdi, progs[i] */
-		emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
-			       (u32) (long) p);
-		/* arg2: mov rsi, rbx <- start time in nsec */
-		emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
-		if (emit_call(&prog, __bpf_prog_exit, prog))
-			return -EINVAL;
-	}
 
 	*pprog = prog;
 	return 0;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 026fa8873c5d..2fa48439ef31 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -563,8 +563,8 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 /* these two functions are called from generated trampoline */
 u64 notrace __bpf_prog_enter(void);
 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
-void notrace __bpf_prog_enter_sleepable(void);
-void notrace __bpf_prog_exit_sleepable(void);
+u64 notrace __bpf_prog_enter_sleepable(void);
+void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
 
 struct bpf_ksym {
 	unsigned long		 start;
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 89fc849ba271..48eb021e1421 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -381,56 +381,70 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
 	mutex_unlock(&trampoline_mutex);
 }
 
+#define NO_START_TIME 0
+static u64 notrace bpf_prog_start_time(void)
+{
+	u64 start = NO_START_TIME;
+
+	if (static_branch_unlikely(&bpf_stats_enabled_key))
+		start = sched_clock();
+	return start;
+}
+
 /* The logic is similar to BPF_PROG_RUN, but with an explicit
  * rcu_read_lock() and migrate_disable() which are required
  * for the trampoline. The macro is split into
- * call _bpf_prog_enter
+ * call __bpf_prog_enter
  * call prog->bpf_func
  * call __bpf_prog_exit
  */
 u64 notrace __bpf_prog_enter(void)
 	__acquires(RCU)
 {
-	u64 start = 0;
-
 	rcu_read_lock();
 	migrate_disable();
-	if (static_branch_unlikely(&bpf_stats_enabled_key))
-		start = sched_clock();
-	return start;
+	return bpf_prog_start_time();
 }
 
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
-	__releases(RCU)
+static void notrace update_prog_stats(struct bpf_prog *prog,
+				      u64 start)
 {
 	struct bpf_prog_stats *stats;
 
 	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
-	    /* static_key could be enabled in __bpf_prog_enter
-	     * and disabled in __bpf_prog_exit.
+	    /* static_key could be enabled in __bpf_prog_enter*
+	     * and disabled in __bpf_prog_exit*.
 	     * And vice versa.
-	     * Hence check that 'start' is not zero.
+	     * Hence check that 'start' is valid.
 	     */
-	    start) {
+	    start > NO_START_TIME) {
 		stats = this_cpu_ptr(prog->stats);
 		u64_stats_update_begin(&stats->syncp);
 		stats->cnt++;
 		stats->nsecs += sched_clock() - start;
 		u64_stats_update_end(&stats->syncp);
 	}
+}
+
+void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
+	__releases(RCU)
+{
+	update_prog_stats(prog, start);
 	migrate_enable();
 	rcu_read_unlock();
 }
 
-void notrace __bpf_prog_enter_sleepable(void)
+u64 notrace __bpf_prog_enter_sleepable(void)
 {
 	rcu_read_lock_trace();
 	migrate_disable();
 	might_fault();
+	return bpf_prog_start_time();
 }
 
-void notrace __bpf_prog_exit_sleepable(void)
+void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
 {
+	update_prog_stats(prog, start);
 	migrate_enable();
 	rcu_read_unlock_trace();
 }
-- 
2.24.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v4 bpf-next 4/9] bpf: Add per-program recursion prevention mechanism
  2021-02-10  3:36 [PATCH v4 bpf-next 0/9] bpf: Misc improvements Alexei Starovoitov
                   ` (2 preceding siblings ...)
  2021-02-10  3:36 ` [PATCH v4 bpf-next 3/9] bpf: Compute program stats for sleepable programs Alexei Starovoitov
@ 2021-02-10  3:36 ` Alexei Starovoitov
  2021-02-10  3:36 ` [PATCH v4 bpf-next 5/9] selftest/bpf: Add a recursion test Alexei Starovoitov
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 14+ messages in thread
From: Alexei Starovoitov @ 2021-02-10  3:36 UTC (permalink / raw)
  To: davem; +Cc: daniel, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

Since both sleepable and non-sleepable programs execute under migrate_disable,
add a recursion prevention mechanism to both types of programs when they're
executed via the bpf trampoline.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
---
 arch/x86/net/bpf_jit_comp.c                   | 15 ++++++++++++
 include/linux/bpf.h                           |  6 ++---
 include/linux/filter.h                        |  1 +
 kernel/bpf/core.c                             |  8 +++++++
 kernel/bpf/trampoline.c                       | 23 +++++++++++++++----
 .../selftests/bpf/prog_tests/fexit_stress.c   |  4 ++--
 .../bpf/prog_tests/trampoline_count.c         |  4 ++--
 7 files changed, 50 insertions(+), 11 deletions(-)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index d11b9bcebbea..79e7a0ec1da5 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1740,8 +1740,11 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 			   struct bpf_prog *p, int stack_size, bool mod_ret)
 {
 	u8 *prog = *pprog;
+	u8 *jmp_insn;
 	int cnt = 0;
 
+	/* arg1: mov rdi, progs[i] */
+	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
 	if (emit_call(&prog,
 		      p->aux->sleepable ? __bpf_prog_enter_sleepable :
 		      __bpf_prog_enter, prog))
@@ -1749,6 +1752,14 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	/* remember prog start time returned by __bpf_prog_enter */
 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
 
+	/* if (__bpf_prog_enter*(prog) == 0)
+	 *	goto skip_exec_of_prog;
+	 */
+	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
+	/* emit 2 nops that will be replaced with JE insn */
+	jmp_insn = prog;
+	emit_nops(&prog, 2);
+
 	/* arg1: lea rdi, [rbp - stack_size] */
 	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
 	/* arg2: progs[i]->insnsi for interpreter */
@@ -1767,6 +1778,10 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	if (mod_ret)
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 
+	/* replace 2 nops with JE insn, since jmp target is known */
+	jmp_insn[0] = X86_JE;
+	jmp_insn[1] = prog - jmp_insn - 2;
+
 	/* arg1: mov rdi, progs[i] */
 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
 	/* arg2: mov rsi, rbx <- start time in nsec */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 2fa48439ef31..6f019b06a2fd 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -529,7 +529,7 @@ struct btf_func_model {
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
  */
-#define BPF_MAX_TRAMP_PROGS 40
+#define BPF_MAX_TRAMP_PROGS 38
 
 struct bpf_tramp_progs {
 	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
@@ -561,9 +561,9 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 				struct bpf_tramp_progs *tprogs,
 				void *orig_call);
 /* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(void);
+u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
-u64 notrace __bpf_prog_enter_sleepable(void);
+u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
 void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
 
 struct bpf_ksym {
diff --git a/include/linux/filter.h b/include/linux/filter.h
index cecb03c9d251..6a06f3c69f4e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -565,6 +565,7 @@ struct bpf_prog {
 	u32			jited_len;	/* Size of jited insns in bytes */
 	u8			tag[BPF_TAG_SIZE];
 	struct bpf_prog_stats __percpu *stats;
+	int __percpu		*active;
 	unsigned int		(*bpf_func)(const void *ctx,
 					    const struct bpf_insn *insn);
 	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 2cf71fd39c22..334070c4b8a1 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -91,6 +91,12 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 		vfree(fp);
 		return NULL;
 	}
+	fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
+	if (!fp->active) {
+		vfree(fp);
+		kfree(aux);
+		return NULL;
+	}
 
 	fp->pages = size / PAGE_SIZE;
 	fp->aux = aux;
@@ -116,6 +122,7 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 
 	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
 	if (!prog->stats) {
+		free_percpu(prog->active);
 		kfree(prog->aux);
 		vfree(prog);
 		return NULL;
@@ -253,6 +260,7 @@ void __bpf_prog_free(struct bpf_prog *fp)
 		kfree(fp->aux);
 	}
 	free_percpu(fp->stats);
+	free_percpu(fp->active);
 	vfree(fp);
 }
 
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 48eb021e1421..89ef6320d19b 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -381,13 +381,16 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
 	mutex_unlock(&trampoline_mutex);
 }
 
-#define NO_START_TIME 0
+#define NO_START_TIME 1
 static u64 notrace bpf_prog_start_time(void)
 {
 	u64 start = NO_START_TIME;
 
-	if (static_branch_unlikely(&bpf_stats_enabled_key))
+	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
 		start = sched_clock();
+		if (unlikely(!start))
+			start = NO_START_TIME;
+	}
 	return start;
 }
 
@@ -397,12 +400,20 @@ static u64 notrace bpf_prog_start_time(void)
  * call __bpf_prog_enter
  * call prog->bpf_func
  * call __bpf_prog_exit
+ *
+ * __bpf_prog_enter returns:
+ * 0 - skip execution of the bpf prog
+ * 1 - execute bpf prog
+ * [2..MAX_U64] - execute bpf prog and record execution time.
+ *     This is start time.
  */
-u64 notrace __bpf_prog_enter(void)
+u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
 	__acquires(RCU)
 {
 	rcu_read_lock();
 	migrate_disable();
+	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+		return 0;
 	return bpf_prog_start_time();
 }
 
@@ -430,21 +441,25 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
 	__releases(RCU)
 {
 	update_prog_stats(prog, start);
+	__this_cpu_dec(*(prog->active));
 	migrate_enable();
 	rcu_read_unlock();
 }
 
-u64 notrace __bpf_prog_enter_sleepable(void)
+u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
 {
 	rcu_read_lock_trace();
 	migrate_disable();
 	might_fault();
+	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+		return 0;
 	return bpf_prog_start_time();
 }
 
 void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
 {
 	update_prog_stats(prog, start);
+	__this_cpu_dec(*(prog->active));
 	migrate_enable();
 	rcu_read_unlock_trace();
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
index 3b9dbf7433f0..7c9b62e971f1 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -2,8 +2,8 @@
 /* Copyright (c) 2019 Facebook */
 #include <test_progs.h>
 
-/* x86-64 fits 55 JITed and 43 interpreted progs into half page */
-#define CNT 40
+/* that's kernel internal BPF_MAX_TRAMP_PROGS define */
+#define CNT 38
 
 void test_fexit_stress(void)
 {
diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
index 781c8d11604b..f3022d934e2d 100644
--- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
+++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
@@ -4,7 +4,7 @@
 #include <sys/prctl.h>
 #include <test_progs.h>
 
-#define MAX_TRAMP_PROGS 40
+#define MAX_TRAMP_PROGS 38
 
 struct inst {
 	struct bpf_object *obj;
@@ -52,7 +52,7 @@ void test_trampoline_count(void)
 	struct bpf_link *link;
 	char comm[16] = {};
 
-	/* attach 'allowed' 40 trampoline programs */
+	/* attach 'allowed' trampoline programs */
 	for (i = 0; i < MAX_TRAMP_PROGS; i++) {
 		obj = bpf_object__open_file(object, NULL);
 		if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) {
-- 
2.24.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v4 bpf-next 5/9] selftest/bpf: Add a recursion test
  2021-02-10  3:36 [PATCH v4 bpf-next 0/9] bpf: Misc improvements Alexei Starovoitov
                   ` (3 preceding siblings ...)
  2021-02-10  3:36 ` [PATCH v4 bpf-next 4/9] bpf: Add per-program recursion prevention mechanism Alexei Starovoitov
@ 2021-02-10  3:36 ` Alexei Starovoitov
  2021-02-10  3:36 ` [PATCH v4 bpf-next 6/9] bpf: Count the number of times recursion was prevented Alexei Starovoitov
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 14+ messages in thread
From: Alexei Starovoitov @ 2021-02-10  3:36 UTC (permalink / raw)
  To: davem; +Cc: daniel, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

Add a recursive non-sleepable fentry program as a test.
All attach points where sleepable progs can execute are non-recursive so far,
so the recursion protection mechanism for sleepable programs cannot be
exercised yet.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
---
 .../selftests/bpf/prog_tests/recursion.c      | 33 +++++++++++++
 tools/testing/selftests/bpf/progs/recursion.c | 46 +++++++++++++++++++
 2 files changed, 79 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/recursion.c
 create mode 100644 tools/testing/selftests/bpf/progs/recursion.c

diff --git a/tools/testing/selftests/bpf/prog_tests/recursion.c b/tools/testing/selftests/bpf/prog_tests/recursion.c
new file mode 100644
index 000000000000..863757461e3f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/recursion.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "recursion.skel.h"
+
+void test_recursion(void)
+{
+	struct recursion *skel;
+	int key = 0;
+	int err;
+
+	skel = recursion__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+		return;
+
+	err = recursion__attach(skel);
+	if (!ASSERT_OK(err, "skel_attach"))
+		goto out;
+
+	ASSERT_EQ(skel->bss->pass1, 0, "pass1 == 0");
+	bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash1), &key, 0);
+	ASSERT_EQ(skel->bss->pass1, 1, "pass1 == 1");
+	bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash1), &key, 0);
+	ASSERT_EQ(skel->bss->pass1, 2, "pass1 == 2");
+
+	ASSERT_EQ(skel->bss->pass2, 0, "pass2 == 0");
+	bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash2), &key, 0);
+	ASSERT_EQ(skel->bss->pass2, 1, "pass2 == 1");
+	bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash2), &key, 0);
+	ASSERT_EQ(skel->bss->pass2, 2, "pass2 == 2");
+out:
+	recursion__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/recursion.c b/tools/testing/selftests/bpf/progs/recursion.c
new file mode 100644
index 000000000000..49f679375b9d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/recursion.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, long);
+} hash1 SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, long);
+} hash2 SEC(".maps");
+
+int pass1 = 0;
+int pass2 = 0;
+
+SEC("fentry/__htab_map_lookup_elem")
+int BPF_PROG(on_lookup, struct bpf_map *map)
+{
+	int key = 0;
+
+	if (map == (void *)&hash1) {
+		pass1++;
+		return 0;
+	}
+	if (map == (void *)&hash2) {
+		pass2++;
+		/* htab_map_gen_lookup() will inline below call
+		 * into direct call to __htab_map_lookup_elem()
+		 */
+		bpf_map_lookup_elem(&hash2, &key);
+		return 0;
+	}
+
+	return 0;
+}
-- 
2.24.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v4 bpf-next 6/9] bpf: Count the number of times recursion was prevented
  2021-02-10  3:36 [PATCH v4 bpf-next 0/9] bpf: Misc improvements Alexei Starovoitov
                   ` (4 preceding siblings ...)
  2021-02-10  3:36 ` [PATCH v4 bpf-next 5/9] selftest/bpf: Add a recursion test Alexei Starovoitov
@ 2021-02-10  3:36 ` Alexei Starovoitov
  2021-02-10  3:36 ` [PATCH v4 bpf-next 7/9] selftests/bpf: Improve recursion selftest Alexei Starovoitov
                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 14+ messages in thread
From: Alexei Starovoitov @ 2021-02-10  3:36 UTC (permalink / raw)
  To: davem; +Cc: daniel, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

Add a per-program counter for the number of times the recursion prevention
mechanism was triggered and expose it via show_fdinfo and bpf_prog_info.
Teach bpftool to print it.
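
With this, the fdinfo of a program fd gains one more line next to the existing
stats, e.g. (values below are made up):

	run_time_ns:	123456
	run_cnt:	42
	recursion_misses:	2

bpftool prog show/list reports the field in plain and JSON output, but only
when it is non-zero.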

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
---
 include/linux/filter.h         |  1 +
 include/uapi/linux/bpf.h       |  1 +
 kernel/bpf/syscall.c           | 14 ++++++++++----
 kernel/bpf/trampoline.c        | 18 ++++++++++++++++--
 tools/bpf/bpftool/prog.c       |  4 ++++
 tools/include/uapi/linux/bpf.h |  1 +
 6 files changed, 33 insertions(+), 6 deletions(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6a06f3c69f4e..3b00fc906ccd 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -543,6 +543,7 @@ struct bpf_binary_header {
 struct bpf_prog_stats {
 	u64 cnt;
 	u64 nsecs;
+	u64 misses;
 	struct u64_stats_sync syncp;
 } __aligned(2 * sizeof(u64));
 
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index c001766adcbc..c547ad1ffe43 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4501,6 +4501,7 @@ struct bpf_prog_info {
 	__aligned_u64 prog_tags;
 	__u64 run_time_ns;
 	__u64 run_cnt;
+	__u64 recursion_misses;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index f7df56a704de..c859bc46d06c 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1731,25 +1731,28 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
 static void bpf_prog_get_stats(const struct bpf_prog *prog,
 			       struct bpf_prog_stats *stats)
 {
-	u64 nsecs = 0, cnt = 0;
+	u64 nsecs = 0, cnt = 0, misses = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
 		const struct bpf_prog_stats *st;
 		unsigned int start;
-		u64 tnsecs, tcnt;
+		u64 tnsecs, tcnt, tmisses;
 
 		st = per_cpu_ptr(prog->stats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&st->syncp);
 			tnsecs = st->nsecs;
 			tcnt = st->cnt;
+			tmisses = st->misses;
 		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
 		nsecs += tnsecs;
 		cnt += tcnt;
+		misses += tmisses;
 	}
 	stats->nsecs = nsecs;
 	stats->cnt = cnt;
+	stats->misses = misses;
 }
 
 #ifdef CONFIG_PROC_FS
@@ -1768,14 +1771,16 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
 		   "memlock:\t%llu\n"
 		   "prog_id:\t%u\n"
 		   "run_time_ns:\t%llu\n"
-		   "run_cnt:\t%llu\n",
+		   "run_cnt:\t%llu\n"
+		   "recursion_misses:\t%llu\n",
 		   prog->type,
 		   prog->jited,
 		   prog_tag,
 		   prog->pages * 1ULL << PAGE_SHIFT,
 		   prog->aux->id,
 		   stats.nsecs,
-		   stats.cnt);
+		   stats.cnt,
+		   stats.misses);
 }
 #endif
 
@@ -3438,6 +3443,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
 	bpf_prog_get_stats(prog, &stats);
 	info.run_time_ns = stats.nsecs;
 	info.run_cnt = stats.cnt;
+	info.recursion_misses = stats.misses;
 
 	if (!bpf_capable()) {
 		info.jited_prog_len = 0;
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 89ef6320d19b..7bc3b3209224 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -394,6 +394,16 @@ static u64 notrace bpf_prog_start_time(void)
 	return start;
 }
 
+static void notrace inc_misses_counter(struct bpf_prog *prog)
+{
+	struct bpf_prog_stats *stats;
+
+	stats = this_cpu_ptr(prog->stats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->misses++;
+	u64_stats_update_end(&stats->syncp);
+}
+
 /* The logic is similar to BPF_PROG_RUN, but with an explicit
  * rcu_read_lock() and migrate_disable() which are required
  * for the trampoline. The macro is split into
@@ -412,8 +422,10 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
 {
 	rcu_read_lock();
 	migrate_disable();
-	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+		inc_misses_counter(prog);
 		return 0;
+	}
 	return bpf_prog_start_time();
 }
 
@@ -451,8 +463,10 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
 	rcu_read_lock_trace();
 	migrate_disable();
 	might_fault();
-	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+		inc_misses_counter(prog);
 		return 0;
+	}
 	return bpf_prog_start_time();
 }
 
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 1fe3ba255bad..f2b915b20546 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -368,6 +368,8 @@ static void print_prog_header_json(struct bpf_prog_info *info)
 		jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
 		jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
 	}
+	if (info->recursion_misses)
+		jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
 }
 
 static void print_prog_json(struct bpf_prog_info *info, int fd)
@@ -446,6 +448,8 @@ static void print_prog_header_plain(struct bpf_prog_info *info)
 	if (info->run_time_ns)
 		printf(" run_time_ns %lld run_cnt %lld",
 		       info->run_time_ns, info->run_cnt);
+	if (info->recursion_misses)
+		printf(" recursion_misses %lld", info->recursion_misses);
 	printf("\n");
 }
 
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index c001766adcbc..c547ad1ffe43 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4501,6 +4501,7 @@ struct bpf_prog_info {
 	__aligned_u64 prog_tags;
 	__u64 run_time_ns;
 	__u64 run_cnt;
+	__u64 recursion_misses;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
-- 
2.24.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v4 bpf-next 7/9] selftests/bpf: Improve recursion selftest
  2021-02-10  3:36 [PATCH v4 bpf-next 0/9] bpf: Misc improvements Alexei Starovoitov
                   ` (5 preceding siblings ...)
  2021-02-10  3:36 ` [PATCH v4 bpf-next 6/9] bpf: Count the number of times recursion was prevented Alexei Starovoitov
@ 2021-02-10  3:36 ` Alexei Starovoitov
  2021-02-10  3:36 ` [PATCH v4 bpf-next 8/9] bpf: Allows per-cpu maps and map-in-map in sleepable programs Alexei Starovoitov
                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 14+ messages in thread
From: Alexei Starovoitov @ 2021-02-10  3:36 UTC (permalink / raw)
  To: davem; +Cc: daniel, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

Since the recursion_misses counter is available in bpf_prog_info,
improve the selftest to make sure it's counting correctly.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 tools/testing/selftests/bpf/prog_tests/recursion.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/recursion.c b/tools/testing/selftests/bpf/prog_tests/recursion.c
index 863757461e3f..0e378d63fe18 100644
--- a/tools/testing/selftests/bpf/prog_tests/recursion.c
+++ b/tools/testing/selftests/bpf/prog_tests/recursion.c
@@ -5,6 +5,8 @@
 
 void test_recursion(void)
 {
+	struct bpf_prog_info prog_info = {};
+	__u32 prog_info_len = sizeof(prog_info);
 	struct recursion *skel;
 	int key = 0;
 	int err;
@@ -28,6 +30,12 @@ void test_recursion(void)
 	ASSERT_EQ(skel->bss->pass2, 1, "pass2 == 1");
 	bpf_map_lookup_elem(bpf_map__fd(skel->maps.hash2), &key, 0);
 	ASSERT_EQ(skel->bss->pass2, 2, "pass2 == 2");
+
+	err = bpf_obj_get_info_by_fd(bpf_program__fd(skel->progs.on_lookup),
+				     &prog_info, &prog_info_len);
+	if (!ASSERT_OK(err, "get_prog_info"))
+		goto out;
+	ASSERT_EQ(prog_info.recursion_misses, 2, "recursion_misses");
 out:
 	recursion__destroy(skel);
 }
-- 
2.24.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v4 bpf-next 8/9] bpf: Allows per-cpu maps and map-in-map in sleepable programs
  2021-02-10  3:36 [PATCH v4 bpf-next 0/9] bpf: Misc improvements Alexei Starovoitov
                   ` (6 preceding siblings ...)
  2021-02-10  3:36 ` [PATCH v4 bpf-next 7/9] selftests/bpf: Improve recursion selftest Alexei Starovoitov
@ 2021-02-10  3:36 ` Alexei Starovoitov
  2021-02-10  3:36 ` [PATCH v4 bpf-next 9/9] selftests/bpf: Add a test for map-in-map and per-cpu maps in sleepable progs Alexei Starovoitov
  2021-02-11 15:30 ` [PATCH v4 bpf-next 0/9] bpf: Misc improvements patchwork-bot+netdevbpf
  9 siblings, 0 replies; 14+ messages in thread
From: Alexei Starovoitov @ 2021-02-10  3:36 UTC (permalink / raw)
  To: davem; +Cc: daniel, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

Since sleepable programs now execute under migrate_disable,
the per-cpu maps are safe to use.
Map-in-map has been ok to use in sleepable programs from the time
sleepable progs were introduced.

Note that non-preallocated maps are still not safe, since there is
no rcu_read_lock yet in sleepable programs and dynamically allocated
map elements rely on rcu protection. Sleepable programs
have rcu_read_lock_trace instead. That limitation will be addressed
in the future.
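
A minimal sketch of what this allows (names are illustrative; it mirrors what
the selftest in the last patch of the series exercises): a sleepable LSM
program doing a plain lookup in a preallocated per-cpu array map:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	char _license[] SEC("license") = "GPL";

	struct {
		__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u64);
	} percpu_array SEC(".maps");

	SEC("lsm.s/bprm_committed_creds")
	int BPF_PROG(count_exec, struct linux_binprm *bprm)
	{
		__u32 key = 0;
		__u64 *value;

		/* per-cpu lookup, now accepted by the verifier for
		 * sleepable programs
		 */
		value = bpf_map_lookup_elem(&percpu_array, &key);
		if (value)
			(*value)++;
		return 0;
	}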

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: KP Singh <kpsingh@kernel.org>
---
 kernel/bpf/hashtab.c  | 4 ++--
 kernel/bpf/verifier.c | 7 ++++++-
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index c1ac7f964bc9..d63912e73ad9 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1148,7 +1148,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 		/* unknown flags */
 		return -EINVAL;
 
-	WARN_ON_ONCE(!rcu_read_lock_held());
+	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
 
 	key_size = map->key_size;
 
@@ -1202,7 +1202,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 		/* unknown flags */
 		return -EINVAL;
 
-	WARN_ON_ONCE(!rcu_read_lock_held());
+	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
 
 	key_size = map->key_size;
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4189edb41b73..9561f2af7710 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -10020,9 +10020,14 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
 		case BPF_MAP_TYPE_HASH:
 		case BPF_MAP_TYPE_LRU_HASH:
 		case BPF_MAP_TYPE_ARRAY:
+		case BPF_MAP_TYPE_PERCPU_HASH:
+		case BPF_MAP_TYPE_PERCPU_ARRAY:
+		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+		case BPF_MAP_TYPE_HASH_OF_MAPS:
 			if (!is_preallocated_map(map)) {
 				verbose(env,
-					"Sleepable programs can only use preallocated hash maps\n");
+					"Sleepable programs can only use preallocated maps\n");
 				return -EINVAL;
 			}
 			break;
-- 
2.24.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v4 bpf-next 9/9] selftests/bpf: Add a test for map-in-map and per-cpu maps in sleepable progs
  2021-02-10  3:36 [PATCH v4 bpf-next 0/9] bpf: Misc improvements Alexei Starovoitov
                   ` (7 preceding siblings ...)
  2021-02-10  3:36 ` [PATCH v4 bpf-next 8/9] bpf: Allows per-cpu maps and map-in-map in sleepable programs Alexei Starovoitov
@ 2021-02-10  3:36 ` Alexei Starovoitov
  2021-02-11 15:30 ` [PATCH v4 bpf-next 0/9] bpf: Misc improvements patchwork-bot+netdevbpf
  9 siblings, 0 replies; 14+ messages in thread
From: Alexei Starovoitov @ 2021-02-10  3:36 UTC (permalink / raw)
  To: davem; +Cc: daniel, bpf, kernel-team

From: Alexei Starovoitov <ast@kernel.org>

Add a basic test for map-in-map and per-cpu maps in sleepable programs.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: KP Singh <kpsingh@kernel.org>
---
 tools/testing/selftests/bpf/progs/lsm.c | 69 +++++++++++++++++++++++++
 1 file changed, 69 insertions(+)

diff --git a/tools/testing/selftests/bpf/progs/lsm.c b/tools/testing/selftests/bpf/progs/lsm.c
index ff4d343b94b5..33694ef8acfa 100644
--- a/tools/testing/selftests/bpf/progs/lsm.c
+++ b/tools/testing/selftests/bpf/progs/lsm.c
@@ -30,6 +30,53 @@ struct {
 	__type(value, __u64);
 } lru_hash SEC(".maps");
 
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u64);
+} percpu_array SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u64);
+} percpu_hash SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u64);
+} lru_percpu_hash SEC(".maps");
+
+struct inner_map {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, __u64);
+} inner_map SEC(".maps");
+
+struct outer_arr {
+	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(int));
+	__array(values, struct inner_map);
+} outer_arr SEC(".maps") = {
+	.values = { [0] = &inner_map },
+};
+
+struct outer_hash {
+	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(int));
+	__array(values, struct inner_map);
+} outer_hash SEC(".maps") = {
+	.values = { [0] = &inner_map },
+};
+
 char _license[] SEC("license") = "GPL";
 
 int monitored_pid = 0;
@@ -61,6 +108,7 @@ SEC("lsm.s/bprm_committed_creds")
 int BPF_PROG(test_void_hook, struct linux_binprm *bprm)
 {
 	__u32 pid = bpf_get_current_pid_tgid() >> 32;
+	struct inner_map *inner_map;
 	char args[64];
 	__u32 key = 0;
 	__u64 *value;
@@ -80,6 +128,27 @@ int BPF_PROG(test_void_hook, struct linux_binprm *bprm)
 	value = bpf_map_lookup_elem(&lru_hash, &key);
 	if (value)
 		*value = 0;
+	value = bpf_map_lookup_elem(&percpu_array, &key);
+	if (value)
+		*value = 0;
+	value = bpf_map_lookup_elem(&percpu_hash, &key);
+	if (value)
+		*value = 0;
+	value = bpf_map_lookup_elem(&lru_percpu_hash, &key);
+	if (value)
+		*value = 0;
+	inner_map = bpf_map_lookup_elem(&outer_arr, &key);
+	if (inner_map) {
+		value = bpf_map_lookup_elem(inner_map, &key);
+		if (value)
+			*value = 0;
+	}
+	inner_map = bpf_map_lookup_elem(&outer_hash, &key);
+	if (inner_map) {
+		value = bpf_map_lookup_elem(inner_map, &key);
+		if (value)
+			*value = 0;
+	}
 
 	return 0;
 }
-- 
2.24.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH v4 bpf-next 0/9] bpf: Misc improvements
  2021-02-10  3:36 [PATCH v4 bpf-next 0/9] bpf: Misc improvements Alexei Starovoitov
                   ` (8 preceding siblings ...)
  2021-02-10  3:36 ` [PATCH v4 bpf-next 9/9] selftests/bpf: Add a test for map-in-map and per-cpu maps in sleepable progs Alexei Starovoitov
@ 2021-02-11 15:30 ` patchwork-bot+netdevbpf
  9 siblings, 0 replies; 14+ messages in thread
From: patchwork-bot+netdevbpf @ 2021-02-11 15:30 UTC (permalink / raw)
  To: Alexei Starovoitov; +Cc: davem, daniel, bpf, kernel-team

Hello:

This series was applied to bpf/bpf-next.git (refs/heads/master):

On Tue,  9 Feb 2021 19:36:25 -0800 you wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> v4:
> - split migrate_disable into separate patch
> 
> v3:
> - address review comments
> - improve recursion selftest
> 
> [...]

Here is the summary with links:
  - [v4,bpf-next,1/9] bpf: Optimize program stats
    https://git.kernel.org/bpf/bpf-next/c/700d4796ef59
  - [v4,bpf-next,2/9] bpf: Run sleepable programs with migration disabled
    https://git.kernel.org/bpf/bpf-next/c/031d6e02ddbb
  - [v4,bpf-next,3/9] bpf: Compute program stats for sleepable programs
    https://git.kernel.org/bpf/bpf-next/c/f2dd3b394674
  - [v4,bpf-next,4/9] bpf: Add per-program recursion prevention mechanism
    https://git.kernel.org/bpf/bpf-next/c/ca06f55b9002
  - [v4,bpf-next,5/9] selftest/bpf: Add a recursion test
    https://git.kernel.org/bpf/bpf-next/c/406c557edc5b
  - [v4,bpf-next,6/9] bpf: Count the number of times recursion was prevented
    https://git.kernel.org/bpf/bpf-next/c/9ed9e9ba2337
  - [v4,bpf-next,7/9] selftests/bpf: Improve recursion selftest
    https://git.kernel.org/bpf/bpf-next/c/dcf33b6f4de1
  - [v4,bpf-next,8/9] bpf: Allows per-cpu maps and map-in-map in sleepable programs
    https://git.kernel.org/bpf/bpf-next/c/638e4b825d52
  - [v4,bpf-next,9/9] selftests/bpf: Add a test for map-in-map and per-cpu maps in sleepable progs
    https://git.kernel.org/bpf/bpf-next/c/750e5d7649b1

You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html



^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v4 bpf-next 1/9] bpf: Optimize program stats
  2021-02-10  3:36 ` [PATCH v4 bpf-next 1/9] bpf: Optimize program stats Alexei Starovoitov
@ 2021-02-12  3:26   ` Ilya Leoshkevich
  2021-02-12  3:43     ` Alexei Starovoitov
  0 siblings, 1 reply; 14+ messages in thread
From: Ilya Leoshkevich @ 2021-02-12  3:26 UTC (permalink / raw)
  To: Alexei Starovoitov, davem; +Cc: daniel, bpf, kernel-team

On Tue, 2021-02-09 at 19:36 -0800, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> Move bpf_prog_stats from prog->aux into prog to avoid one extra load
> in critical path of program execution.
> 
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>
> ---
>  include/linux/bpf.h     |  8 --------
>  include/linux/filter.h  | 14 +++++++++++---
>  kernel/bpf/core.c       |  8 ++++----
>  kernel/bpf/syscall.c    |  2 +-
>  kernel/bpf/trampoline.c |  2 +-
>  kernel/bpf/verifier.c   |  2 +-
>  6 files changed, 18 insertions(+), 18 deletions(-)

...

> @@ -249,10 +249,10 @@ void __bpf_prog_free(struct bpf_prog *fp)
>         if (fp->aux) {
>                 mutex_destroy(&fp->aux->used_maps_mutex);
>                 mutex_destroy(&fp->aux->dst_mutex);
> -               free_percpu(fp->aux->stats);
>                 kfree(fp->aux->poke_tab);
>                 kfree(fp->aux);
>         }
> +       free_percpu(fp->stats);

On s390 this line causes the following in "ld_abs: vlan + abs, test 1"
with the latest bpf-next:

Unable to handle kernel pointer dereference in virtual kernel address space
Failing address: 0000000000000000 TEID: 0000000000000483
Fault in home space mode while using kernel ASCE.
AS:0000000001bd0007 R3:00000001ffff0007 S:00000001ffffd000 P:000000000000003d
Oops: 0004 ilc:2 [#1] SMP
Modules linked in:
CPU: 0 PID: 184 Comm: test_verifier Not tainted 5.11.0-rc4-00952-g6fdd671baaf5 #7
Hardware name: IBM 3906 M03 703 (KVM/Linux)
Krnl PSW : 0404c00180000000 000000000042707a (refill_obj_stock+0x11a/0x1e0)
           R:0 T:1 IO:0 EX:0 Key:0 M:1 W:0 P:0 AS:3 CC:0 PM:0 RI:0 EA:3
Krnl GPRS: 0000000000000000 0000000000000000 0000000000000018 0000000100000000
           0000000000000000 000000008764ca88 00000000013d3ff8 000000000141d140
           0000000000000080 0000000000000000 0000000000000000 00000001ff61c8f0
           000000008764c000 00000000012eb678 0000000000427066 00000380001bb888
Krnl Code: 0000000000427070: a7380000           lhi     %r3,0
           0000000000427074: 5810a018           l       %r1,24(%r10)
          #0000000000427078: 1841               lr      %r4,%r1
          >000000000042707a: ba432000           cs      %r4,%r3,0(%r2)
           000000000042707e: a774fffb           brc     7,0000000000427074
           0000000000427082: 1a18               ar      %r1,%r8
           0000000000427084: 5010b018           st      %r1,24(%r11)
           0000000000427088: c21f00001000       clfi    %r1,4096
Call Trace:
 [<000000000042707a>] refill_obj_stock+0x11a/0x1e0 
([<0000000000427066>] refill_obj_stock+0x106/0x1e0)
 [<000000000039bd86>] free_percpu.part.0+0xd6/0x428 
 [<00000000002ef738>] bpf_prog_realloc+0xa0/0xd8 
 [<00000000002efae8>] bpf_patch_insn_single+0x88/0x208 
 [<000000000030762e>] bpf_patch_insn_data+0x36/0x290 
 [<00000000003086ca>] fixup_bpf_calls+0x572/0xa28 
 [<000000000031045c>] bpf_check+0xb44/0xcb8 
 [<00000000002f747a>] bpf_prog_load+0x5fa/0x968 
 [<00000000002fa25c>] __do_sys_bpf+0x634/0x700 
 [<0000000000a2f3ca>] system_call+0xe2/0x28c 
INFO: lockdep is turned off.
Last Breaking-Event-Address:
 [<0000000000203f76>] lock_release+0x6e/0x218
Kernel panic - not syncing: Fatal exception: panic_on_oops

Here is the better backtrace (line numbers correspond to commit
6fdd671baaf5):

#0  refill_obj_stock (objcg=objcg@entry=0x0, nr_bytes=<optimized out>) at mm/memcontrol.c:3248
#1  0x0000000000427a08 in obj_cgroup_uncharge (objcg=objcg@entry=0x0, size=<optimized out>) at mm/memcontrol.c:3300
#2  0x000000000039bd86 in pcpu_memcg_free_hook (size=32, off=<optimized out>, chunk=0x82d4fa00) at ./include/linux/bitmap.h:400
#3  free_percpu (ptr=0x3fd813b5960) at mm/percpu.c:2105
#4  0x000000000039c0ec in free_percpu (ptr=<optimized out>) at mm/percpu.c:2089
#5  0x00000000002ef738 in __bpf_prog_free (fp=0x380001ce000) at kernel/bpf/core.c:262
#6  bpf_prog_realloc (fp_old=fp_old@entry=0x380001ce000, size=249856, size@entry=245776, gfp_extra_flags=gfp_extra_flags@entry=1051840) at kernel/bpf/core.c:248
#7  0x00000000002efae8 in bpf_patch_insn_single (prog=0x380001ce000, off=off@entry=2205, patch=patch@entry=0x380001bbba0, len=len@entry=6) at ./include/linux/filter.h:788
#8  0x000000000030762e in bpf_patch_insn_data (env=env@entry=0x87566000, off=off@entry=2205, patch=patch@entry=0x380001bbba0, len=<optimized out>) at kernel/bpf/verifier.c:10669
#9  0x00000000003086ca in fixup_bpf_calls (env=env@entry=0x87566000) at kernel/bpf/verifier.c:11539
#10 0x000000000031045c in bpf_check (prog=prog@entry=0x380001bbda0, attr=attr@entry=0x380001bbe80, uattr=uattr@entry=0x3ffe66fe9d0) at kernel/bpf/verifier.c:12573
#11 0x00000000002f747a in bpf_prog_load (attr=attr@entry=0x380001bbe80, uattr=uattr@entry=0x3ffe66fe9d0) at kernel/bpf/syscall.c:2209
#12 0x00000000002fa25c in __do_sys_bpf (cmd=<optimized out>, uattr=0x3ffe66fe9d0, size=120) at kernel/bpf/syscall.c:4388
#13 0x0000000000a2f3ca in system_call () at arch/s390/kernel/entry.S:439

So we end up with objcg=NULL, but I'm not sure why this happens.
Please let me know if you need more info.


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v4 bpf-next 1/9] bpf: Optimize program stats
  2021-02-12  3:26   ` Ilya Leoshkevich
@ 2021-02-12  3:43     ` Alexei Starovoitov
  2021-02-12  3:51       ` Ilya Leoshkevich
  0 siblings, 1 reply; 14+ messages in thread
From: Alexei Starovoitov @ 2021-02-12  3:43 UTC (permalink / raw)
  To: Ilya Leoshkevich; +Cc: David S. Miller, Daniel Borkmann, bpf, Kernel Team

On Thu, Feb 11, 2021 at 7:26 PM Ilya Leoshkevich <iii@linux.ibm.com> wrote:
> mm/percpu.c:2089
> #5  0x00000000002ef738 in __bpf_prog_free (fp=0x380001ce000) at
> kernel/bpf/core.c:262
> #6  bpf_prog_realloc (fp_old=fp_old@entry=0x380001ce000, size=249856,
>
> So we end up with objcg=NULL, but I'm not sure why this happens.
> Please let me know if you need more info.

Argh. Thanks for reporting!
Pushed the obvious fix:
https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/commit/?id=1336c662474edec3966c96c8de026f794d16b804
Pls pull bpf-next and give it a spin.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v4 bpf-next 1/9] bpf: Optimize program stats
  2021-02-12  3:43     ` Alexei Starovoitov
@ 2021-02-12  3:51       ` Ilya Leoshkevich
  0 siblings, 0 replies; 14+ messages in thread
From: Ilya Leoshkevich @ 2021-02-12  3:51 UTC (permalink / raw)
  To: Alexei Starovoitov; +Cc: David S. Miller, Daniel Borkmann, bpf, Kernel Team

On Thu, 2021-02-11 at 19:43 -0800, Alexei Starovoitov wrote:
> On Thu, Feb 11, 2021 at 7:26 PM Ilya Leoshkevich <iii@linux.ibm.com>
> wrote:
> > mm/percpu.c:2089
> > #5  0x00000000002ef738 in __bpf_prog_free (fp=0x380001ce000) at
> > kernel/bpf/core.c:262
> > #6  bpf_prog_realloc (fp_old=fp_old@entry=0x380001ce000,
> > size=249856,
> > 
> > So we end up with objcg=NULL, but I'm not sure why this happens.
> > Please let me know if you need more info.
> 
> Argh. Thanks for reporting!
> Pushed the obvious fix:
> https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/commit/?id=1336c662474edec3966c96c8de026f794d16b804
> Pls pull bpf-next and give it a spin.

Works now, thanks for the very quick fix!


^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2021-02-12  3:53 UTC | newest]

Thread overview: 14+ messages
2021-02-10  3:36 [PATCH v4 bpf-next 0/9] bpf: Misc improvements Alexei Starovoitov
2021-02-10  3:36 ` [PATCH v4 bpf-next 1/9] bpf: Optimize program stats Alexei Starovoitov
2021-02-12  3:26   ` Ilya Leoshkevich
2021-02-12  3:43     ` Alexei Starovoitov
2021-02-12  3:51       ` Ilya Leoshkevich
2021-02-10  3:36 ` [PATCH v4 bpf-next 2/9] bpf: Run sleepable programs with migration disabled Alexei Starovoitov
2021-02-10  3:36 ` [PATCH v4 bpf-next 3/9] bpf: Compute program stats for sleepable programs Alexei Starovoitov
2021-02-10  3:36 ` [PATCH v4 bpf-next 4/9] bpf: Add per-program recursion prevention mechanism Alexei Starovoitov
2021-02-10  3:36 ` [PATCH v4 bpf-next 5/9] selftest/bpf: Add a recursion test Alexei Starovoitov
2021-02-10  3:36 ` [PATCH v4 bpf-next 6/9] bpf: Count the number of times recursion was prevented Alexei Starovoitov
2021-02-10  3:36 ` [PATCH v4 bpf-next 7/9] selftests/bpf: Improve recursion selftest Alexei Starovoitov
2021-02-10  3:36 ` [PATCH v4 bpf-next 8/9] bpf: Allows per-cpu maps and map-in-map in sleepable programs Alexei Starovoitov
2021-02-10  3:36 ` [PATCH v4 bpf-next 9/9] selftests/bpf: Add a test for map-in-map and per-cpu maps in sleepable progs Alexei Starovoitov
2021-02-11 15:30 ` [PATCH v4 bpf-next 0/9] bpf: Misc improvements patchwork-bot+netdevbpf
