* [PATCH bpf-next 01/12] bpf: Move update_prog_stats to syscall object
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
@ 2023-08-28 7:55 ` Jiri Olsa
2023-08-28 17:25 ` Alexei Starovoitov
2023-08-28 7:55 ` [PATCH bpf-next 02/12] bpf: Move bpf_prog_start_time to linux/filter.h Jiri Olsa
` (11 subsequent siblings)
12 siblings, 1 reply; 34+ messages in thread
From: Jiri Olsa @ 2023-08-28 7:55 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
Move the update_prog_stats function to the syscall object and make it
global, together with the NO_START_TIME macro, adding a 'bpf_' prefix
to both.
It will be used by other program types in the following changes.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
include/linux/bpf.h | 7 +++++++
kernel/bpf/syscall.c | 22 ++++++++++++++++++++++
kernel/bpf/trampoline.c | 37 +++++++------------------------------
3 files changed, 36 insertions(+), 30 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 12596af59c00..05eece17a989 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1982,6 +1982,8 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
return ret;
}
+#define BPF_PROG_NO_START_TIME 1
+
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;
@@ -2456,6 +2458,7 @@ static inline bool has_current_bpf_ctx(void)
}
void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
+void notrace bpf_prog_update_prog_stats(struct bpf_prog *prog, u64 start);
void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
enum bpf_dynptr_type type, u32 offset, u32 size);
@@ -2695,6 +2698,10 @@ static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
{
}
+static void bpf_prog_update_prog_stats(struct bpf_prog *prog, u64 start)
+{
+}
+
static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
{
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ebeb0695305a..5d39d98f5eb1 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2188,6 +2188,28 @@ void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
u64_stats_update_end_irqrestore(&stats->syncp, flags);
}
+void notrace bpf_prog_update_prog_stats(struct bpf_prog *prog,
+ u64 start)
+{
+ struct bpf_prog_stats *stats;
+
+ if (static_branch_unlikely(&bpf_stats_enabled_key) &&
+ /* static_key could be enabled in __bpf_prog_enter*
+ * and disabled in __bpf_prog_exit*.
+ * And vice versa.
+ * Hence check that 'start' is valid.
+ */
+ start > BPF_PROG_NO_START_TIME) {
+ unsigned long flags;
+
+ stats = this_cpu_ptr(prog->stats);
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->cnt);
+ u64_stats_add(&stats->nsecs, sched_clock() - start);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+ }
+}
+
static void bpf_prog_get_stats(const struct bpf_prog *prog,
struct bpf_prog_kstats *stats)
{
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 78acf28d4873..a6528e847fae 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -819,15 +819,14 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
mutex_unlock(&trampoline_mutex);
}
-#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
- u64 start = NO_START_TIME;
+ u64 start = BPF_PROG_NO_START_TIME;
if (static_branch_unlikely(&bpf_stats_enabled_key)) {
start = sched_clock();
if (unlikely(!start))
- start = NO_START_TIME;
+ start = BPF_PROG_NO_START_TIME;
}
return start;
}
@@ -860,35 +859,13 @@ static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tram
return bpf_prog_start_time();
}
-static void notrace update_prog_stats(struct bpf_prog *prog,
- u64 start)
-{
- struct bpf_prog_stats *stats;
-
- if (static_branch_unlikely(&bpf_stats_enabled_key) &&
- /* static_key could be enabled in __bpf_prog_enter*
- * and disabled in __bpf_prog_exit*.
- * And vice versa.
- * Hence check that 'start' is valid.
- */
- start > NO_START_TIME) {
- unsigned long flags;
-
- stats = this_cpu_ptr(prog->stats);
- flags = u64_stats_update_begin_irqsave(&stats->syncp);
- u64_stats_inc(&stats->cnt);
- u64_stats_add(&stats->nsecs, sched_clock() - start);
- u64_stats_update_end_irqrestore(&stats->syncp, flags);
- }
-}
-
static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
struct bpf_tramp_run_ctx *run_ctx)
__releases(RCU)
{
bpf_reset_run_ctx(run_ctx->saved_run_ctx);
- update_prog_stats(prog, start);
+ bpf_prog_update_prog_stats(prog, start);
this_cpu_dec(*(prog->active));
migrate_enable();
rcu_read_unlock();
@@ -906,7 +883,7 @@ static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
- return NO_START_TIME;
+ return BPF_PROG_NO_START_TIME;
}
static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
@@ -941,7 +918,7 @@ void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
{
bpf_reset_run_ctx(run_ctx->saved_run_ctx);
- update_prog_stats(prog, start);
+ bpf_prog_update_prog_stats(prog, start);
this_cpu_dec(*(prog->active));
migrate_enable();
rcu_read_unlock_trace();
@@ -964,7 +941,7 @@ static void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
{
bpf_reset_run_ctx(run_ctx->saved_run_ctx);
- update_prog_stats(prog, start);
+ bpf_prog_update_prog_stats(prog, start);
migrate_enable();
rcu_read_unlock_trace();
}
@@ -987,7 +964,7 @@ static void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start,
{
bpf_reset_run_ctx(run_ctx->saved_run_ctx);
- update_prog_stats(prog, start);
+ bpf_prog_update_prog_stats(prog, start);
migrate_enable();
rcu_read_unlock();
}
--
2.41.0
^ permalink raw reply related [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 01/12] bpf: Move update_prog_stats to syscall object
2023-08-28 7:55 ` [PATCH bpf-next 01/12] bpf: Move update_prog_stats to syscall object Jiri Olsa
@ 2023-08-28 17:25 ` Alexei Starovoitov
2023-08-29 8:00 ` Jiri Olsa
0 siblings, 1 reply; 34+ messages in thread
From: Alexei Starovoitov @ 2023-08-28 17:25 UTC (permalink / raw)
To: Jiri Olsa
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf,
Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
On Mon, Aug 28, 2023 at 12:55 AM Jiri Olsa <jolsa@kernel.org> wrote:
>
> static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
> struct bpf_tramp_run_ctx *run_ctx)
> __releases(RCU)
> {
> bpf_reset_run_ctx(run_ctx->saved_run_ctx);
>
> - update_prog_stats(prog, start);
> + bpf_prog_update_prog_stats(prog, start);
I bet this adds a noticeable performance regression.
The function was inlined before and the static key made it a nop.
Above makes it into a function call.
Please use always_inline and move it to a header.
^ permalink raw reply [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 01/12] bpf: Move update_prog_stats to syscall object
2023-08-28 17:25 ` Alexei Starovoitov
@ 2023-08-29 8:00 ` Jiri Olsa
0 siblings, 0 replies; 34+ messages in thread
From: Jiri Olsa @ 2023-08-29 8:00 UTC (permalink / raw)
To: Alexei Starovoitov
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf,
Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
On Mon, Aug 28, 2023 at 10:25:38AM -0700, Alexei Starovoitov wrote:
> On Mon, Aug 28, 2023 at 12:55 AM Jiri Olsa <jolsa@kernel.org> wrote:
> >
> > static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
> > struct bpf_tramp_run_ctx *run_ctx)
> > __releases(RCU)
> > {
> > bpf_reset_run_ctx(run_ctx->saved_run_ctx);
> >
> > - update_prog_stats(prog, start);
> > + bpf_prog_update_prog_stats(prog, start);
>
> I bet this adds a noticeable performance regression.
> The function was inlined before and the static key made it a nop.
> Above makes it into a function call.
> Please use always_inline and move it to a header.
right.. will change
thanks,
jirka
^ permalink raw reply [flat|nested] 34+ messages in thread
* [PATCH bpf-next 02/12] bpf: Move bpf_prog_start_time to linux/filter.h
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
2023-08-28 7:55 ` [PATCH bpf-next 01/12] bpf: Move update_prog_stats to syscall object Jiri Olsa
@ 2023-08-28 7:55 ` Jiri Olsa
2023-09-04 13:12 ` Hou Tao
2023-08-28 7:55 ` [PATCH bpf-next 03/12] bpf: Count stats for kprobe_multi programs Jiri Olsa
` (10 subsequent siblings)
12 siblings, 1 reply; 34+ messages in thread
From: Jiri Olsa @ 2023-08-28 7:55 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
Move bpf_prog_start_time to linux/filter.h and make it
globally available.
It will be used by other program types in the following changes.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
include/linux/bpf.h | 20 ++++++++++++++++++++
kernel/bpf/trampoline.c | 12 ------------
2 files changed, 20 insertions(+), 12 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 05eece17a989..23a73f52c7bc 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -29,6 +29,9 @@
#include <linux/rcupdate_trace.h>
#include <linux/static_call.h>
#include <linux/memcontrol.h>
+#include <linux/sched/clock.h>
+
+DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -2460,6 +2463,18 @@ static inline bool has_current_bpf_ctx(void)
void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
void notrace bpf_prog_update_prog_stats(struct bpf_prog *prog, u64 start);
+static __always_inline u64 notrace bpf_prog_start_time(void)
+{
+ u64 start = BPF_PROG_NO_START_TIME;
+
+ if (static_branch_unlikely(&bpf_stats_enabled_key)) {
+ start = sched_clock();
+ if (unlikely(!start))
+ start = BPF_PROG_NO_START_TIME;
+ }
+ return start;
+}
+
void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
enum bpf_dynptr_type type, u32 offset, u32 size);
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
@@ -2702,6 +2717,11 @@ static void bpf_prog_update_prog_stats(struct bpf_prog *prog, u64 start)
{
}
+static inline u64 notrace bpf_prog_start_time(void)
+{
+ return 0;
+}
+
static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
{
}
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index a6528e847fae..ed5b014f9532 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -819,18 +819,6 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
mutex_unlock(&trampoline_mutex);
}
-static __always_inline u64 notrace bpf_prog_start_time(void)
-{
- u64 start = BPF_PROG_NO_START_TIME;
-
- if (static_branch_unlikely(&bpf_stats_enabled_key)) {
- start = sched_clock();
- if (unlikely(!start))
- start = BPF_PROG_NO_START_TIME;
- }
- return start;
-}
-
/* The logic is similar to bpf_prog_run(), but with an explicit
* rcu_read_lock() and migrate_disable() which are required
* for the trampoline. The macro is split into
--
2.41.0
^ permalink raw reply related [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 02/12] bpf: Move bpf_prog_start_time to linux/filter.h
2023-08-28 7:55 ` [PATCH bpf-next 02/12] bpf: Move bpf_prog_start_time to linux/filter.h Jiri Olsa
@ 2023-09-04 13:12 ` Hou Tao
0 siblings, 0 replies; 34+ messages in thread
From: Hou Tao @ 2023-09-04 13:12 UTC (permalink / raw)
To: Jiri Olsa
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu,
Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> Moving bpf_prog_start_time to linux/filter.h and making it
> globally available.
>
> It will be used by other program types in following changes.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Hou Tao <houtao1@huawei.com>
^ permalink raw reply [flat|nested] 34+ messages in thread
* [PATCH bpf-next 03/12] bpf: Count stats for kprobe_multi programs
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
2023-08-28 7:55 ` [PATCH bpf-next 01/12] bpf: Move update_prog_stats to syscall object Jiri Olsa
2023-08-28 7:55 ` [PATCH bpf-next 02/12] bpf: Move bpf_prog_start_time to linux/filter.h Jiri Olsa
@ 2023-08-28 7:55 ` Jiri Olsa
2023-09-04 13:30 ` Hou Tao
2023-09-05 6:15 ` Hou Tao
2023-08-28 7:55 ` [PATCH bpf-next 04/12] bpf: Add missed value to kprobe_multi link info Jiri Olsa
` (9 subsequent siblings)
12 siblings, 2 replies; 34+ messages in thread
From: Jiri Olsa @ 2023-08-28 7:55 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
Add support for gathering stats for kprobe_multi programs.
We now count:
- missed stats due to bpf_prog_active protection (always)
- cnt/nsec of the bpf program execution (if kernel.bpf_stats_enabled=1)
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
kernel/trace/bpf_trace.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index a7264b2c17ad..0a8685fc1eee 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2706,18 +2706,24 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
.link = link,
.entry_ip = entry_ip,
};
+ struct bpf_prog *prog = link->link.prog;
struct bpf_run_ctx *old_run_ctx;
+ u64 start;
int err;
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
+ bpf_prog_inc_misses_counter(prog);
err = 0;
goto out;
}
+
migrate_disable();
rcu_read_lock();
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
- err = bpf_prog_run(link->link.prog, regs);
+ start = bpf_prog_start_time();
+ err = bpf_prog_run(prog, regs);
+ bpf_prog_update_prog_stats(prog, start);
bpf_reset_run_ctx(old_run_ctx);
rcu_read_unlock();
migrate_enable();
--
2.41.0
^ permalink raw reply related [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 03/12] bpf: Count stats for kprobe_multi programs
2023-08-28 7:55 ` [PATCH bpf-next 03/12] bpf: Count stats for kprobe_multi programs Jiri Olsa
@ 2023-09-04 13:30 ` Hou Tao
2023-09-05 6:15 ` Hou Tao
1 sibling, 0 replies; 34+ messages in thread
From: Hou Tao @ 2023-09-04 13:30 UTC (permalink / raw)
To: Jiri Olsa, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu
On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> Adding support to gather stats for kprobe_multi programs.
>
> We now count:
> - missed stats due to bpf_prog_active protection (always)
> - cnt/nsec of the bpf program execution (if kernel.bpf_stats_enabled=1)
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Hou Tao <houtao1@huawei.com>
With one nit below.
> ---
> kernel/trace/bpf_trace.c | 8 +++++++-
> 1 file changed, 7 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index a7264b2c17ad..0a8685fc1eee 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -2706,18 +2706,24 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
> .link = link,
> .entry_ip = entry_ip,
> };
> + struct bpf_prog *prog = link->link.prog;
> struct bpf_run_ctx *old_run_ctx;
> + u64 start;
> int err;
>
> if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
> + bpf_prog_inc_misses_counter(prog);
> err = 0;
> goto out;
> }
>
> +
The extra empty line is not needed here.
^ permalink raw reply [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 03/12] bpf: Count stats for kprobe_multi programs
2023-08-28 7:55 ` [PATCH bpf-next 03/12] bpf: Count stats for kprobe_multi programs Jiri Olsa
2023-09-04 13:30 ` Hou Tao
@ 2023-09-05 6:15 ` Hou Tao
2023-09-05 7:19 ` Jiri Olsa
1 sibling, 1 reply; 34+ messages in thread
From: Hou Tao @ 2023-09-05 6:15 UTC (permalink / raw)
To: Jiri Olsa
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu,
Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Hi,
On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> Adding support to gather stats for kprobe_multi programs.
>
> We now count:
> - missed stats due to bpf_prog_active protection (always)
> - cnt/nsec of the bpf program execution (if kernel.bpf_stats_enabled=1)
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
> kernel/trace/bpf_trace.c | 8 +++++++-
> 1 file changed, 7 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index a7264b2c17ad..0a8685fc1eee 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -2706,18 +2706,24 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
> .link = link,
> .entry_ip = entry_ip,
> };
> + struct bpf_prog *prog = link->link.prog;
> struct bpf_run_ctx *old_run_ctx;
> + u64 start;
> int err;
>
> if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
> + bpf_prog_inc_misses_counter(prog);
> err = 0;
> goto out;
> }
>
> +
> migrate_disable();
> rcu_read_lock();
> old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> - err = bpf_prog_run(link->link.prog, regs);
> + start = bpf_prog_start_time();
> + err = bpf_prog_run(prog, regs);
> + bpf_prog_update_prog_stats(prog, start);
Oops, I missed the bpf_prog_run() here. It seems that bpf_prog_run() has
already done the accounting thing, so there is no need for double
accounting.
> bpf_reset_run_ctx(old_run_ctx);
> rcu_read_unlock();
> migrate_enable();
^ permalink raw reply [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 03/12] bpf: Count stats for kprobe_multi programs
2023-09-05 6:15 ` Hou Tao
@ 2023-09-05 7:19 ` Jiri Olsa
0 siblings, 0 replies; 34+ messages in thread
From: Jiri Olsa @ 2023-09-05 7:19 UTC (permalink / raw)
To: Hou Tao
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu,
Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
On Tue, Sep 05, 2023 at 02:15:49PM +0800, Hou Tao wrote:
> Hi,
>
> On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> > Adding support to gather stats for kprobe_multi programs.
> >
> > We now count:
> > - missed stats due to bpf_prog_active protection (always)
> > - cnt/nsec of the bpf program execution (if kernel.bpf_stats_enabled=1)
> >
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> > kernel/trace/bpf_trace.c | 8 +++++++-
> > 1 file changed, 7 insertions(+), 1 deletion(-)
> >
> > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> > index a7264b2c17ad..0a8685fc1eee 100644
> > --- a/kernel/trace/bpf_trace.c
> > +++ b/kernel/trace/bpf_trace.c
> > @@ -2706,18 +2706,24 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
> > .link = link,
> > .entry_ip = entry_ip,
> > };
> > + struct bpf_prog *prog = link->link.prog;
> > struct bpf_run_ctx *old_run_ctx;
> > + u64 start;
> > int err;
> >
> > if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
> > + bpf_prog_inc_misses_counter(prog);
> > err = 0;
> > goto out;
> > }
> >
> > +
> > migrate_disable();
> > rcu_read_lock();
> > old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> > - err = bpf_prog_run(link->link.prog, regs);
> > + start = bpf_prog_start_time();
> > + err = bpf_prog_run(prog, regs);
> > + bpf_prog_update_prog_stats(prog, start);
>
> Oops, I missed the bpf_prog_run() here. It seems that bpf_prog_run() has
> already done the accounting thing, so there is no need for double
> accounting.
right, same as the other change, thanks
jirka
> > bpf_reset_run_ctx(old_run_ctx);
> > rcu_read_unlock();
> > migrate_enable();
>
^ permalink raw reply [flat|nested] 34+ messages in thread
* [PATCH bpf-next 04/12] bpf: Add missed value to kprobe_multi link info
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
` (2 preceding siblings ...)
2023-08-28 7:55 ` [PATCH bpf-next 03/12] bpf: Count stats for kprobe_multi programs Jiri Olsa
@ 2023-08-28 7:55 ` Jiri Olsa
2023-09-04 13:33 ` Hou Tao
2023-08-28 7:55 ` [PATCH bpf-next 05/12] bpf: Add missed value to kprobe perf " Jiri Olsa
` (8 subsequent siblings)
12 siblings, 1 reply; 34+ messages in thread
From: Jiri Olsa @ 2023-08-28 7:55 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
Add a 'missed' value to the kprobe_multi link info to hold the count
of missed kprobe_multi probe executions.
The missed counter gets incremented when fprobe fails the recursion
check or there is no rethook available for the return probe. In either
case the attached bpf program is not executed.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
include/uapi/linux/bpf.h | 1 +
kernel/trace/bpf_trace.c | 1 +
tools/include/uapi/linux/bpf.h | 1 +
3 files changed, 3 insertions(+)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 8790b3962e4b..b754edfb0cd7 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -6523,6 +6523,7 @@ struct bpf_link_info {
__aligned_u64 addrs;
__u32 count; /* in/out: kprobe_multi function count */
__u32 flags;
+ __u64 missed;
} kprobe_multi;
struct {
__u32 type; /* enum bpf_perf_event_type */
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0a8685fc1eee..0eaec3c4a5fd 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2614,6 +2614,7 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
info->kprobe_multi.count = kmulti_link->cnt;
info->kprobe_multi.flags = kmulti_link->flags;
+ info->kprobe_multi.missed = kmulti_link->fp.nmissed;
if (!uaddrs)
return 0;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 8790b3962e4b..b754edfb0cd7 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -6523,6 +6523,7 @@ struct bpf_link_info {
__aligned_u64 addrs;
__u32 count; /* in/out: kprobe_multi function count */
__u32 flags;
+ __u64 missed;
} kprobe_multi;
struct {
__u32 type; /* enum bpf_perf_event_type */
--
2.41.0
^ permalink raw reply related [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 04/12] bpf: Add missed value to kprobe_multi link info
2023-08-28 7:55 ` [PATCH bpf-next 04/12] bpf: Add missed value to kprobe_multi link info Jiri Olsa
@ 2023-09-04 13:33 ` Hou Tao
0 siblings, 0 replies; 34+ messages in thread
From: Hou Tao @ 2023-09-04 13:33 UTC (permalink / raw)
To: Jiri Olsa, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu
On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> Add missed value to kprobe_multi link info to hold the stats of missed
> kprobe_multi probe.
>
> The missed counter gets incremented when fprobe fails the recursion
> check or there's no rethook available for return probe. In either
> case the attached bpf program is not executed.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Hou Tao <houtao1@huawei.com>
^ permalink raw reply [flat|nested] 34+ messages in thread
* [PATCH bpf-next 05/12] bpf: Add missed value to kprobe perf link info
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
` (3 preceding siblings ...)
2023-08-28 7:55 ` [PATCH bpf-next 04/12] bpf: Add missed value to kprobe_multi link info Jiri Olsa
@ 2023-08-28 7:55 ` Jiri Olsa
2023-09-05 2:23 ` Hou Tao
2023-08-28 7:55 ` [PATCH bpf-next 06/12] bpf: Count missed stats in trace_call_bpf Jiri Olsa
` (7 subsequent siblings)
12 siblings, 1 reply; 34+ messages in thread
From: Jiri Olsa @ 2023-08-28 7:55 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
Add a 'missed' value to the link info of a kprobe attached through a
perf link, to hold the count of missed kprobe handler executions.
The kprobe's missed counter gets incremented when the kprobe handler
is not executed because another kprobe is running on the same cpu.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
include/linux/trace_events.h | 6 ++++--
include/uapi/linux/bpf.h | 1 +
kernel/bpf/syscall.c | 14 ++++++++------
kernel/trace/bpf_trace.c | 5 +++--
kernel/trace/trace_kprobe.c | 5 ++++-
tools/include/uapi/linux/bpf.h | 1 +
6 files changed, 21 insertions(+), 11 deletions(-)
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 5b85cf18c350..3917ddcf90bf 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -750,7 +750,8 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
- u64 *probe_offset, u64 *probe_addr);
+ u64 *probe_offset, u64 *probe_addr,
+ unsigned long *missed);
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
#else
@@ -790,7 +791,7 @@ static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
static inline int bpf_get_perf_event_info(const struct perf_event *event,
u32 *prog_id, u32 *fd_type,
const char **buf, u64 *probe_offset,
- u64 *probe_addr)
+ u64 *probe_addr, unsigned long *missed)
{
return -EOPNOTSUPP;
}
@@ -865,6 +866,7 @@ extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
u32 *fd_type, const char **symbol,
u64 *probe_offset, u64 *probe_addr,
+ unsigned long *missed,
bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index b754edfb0cd7..5a39c7a13499 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -6539,6 +6539,7 @@ struct bpf_link_info {
__u32 name_len;
__u32 offset; /* offset from func_name */
__u64 addr;
+ __u64 missed;
} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
struct {
__aligned_u64 tp_name; /* in/out */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5d39d98f5eb1..775aeb869cfb 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3392,7 +3392,7 @@ static void bpf_perf_link_dealloc(struct bpf_link *link)
static int bpf_perf_link_fill_common(const struct perf_event *event,
char __user *uname, u32 ulen,
u64 *probe_offset, u64 *probe_addr,
- u32 *fd_type)
+ u32 *fd_type, unsigned long *missed)
{
const char *buf;
u32 prog_id;
@@ -3403,7 +3403,7 @@ static int bpf_perf_link_fill_common(const struct perf_event *event,
return -EINVAL;
err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
- probe_offset, probe_addr);
+ probe_offset, probe_addr, missed);
if (err)
return err;
if (!uname)
@@ -3426,6 +3426,7 @@ static int bpf_perf_link_fill_common(const struct perf_event *event,
static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
struct bpf_link_info *info)
{
+ unsigned long missed;
char __user *uname;
u64 addr, offset;
u32 ulen, type;
@@ -3434,7 +3435,7 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
ulen = info->perf_event.kprobe.name_len;
err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
- &type);
+ &type, &missed);
if (err)
return err;
if (type == BPF_FD_TYPE_KRETPROBE)
@@ -3443,6 +3444,7 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
info->perf_event.type = BPF_PERF_EVENT_KPROBE;
info->perf_event.kprobe.offset = offset;
+ info->perf_event.kprobe.missed = missed;
if (!kallsyms_show_value(current_cred()))
addr = 0;
info->perf_event.kprobe.addr = addr;
@@ -3462,7 +3464,7 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
ulen = info->perf_event.uprobe.name_len;
err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
- &type);
+ &type, NULL);
if (err)
return err;
@@ -3498,7 +3500,7 @@ static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
ulen = info->perf_event.tracepoint.name_len;
info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
- return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL);
+ return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL);
}
static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
@@ -4831,7 +4833,7 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
&buf, &probe_offset,
- &probe_addr);
+ &probe_addr, NULL);
if (!err)
err = bpf_task_fd_query_copy(attr, uattr, prog_id,
fd_type, buf,
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0eaec3c4a5fd..cde6360bf8e8 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2384,7 +2384,8 @@ int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
- u64 *probe_offset, u64 *probe_addr)
+ u64 *probe_offset, u64 *probe_addr,
+ unsigned long *missed)
{
bool is_tracepoint, is_syscall_tp;
struct bpf_prog *prog;
@@ -2419,7 +2420,7 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
#ifdef CONFIG_KPROBE_EVENTS
if (flags & TRACE_EVENT_FL_KPROBE)
err = bpf_get_kprobe_info(event, fd_type, buf,
- probe_offset, probe_addr,
+ probe_offset, probe_addr, missed,
event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 17c21c0b2dd1..998c88874507 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1546,7 +1546,8 @@ NOKPROBE_SYMBOL(kretprobe_perf_func);
int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
const char **symbol, u64 *probe_offset,
- u64 *probe_addr, bool perf_type_tracepoint)
+ u64 *probe_addr, unsigned long *missed,
+ bool perf_type_tracepoint)
{
const char *pevent = trace_event_name(event->tp_event);
const char *group = event->tp_event->class->system;
@@ -1565,6 +1566,8 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
*probe_addr = kallsyms_show_value(current_cred()) ?
(unsigned long)tk->rp.kp.addr : 0;
*symbol = tk->symbol;
+ if (missed)
+ *missed = tk->rp.kp.nmissed;
return 0;
}
#endif /* CONFIG_PERF_EVENTS */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index b754edfb0cd7..5a39c7a13499 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -6539,6 +6539,7 @@ struct bpf_link_info {
__u32 name_len;
__u32 offset; /* offset from func_name */
__u64 addr;
+ __u64 missed;
} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
struct {
__aligned_u64 tp_name; /* in/out */
--
2.41.0
^ permalink raw reply related [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 05/12] bpf: Add missed value to kprobe perf link info
2023-08-28 7:55 ` [PATCH bpf-next 05/12] bpf: Add missed value to kprobe perf " Jiri Olsa
@ 2023-09-05 2:23 ` Hou Tao
2023-09-05 7:19 ` Jiri Olsa
0 siblings, 1 reply; 34+ messages in thread
From: Hou Tao @ 2023-09-05 2:23 UTC (permalink / raw)
To: Jiri Olsa
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu,
Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Hi,
On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> Add missed value to kprobe attached through perf link info to
> hold the stats of missed kprobe handler execution.
>
> The kprobe's missed counter gets incremented when kprobe handler
> is not executed due to another kprobe running on the same cpu.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
> include/linux/trace_events.h | 6 ++++--
> include/uapi/linux/bpf.h | 1 +
> kernel/bpf/syscall.c | 14 ++++++++------
> kernel/trace/bpf_trace.c | 5 +++--
> kernel/trace/trace_kprobe.c | 5 ++++-
> tools/include/uapi/linux/bpf.h | 1 +
> 6 files changed, 21 insertions(+), 11 deletions(-)
>
SNIP
> diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
> index 17c21c0b2dd1..998c88874507 100644
> --- a/kernel/trace/trace_kprobe.c
> +++ b/kernel/trace/trace_kprobe.c
> @@ -1546,7 +1546,8 @@ NOKPROBE_SYMBOL(kretprobe_perf_func);
>
> int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
> const char **symbol, u64 *probe_offset,
> - u64 *probe_addr, bool perf_type_tracepoint)
> + u64 *probe_addr, unsigned long *missed,
> + bool perf_type_tracepoint)
> {
> const char *pevent = trace_event_name(event->tp_event);
> const char *group = event->tp_event->class->system;
> @@ -1565,6 +1566,8 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
> *probe_addr = kallsyms_show_value(current_cred()) ?
> (unsigned long)tk->rp.kp.addr : 0;
> *symbol = tk->symbol;
> + if (missed)
> + *missed = tk->rp.kp.nmissed;
According to the implementation of probes_profile_seq_show(), the missed
counter for kretprobe should be tk->rp.kp.nmissed + tk->rp.nmissed. I
think it would be a good idea to factor out a common helper to get the
missed counter for kprobe or kretprobe.
> return 0;
> }
> #endif /* CONFIG_PERF_EVENTS */
> diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
> index b754edfb0cd7..5a39c7a13499 100644
> --- a/tools/include/uapi/linux/bpf.h
> +++ b/tools/include/uapi/linux/bpf.h
> @@ -6539,6 +6539,7 @@ struct bpf_link_info {
> __u32 name_len;
> __u32 offset; /* offset from func_name */
> __u64 addr;
> + __u64 missed;
> } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
> struct {
> __aligned_u64 tp_name; /* in/out */
^ permalink raw reply [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 05/12] bpf: Add missed value to kprobe perf link info
2023-09-05 2:23 ` Hou Tao
@ 2023-09-05 7:19 ` Jiri Olsa
0 siblings, 0 replies; 34+ messages in thread
From: Jiri Olsa @ 2023-09-05 7:19 UTC (permalink / raw)
To: Hou Tao
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu,
Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
On Tue, Sep 05, 2023 at 10:23:27AM +0800, Hou Tao wrote:
> Hi,
>
> On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> > Add missed value to kprobe attached through perf link info to
> > hold the stats of missed kprobe handler execution.
> >
> > The kprobe's missed counter gets incremented when kprobe handler
> > is not executed due to another kprobe running on the same cpu.
> >
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> > include/linux/trace_events.h | 6 ++++--
> > include/uapi/linux/bpf.h | 1 +
> > kernel/bpf/syscall.c | 14 ++++++++------
> > kernel/trace/bpf_trace.c | 5 +++--
> > kernel/trace/trace_kprobe.c | 5 ++++-
> > tools/include/uapi/linux/bpf.h | 1 +
> > 6 files changed, 21 insertions(+), 11 deletions(-)
> >
>
> SNIP
> > diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
> > index 17c21c0b2dd1..998c88874507 100644
> > --- a/kernel/trace/trace_kprobe.c
> > +++ b/kernel/trace/trace_kprobe.c
> > @@ -1546,7 +1546,8 @@ NOKPROBE_SYMBOL(kretprobe_perf_func);
> >
> > int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
> > const char **symbol, u64 *probe_offset,
> > - u64 *probe_addr, bool perf_type_tracepoint)
> > + u64 *probe_addr, unsigned long *missed,
> > + bool perf_type_tracepoint)
> > {
> > const char *pevent = trace_event_name(event->tp_event);
> > const char *group = event->tp_event->class->system;
> > @@ -1565,6 +1566,8 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
> > *probe_addr = kallsyms_show_value(current_cred()) ?
> > (unsigned long)tk->rp.kp.addr : 0;
> > *symbol = tk->symbol;
> > + if (missed)
> > + *missed = tk->rp.kp.nmissed;
>
> According to the implement of probes_profile_seq_show(), the missed
> counter for kretprobe should be tk->rp.kp.nmissed + tk->rp.nmissed. I
> think it would be a good idea to factor out a common helper to get the
> missed counter for kprobe or kretprobe.
ok, makes sense.. will check, I was also thinking to move
bpf_get_kprobe_info args into struct
jirka
> > return 0;
> > }
> > #endif /* CONFIG_PERF_EVENTS */
> > diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
> > index b754edfb0cd7..5a39c7a13499 100644
> > --- a/tools/include/uapi/linux/bpf.h
> > +++ b/tools/include/uapi/linux/bpf.h
> > @@ -6539,6 +6539,7 @@ struct bpf_link_info {
> > __u32 name_len;
> > __u32 offset; /* offset from func_name */
> > __u64 addr;
> > + __u64 missed;
> > } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
> > struct {
> > __aligned_u64 tp_name; /* in/out */
>
^ permalink raw reply [flat|nested] 34+ messages in thread
* [PATCH bpf-next 06/12] bpf: Count missed stats in trace_call_bpf
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
` (4 preceding siblings ...)
2023-08-28 7:55 ` [PATCH bpf-next 05/12] bpf: Add missed value to kprobe perf " Jiri Olsa
@ 2023-08-28 7:55 ` Jiri Olsa
2023-08-28 17:32 ` Alexei Starovoitov
2023-08-28 7:55 ` [PATCH bpf-next 07/12] bpf: Move bpf_prog_run_array down in the header file Jiri Olsa
` (6 subsequent siblings)
12 siblings, 1 reply; 34+ messages in thread
From: Jiri Olsa @ 2023-08-28 7:55 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
Increase misses stats in case bpf array execution is skipped
because of recursion check in trace_call_bpf.
Adding bpf_prog_missed_array that increases the misses counts for
all bpf programs in bpf_prog_array.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
include/linux/bpf.h | 16 ++++++++++++++++
kernel/trace/bpf_trace.c | 3 +++
2 files changed, 19 insertions(+)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 23a73f52c7bc..71154e991730 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2932,6 +2932,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
+static __always_inline void
+bpf_prog_missed_array(const struct bpf_prog_array *array)
+{
+ const struct bpf_prog_array_item *item;
+ struct bpf_prog *prog;
+
+ if (unlikely(!array))
+ return;
+
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ bpf_prog_inc_misses_counter(prog);
+ item++;
+ }
+}
+
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index cde6360bf8e8..7961f9d9dd13 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -117,6 +117,9 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
* and don't send kprobe event into ring-buffer,
* so return zero here
*/
+ rcu_read_lock();
+ bpf_prog_missed_array(rcu_dereference(call->prog_array));
+ rcu_read_unlock();
ret = 0;
goto out;
}
--
2.41.0
^ permalink raw reply related [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 06/12] bpf: Count missed stats in trace_call_bpf
2023-08-28 7:55 ` [PATCH bpf-next 06/12] bpf: Count missed stats in trace_call_bpf Jiri Olsa
@ 2023-08-28 17:32 ` Alexei Starovoitov
2023-08-29 8:04 ` Jiri Olsa
0 siblings, 1 reply; 34+ messages in thread
From: Alexei Starovoitov @ 2023-08-28 17:32 UTC (permalink / raw)
To: Jiri Olsa
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf,
Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
On Mon, Aug 28, 2023 at 12:56 AM Jiri Olsa <jolsa@kernel.org> wrote:
>
> Increase misses stats in case bpf array execution is skipped
> because of recursion check in trace_call_bpf.
>
> Adding bpf_prog_missed_array that increase misses counts for
> all bpf programs in bpf_prog_array.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
> include/linux/bpf.h | 16 ++++++++++++++++
> kernel/trace/bpf_trace.c | 3 +++
> 2 files changed, 19 insertions(+)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 23a73f52c7bc..71154e991730 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -2932,6 +2932,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
> #endif /* CONFIG_BPF_SYSCALL */
> #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
>
> +static __always_inline void
> +bpf_prog_missed_array(const struct bpf_prog_array *array)
The name hardly explains the purpose.
Please give it a better name.
Maybe bpf_prog_inc_misses_counters ?
Just extra "s".
^ permalink raw reply [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 06/12] bpf: Count missed stats in trace_call_bpf
2023-08-28 17:32 ` Alexei Starovoitov
@ 2023-08-29 8:04 ` Jiri Olsa
0 siblings, 0 replies; 34+ messages in thread
From: Jiri Olsa @ 2023-08-29 8:04 UTC (permalink / raw)
To: Alexei Starovoitov
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf,
Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
On Mon, Aug 28, 2023 at 10:32:05AM -0700, Alexei Starovoitov wrote:
> On Mon, Aug 28, 2023 at 12:56 AM Jiri Olsa <jolsa@kernel.org> wrote:
> >
> > Increase misses stats in case bpf array execution is skipped
> > because of recursion check in trace_call_bpf.
> >
> > Adding bpf_prog_missed_array that increase misses counts for
> > all bpf programs in bpf_prog_array.
> >
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> > include/linux/bpf.h | 16 ++++++++++++++++
> > kernel/trace/bpf_trace.c | 3 +++
> > 2 files changed, 19 insertions(+)
> >
> > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > index 23a73f52c7bc..71154e991730 100644
> > --- a/include/linux/bpf.h
> > +++ b/include/linux/bpf.h
> > @@ -2932,6 +2932,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
> > #endif /* CONFIG_BPF_SYSCALL */
> > #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
> >
> > +static __always_inline void
> > +bpf_prog_missed_array(const struct bpf_prog_array *array)
>
> The name hardly explains the purpose.
> Please give it a better name.
> Maybe bpf_prog_inc_misses_counters ?
> Just extra "s".
I thought making it similar to bpf_prog_run_array,
but bpf_prog_inc_misses_counters sounds better
thanks,
jirka
^ permalink raw reply [flat|nested] 34+ messages in thread
* [PATCH bpf-next 07/12] bpf: Move bpf_prog_run_array down in the header file
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
` (5 preceding siblings ...)
2023-08-28 7:55 ` [PATCH bpf-next 06/12] bpf: Count missed stats in trace_call_bpf Jiri Olsa
@ 2023-08-28 7:55 ` Jiri Olsa
2023-08-28 7:55 ` [PATCH bpf-next 08/12] bpf: Count run stats in bpf_prog_run_array Jiri Olsa
` (5 subsequent siblings)
12 siblings, 0 replies; 34+ messages in thread
From: Jiri Olsa @ 2023-08-28 7:55 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
Moving bpf_prog_run_array down in the header file so we can
easily use bpf_prog_start_time and bpf_prog_update_prog_stats
functions in bpf_prog_run_array in following change.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
include/linux/bpf.h | 60 ++++++++++++++++++++++-----------------------
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 71154e991730..478fdc4794c9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1904,36 +1904,6 @@ static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
-static __always_inline u32
-bpf_prog_run_array(const struct bpf_prog_array *array,
- const void *ctx, bpf_prog_run_fn run_prog)
-{
- const struct bpf_prog_array_item *item;
- const struct bpf_prog *prog;
- struct bpf_run_ctx *old_run_ctx;
- struct bpf_trace_run_ctx run_ctx;
- u32 ret = 1;
-
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
-
- if (unlikely(!array))
- return ret;
-
- run_ctx.is_uprobe = false;
-
- migrate_disable();
- old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
- item = &array->items[0];
- while ((prog = READ_ONCE(item->prog))) {
- run_ctx.bpf_cookie = item->bpf_cookie;
- ret &= run_prog(prog, ctx);
- item++;
- }
- bpf_reset_run_ctx(old_run_ctx);
- migrate_enable();
- return ret;
-}
-
/* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
*
* We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
@@ -2740,6 +2710,36 @@ static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
}
#endif /* CONFIG_BPF_SYSCALL */
+static __always_inline u32
+bpf_prog_run_array(const struct bpf_prog_array *array,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+ u32 ret = 1;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
+
+ if (unlikely(!array))
+ return ret;
+
+ run_ctx.is_uprobe = false;
+
+ migrate_disable();
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ run_ctx.bpf_cookie = item->bpf_cookie;
+ ret &= run_prog(prog, ctx);
+ item++;
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+ migrate_enable();
+ return ret;
+}
+
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
--
2.41.0
^ permalink raw reply related [flat|nested] 34+ messages in thread
* [PATCH bpf-next 08/12] bpf: Count run stats in bpf_prog_run_array
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
` (6 preceding siblings ...)
2023-08-28 7:55 ` [PATCH bpf-next 07/12] bpf: Move bpf_prog_run_array down in the header file Jiri Olsa
@ 2023-08-28 7:55 ` Jiri Olsa
2023-09-05 2:40 ` Hou Tao
2023-08-28 7:55 ` [PATCH bpf-next 09/12] bpftool: Display missed count for kprobe_multi link Jiri Olsa
` (4 subsequent siblings)
12 siblings, 1 reply; 34+ messages in thread
From: Jiri Olsa @ 2023-08-28 7:55 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
Count runtime stats for bpf programs executed through the bpf_prog_run_array
function. That covers kprobe, perf event and trace syscall probes.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
include/linux/bpf.h | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 478fdc4794c9..732253eea675 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2715,10 +2715,11 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
const void *ctx, bpf_prog_run_fn run_prog)
{
const struct bpf_prog_array_item *item;
- const struct bpf_prog *prog;
+ struct bpf_prog *prog;
struct bpf_run_ctx *old_run_ctx;
struct bpf_trace_run_ctx run_ctx;
u32 ret = 1;
+ u64 start;
RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
@@ -2732,7 +2733,9 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
item = &array->items[0];
while ((prog = READ_ONCE(item->prog))) {
run_ctx.bpf_cookie = item->bpf_cookie;
+ start = bpf_prog_start_time();
ret &= run_prog(prog, ctx);
+ bpf_prog_update_prog_stats(prog, start);
item++;
}
bpf_reset_run_ctx(old_run_ctx);
--
2.41.0
^ permalink raw reply related [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 08/12] bpf: Count run stats in bpf_prog_run_array
2023-08-28 7:55 ` [PATCH bpf-next 08/12] bpf: Count run stats in bpf_prog_run_array Jiri Olsa
@ 2023-09-05 2:40 ` Hou Tao
2023-09-05 7:19 ` Jiri Olsa
0 siblings, 1 reply; 34+ messages in thread
From: Hou Tao @ 2023-09-05 2:40 UTC (permalink / raw)
To: Jiri Olsa
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu,
Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Hi,
On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> Count runtime stats for bf programs executed through bpf_prog_run_array
> function. That covers kprobe, perf event and trace syscall probe.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
> include/linux/bpf.h | 5 ++++-
> 1 file changed, 4 insertions(+), 1 deletion(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 478fdc4794c9..732253eea675 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -2715,10 +2715,11 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
> const void *ctx, bpf_prog_run_fn run_prog)
> {
> const struct bpf_prog_array_item *item;
> - const struct bpf_prog *prog;
> + struct bpf_prog *prog;
> struct bpf_run_ctx *old_run_ctx;
> struct bpf_trace_run_ctx run_ctx;
> u32 ret = 1;
> + u64 start;
>
> RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
>
> @@ -2732,7 +2733,9 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
> item = &array->items[0];
> while ((prog = READ_ONCE(item->prog))) {
> run_ctx.bpf_cookie = item->bpf_cookie;
> + start = bpf_prog_start_time();
> ret &= run_prog(prog, ctx);
> + bpf_prog_update_prog_stats(prog, start);
> item++;
> }
bpf_prog_run() has already accounted the running count and the consumed
time for the prog, so I think both previous patch and this patch are not
needed.
> bpf_reset_run_ctx(old_run_ctx);
^ permalink raw reply [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 08/12] bpf: Count run stats in bpf_prog_run_array
2023-09-05 2:40 ` Hou Tao
@ 2023-09-05 7:19 ` Jiri Olsa
0 siblings, 0 replies; 34+ messages in thread
From: Jiri Olsa @ 2023-09-05 7:19 UTC (permalink / raw)
To: Hou Tao
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu,
Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
On Tue, Sep 05, 2023 at 10:40:01AM +0800, Hou Tao wrote:
> Hi,
>
> On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> > Count runtime stats for bf programs executed through bpf_prog_run_array
> > function. That covers kprobe, perf event and trace syscall probe.
> >
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> > include/linux/bpf.h | 5 ++++-
> > 1 file changed, 4 insertions(+), 1 deletion(-)
> >
> > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > index 478fdc4794c9..732253eea675 100644
> > --- a/include/linux/bpf.h
> > +++ b/include/linux/bpf.h
> > @@ -2715,10 +2715,11 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
> > const void *ctx, bpf_prog_run_fn run_prog)
> > {
> > const struct bpf_prog_array_item *item;
> > - const struct bpf_prog *prog;
> > + struct bpf_prog *prog;
> > struct bpf_run_ctx *old_run_ctx;
> > struct bpf_trace_run_ctx run_ctx;
> > u32 ret = 1;
> > + u64 start;
> >
> > RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
> >
> > @@ -2732,7 +2733,9 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
> > item = &array->items[0];
> > while ((prog = READ_ONCE(item->prog))) {
> > run_ctx.bpf_cookie = item->bpf_cookie;
> > + start = bpf_prog_start_time();
> > ret &= run_prog(prog, ctx);
> > + bpf_prog_update_prog_stats(prog, start);
> > item++;
> > }
>
> bpf_prog_run() has already accounted the running count and the consumed
> time for the prog, so I think both previous patch and this patch are not
> needed.
ugh right, I missed that.. thanks
jirka
>
> > bpf_reset_run_ctx(old_run_ctx);
>
^ permalink raw reply [flat|nested] 34+ messages in thread
* [PATCH bpf-next 09/12] bpftool: Display missed count for kprobe_multi link
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
` (7 preceding siblings ...)
2023-08-28 7:55 ` [PATCH bpf-next 08/12] bpf: Count run stats in bpf_prog_run_array Jiri Olsa
@ 2023-08-28 7:55 ` Jiri Olsa
2023-08-29 16:40 ` Quentin Monnet
2023-08-28 7:55 ` [PATCH bpf-next 10/12] bpftool: Display missed count for kprobe perf link Jiri Olsa
` (3 subsequent siblings)
12 siblings, 1 reply; 34+ messages in thread
From: Jiri Olsa @ 2023-08-28 7:55 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
Adding 'missed' field to display missed counts for kprobes
attached by kprobe multi link, like:
# bpftool link
5: kprobe_multi prog 76
kprobe.multi func_cnt 1 missed 1
addr func [module]
ffffffffa039c030 fp3_test [fprobe_test]
# bpftool link -jp
[{
"id": 5,
"type": "kprobe_multi",
"prog_id": 76,
"retprobe": false,
"func_cnt": 1,
"missed": 1,
"funcs": [{
"addr": 18446744072102723632,
"func": "fp3_test",
"module": "fprobe_test"
}
]
}
]
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
tools/bpf/bpftool/link.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index 0b214f6ab5c8..7387e51a5e5c 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -265,6 +265,7 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
jsonw_bool_field(json_wtr, "retprobe",
info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
+ jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed);
jsonw_name(json_wtr, "funcs");
jsonw_start_array(json_wtr);
addrs = u64_to_ptr(info->kprobe_multi.addrs);
@@ -640,7 +641,9 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info)
printf("\n\tkretprobe.multi ");
else
printf("\n\tkprobe.multi ");
- printf("func_cnt %u ", info->kprobe_multi.count);
+ printf("func_cnt %u", info->kprobe_multi.count);
+ if (info->kprobe_multi.missed)
+ printf(" missed %llu", info->kprobe_multi.missed);
addrs = (__u64 *)u64_to_ptr(info->kprobe_multi.addrs);
qsort(addrs, info->kprobe_multi.count, sizeof(__u64), cmp_u64);
--
2.41.0
^ permalink raw reply related [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 09/12] bpftool: Display missed count for kprobe_multi link
2023-08-28 7:55 ` [PATCH bpf-next 09/12] bpftool: Display missed count for kprobe_multi link Jiri Olsa
@ 2023-08-29 16:40 ` Quentin Monnet
2023-08-30 10:46 ` Jiri Olsa
0 siblings, 1 reply; 34+ messages in thread
From: Quentin Monnet @ 2023-08-29 16:40 UTC (permalink / raw)
To: Jiri Olsa, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
On 28/08/2023 08:55, Jiri Olsa wrote:
> Adding 'missed' field to display missed counts for kprobes
> attached by kprobe multi link, like:
>
> # bpftool link
> 5: kprobe_multi prog 76
> kprobe.multi func_cnt 1 missed 1
> addr func [module]
> ffffffffa039c030 fp3_test [fprobe_test]
>
> # bpftool link -jp
> [{
> "id": 5,
> "type": "kprobe_multi",
> "prog_id": 76,
> "retprobe": false,
> "func_cnt": 1,
> "missed": 1,
> "funcs": [{
> "addr": 18446744072102723632,
> "func": "fp3_test",
> "module": "fprobe_test"
> }
> ]
> }
> ]
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
> tools/bpf/bpftool/link.c | 5 ++++-
> 1 file changed, 4 insertions(+), 1 deletion(-)
>
> diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
> index 0b214f6ab5c8..7387e51a5e5c 100644
> --- a/tools/bpf/bpftool/link.c
> +++ b/tools/bpf/bpftool/link.c
> @@ -265,6 +265,7 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
> jsonw_bool_field(json_wtr, "retprobe",
> info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
> jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
> + jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed);
> jsonw_name(json_wtr, "funcs");
> jsonw_start_array(json_wtr);
> addrs = u64_to_ptr(info->kprobe_multi.addrs);
> @@ -640,7 +641,9 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info)
> printf("\n\tkretprobe.multi ");
> else
> printf("\n\tkprobe.multi ");
> - printf("func_cnt %u ", info->kprobe_multi.count);
> + printf("func_cnt %u", info->kprobe_multi.count);
> + if (info->kprobe_multi.missed)
> + printf(" missed %llu", info->kprobe_multi.missed);
Nit: If you respin, please conserve the double space at the beginning of
" missed %llu", to visually help separate from the previous field in
the plain output.
Looks good otherwise, thanks!
Reviewed-by: Quentin Monnet <quentin@isovalent.com>
> addrs = (__u64 *)u64_to_ptr(info->kprobe_multi.addrs);
> qsort(addrs, info->kprobe_multi.count, sizeof(__u64), cmp_u64);
>
^ permalink raw reply [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 09/12] bpftool: Display missed count for kprobe_multi link
2023-08-29 16:40 ` Quentin Monnet
@ 2023-08-30 10:46 ` Jiri Olsa
0 siblings, 0 replies; 34+ messages in thread
From: Jiri Olsa @ 2023-08-30 10:46 UTC (permalink / raw)
To: Quentin Monnet
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf,
Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
On Tue, Aug 29, 2023 at 05:40:57PM +0100, Quentin Monnet wrote:
> On 28/08/2023 08:55, Jiri Olsa wrote:
> > Adding 'missed' field to display missed counts for kprobes
> > attached by kprobe multi link, like:
> >
> > # bpftool link
> > 5: kprobe_multi prog 76
> > kprobe.multi func_cnt 1 missed 1
> > addr func [module]
> > ffffffffa039c030 fp3_test [fprobe_test]
> >
> > # bpftool link -jp
> > [{
> > "id": 5,
> > "type": "kprobe_multi",
> > "prog_id": 76,
> > "retprobe": false,
> > "func_cnt": 1,
> > "missed": 1,
> > "funcs": [{
> > "addr": 18446744072102723632,
> > "func": "fp3_test",
> > "module": "fprobe_test"
> > }
> > ]
> > }
> > ]
> >
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> > tools/bpf/bpftool/link.c | 5 ++++-
> > 1 file changed, 4 insertions(+), 1 deletion(-)
> >
> > diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
> > index 0b214f6ab5c8..7387e51a5e5c 100644
> > --- a/tools/bpf/bpftool/link.c
> > +++ b/tools/bpf/bpftool/link.c
> > @@ -265,6 +265,7 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
> > jsonw_bool_field(json_wtr, "retprobe",
> > info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
> > jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
> > + jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed);
> > jsonw_name(json_wtr, "funcs");
> > jsonw_start_array(json_wtr);
> > addrs = u64_to_ptr(info->kprobe_multi.addrs);
> > @@ -640,7 +641,9 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info)
> > printf("\n\tkretprobe.multi ");
> > else
> > printf("\n\tkprobe.multi ");
> > - printf("func_cnt %u ", info->kprobe_multi.count);
> > + printf("func_cnt %u", info->kprobe_multi.count);
> > + if (info->kprobe_multi.missed)
> > + printf(" missed %llu", info->kprobe_multi.missed);
>
> Nit: If you respin, please conserve the double space at the beginning of
> " missed %llu", to visually help separate from the previous field in
> the plain output.
right, will fix that
>
> Looks good otherwise, thanks!
>
> Reviewed-by: Quentin Monnet <quentin@isovalent.com>
thanks,
jirka
>
> > addrs = (__u64 *)u64_to_ptr(info->kprobe_multi.addrs);
> > qsort(addrs, info->kprobe_multi.count, sizeof(__u64), cmp_u64);
> >
>
^ permalink raw reply [flat|nested] 34+ messages in thread
* [PATCH bpf-next 10/12] bpftool: Display missed count for kprobe perf link
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
` (8 preceding siblings ...)
2023-08-28 7:55 ` [PATCH bpf-next 09/12] bpftool: Display missed count for kprobe_multi link Jiri Olsa
@ 2023-08-28 7:55 ` Jiri Olsa
2023-08-29 16:42 ` Quentin Monnet
2023-08-28 7:55 ` [PATCH bpf-next 11/12] selftests/bpf: Add test missed counts of perf event link kprobe Jiri Olsa
` (2 subsequent siblings)
12 siblings, 1 reply; 34+ messages in thread
From: Jiri Olsa @ 2023-08-28 7:55 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
Adding 'missed' field to display missed counts for kprobes
attached by perf event link, like:
# bpftool link
5: perf_event prog 82
kprobe ffffffff815203e0 ksys_write
6: perf_event prog 83
kprobe ffffffff811d1e50 scheduler_tick missed 682217
# bpftool link -jp
[{
"id": 5,
"type": "perf_event",
"prog_id": 82,
"retprobe": false,
"addr": 18446744071584220128,
"func": "ksys_write",
"offset": 0,
"missed": 0
},{
"id": 6,
"type": "perf_event",
"prog_id": 83,
"retprobe": false,
"addr": 18446744071580753488,
"func": "scheduler_tick",
"offset": 0,
"missed": 693469
}
]
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
tools/bpf/bpftool/link.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index 7387e51a5e5c..d65129318f82 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -302,6 +302,7 @@ show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
jsonw_string_field(wtr, "func",
u64_to_ptr(info->perf_event.kprobe.func_name));
jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
+ jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed);
}
static void
@@ -686,6 +687,8 @@ static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
printf("%s", buf);
if (info->perf_event.kprobe.offset)
printf("+%#x", info->perf_event.kprobe.offset);
+ if (info->perf_event.kprobe.missed)
+ printf(" missed %llu", info->perf_event.kprobe.missed);
printf(" ");
}
--
2.41.0
^ permalink raw reply related [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 10/12] bpftool: Display missed count for kprobe perf link
2023-08-28 7:55 ` [PATCH bpf-next 10/12] bpftool: Display missed count for kprobe perf link Jiri Olsa
@ 2023-08-29 16:42 ` Quentin Monnet
2023-08-30 16:01 ` Jiri Olsa
0 siblings, 1 reply; 34+ messages in thread
From: Quentin Monnet @ 2023-08-29 16:42 UTC (permalink / raw)
To: Jiri Olsa, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
On 28/08/2023 08:55, Jiri Olsa wrote:
> Adding 'missed' field to display missed counts for kprobes
> attached by perf event link, like:
>
> # bpftool link
> 5: perf_event prog 82
> kprobe ffffffff815203e0 ksys_write
> 6: perf_event prog 83
> kprobe ffffffff811d1e50 scheduler_tick missed 682217
>
> # bpftool link -jp
> [{
> "id": 5,
> "type": "perf_event",
> "prog_id": 82,
> "retprobe": false,
> "addr": 18446744071584220128,
> "func": "ksys_write",
> "offset": 0,
> "missed": 0
> },{
> "id": 6,
> "type": "perf_event",
> "prog_id": 83,
> "retprobe": false,
> "addr": 18446744071580753488,
> "func": "scheduler_tick",
> "offset": 0,
> "missed": 693469
> }
> ]
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
> tools/bpf/bpftool/link.c | 3 +++
> 1 file changed, 3 insertions(+)
>
> diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
> index 7387e51a5e5c..d65129318f82 100644
> --- a/tools/bpf/bpftool/link.c
> +++ b/tools/bpf/bpftool/link.c
> @@ -302,6 +302,7 @@ show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
> jsonw_string_field(wtr, "func",
> u64_to_ptr(info->perf_event.kprobe.func_name));
> jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
> + jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed);
> }
>
> static void
> @@ -686,6 +687,8 @@ static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
> printf("%s", buf);
> if (info->perf_event.kprobe.offset)
> printf("+%#x", info->perf_event.kprobe.offset);
> + if (info->perf_event.kprobe.missed)
> + printf(" missed %llu", info->perf_event.kprobe.missed);
> printf(" ");
> }
>
Same comment as for the previous patch: double space between fields in
plain output please. Thanks!
Reviewed-by: Quentin Monnet <quentin@isovalent.com>
^ permalink raw reply [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 10/12] bpftool: Display missed count for kprobe perf link
2023-08-29 16:42 ` Quentin Monnet
@ 2023-08-30 16:01 ` Jiri Olsa
0 siblings, 0 replies; 34+ messages in thread
From: Jiri Olsa @ 2023-08-30 16:01 UTC (permalink / raw)
To: Quentin Monnet
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf,
Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
On Tue, Aug 29, 2023 at 05:42:49PM +0100, Quentin Monnet wrote:
> On 28/08/2023 08:55, Jiri Olsa wrote:
> > Adding 'missed' field to display missed counts for kprobes
> > attached by perf event link, like:
> >
> > # bpftool link
> > 5: perf_event prog 82
> > kprobe ffffffff815203e0 ksys_write
> > 6: perf_event prog 83
> > kprobe ffffffff811d1e50 scheduler_tick missed 682217
> >
> > # bpftool link -jp
> > [{
> > "id": 5,
> > "type": "perf_event",
> > "prog_id": 82,
> > "retprobe": false,
> > "addr": 18446744071584220128,
> > "func": "ksys_write",
> > "offset": 0,
> > "missed": 0
> > },{
> > "id": 6,
> > "type": "perf_event",
> > "prog_id": 83,
> > "retprobe": false,
> > "addr": 18446744071580753488,
> > "func": "scheduler_tick",
> > "offset": 0,
> > "missed": 693469
> > }
> > ]
> >
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> > tools/bpf/bpftool/link.c | 3 +++
> > 1 file changed, 3 insertions(+)
> >
> > diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
> > index 7387e51a5e5c..d65129318f82 100644
> > --- a/tools/bpf/bpftool/link.c
> > +++ b/tools/bpf/bpftool/link.c
> > @@ -302,6 +302,7 @@ show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
> > jsonw_string_field(wtr, "func",
> > u64_to_ptr(info->perf_event.kprobe.func_name));
> > jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
> > + jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed);
> > }
> >
> > static void
> > @@ -686,6 +687,8 @@ static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
> > printf("%s", buf);
> > if (info->perf_event.kprobe.offset)
> > printf("+%#x", info->perf_event.kprobe.offset);
> > + if (info->perf_event.kprobe.missed)
> > + printf(" missed %llu", info->perf_event.kprobe.missed);
> > printf(" ");
> > }
> >
>
> Same comment as for the previous patch: double space between fields in
> plain output please. Thanks!
>
> Reviewed-by: Quentin Monnet <quentin@isovalent.com>
will fix, thanks
jirka
^ permalink raw reply [flat|nested] 34+ messages in thread
* [PATCH bpf-next 11/12] selftests/bpf: Add test missed counts of perf event link kprobe
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
` (9 preceding siblings ...)
2023-08-28 7:55 ` [PATCH bpf-next 10/12] bpftool: Display missed count for kprobe perf link Jiri Olsa
@ 2023-08-28 7:55 ` Jiri Olsa
2023-09-05 3:14 ` Hou Tao
2023-08-28 7:55 ` [PATCH bpf-next 12/12] selftests/bpf: Add test recursion stats " Jiri Olsa
2023-09-05 3:30 ` [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Hou Tao
12 siblings, 1 reply; 34+ messages in thread
From: Jiri Olsa @ 2023-08-28 7:55 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
Adding test that puts kprobe on bpf_fentry_test1 that calls
bpf_kfunc_common_test kfunc, which has also kprobe on.
The latter won't get triggered due to kprobe recursion check
and kprobe missed counter is incremented.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
.../selftests/bpf/bpf_testmod/bpf_testmod.c | 5 ++
.../bpf/bpf_testmod/bpf_testmod_kfunc.h | 2 +
.../testing/selftests/bpf/prog_tests/missed.c | 47 +++++++++++++++++++
.../selftests/bpf/progs/missed_kprobe.c | 30 ++++++++++++
4 files changed, 84 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/missed.c
create mode 100644 tools/testing/selftests/bpf/progs/missed_kprobe.c
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index cefc5dd72573..a5e246f7b202 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -138,6 +138,10 @@ __bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
it->cnt = 0;
}
+__bpf_kfunc void bpf_kfunc_common_test(void)
+{
+}
+
struct bpf_testmod_btf_type_tag_1 {
int a;
};
@@ -343,6 +347,7 @@ BTF_SET8_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_SET8_END(bpf_testmod_common_kfunc_ids)
static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
index f5c5b1375c24..7c664dd61059 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
@@ -104,4 +104,6 @@ void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p);
void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p);
void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p);
void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len);
+
+void bpf_kfunc_common_test(void) __ksym;
#endif /* _BPF_TESTMOD_KFUNC_H */
diff --git a/tools/testing/selftests/bpf/prog_tests/missed.c b/tools/testing/selftests/bpf/prog_tests/missed.c
new file mode 100644
index 000000000000..fc674258c81f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/missed.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "missed_kprobe.skel.h"
+
+/*
+ * Putting kprobe on bpf_fentry_test1 that calls bpf_kfunc_common_test
+ * kfunc, which has also kprobe on. The latter won't get triggered due
+ * to kprobe recursion check and kprobe missed counter is incremented.
+ */
+static void test_missed_perf_kprobe(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct bpf_link_info info = {};
+ struct missed_kprobe *skel;
+ __u32 len = sizeof(info);
+ int err, prog_fd;
+
+ skel = missed_kprobe__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "missed_kprobe__open_and_load"))
+ goto cleanup;
+
+ err = missed_kprobe__attach(skel);
+ if (!ASSERT_OK(err, "missed_kprobe__attach"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ err = bpf_link_get_info_by_fd(bpf_link__fd(skel->links.test2), &info, &len);
+ if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
+ goto cleanup;
+
+ ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "info.type");
+ ASSERT_EQ(info.perf_event.type, BPF_PERF_EVENT_KPROBE, "info.perf_event.type");
+ ASSERT_EQ(info.perf_event.kprobe.missed, 1, "info.perf_event.kprobe.missed");
+
+cleanup:
+ missed_kprobe__destroy(skel);
+}
+
+void serial_test_missed(void)
+{
+ if (test__start_subtest("perf_kprobe"))
+ test_missed_perf_kprobe();
+}
diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe.c b/tools/testing/selftests/bpf/progs/missed_kprobe.c
new file mode 100644
index 000000000000..7f9ef701f5de
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/missed_kprobe.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_fentry_test1")
+int test1(struct pt_regs *ctx)
+{
+ bpf_kfunc_common_test();
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test2(struct pt_regs *ctx)
+{
+ return 0;
+}
--
2.41.0
^ permalink raw reply related [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 11/12] selftests/bpf: Add test missed counts of perf event link kprobe
2023-08-28 7:55 ` [PATCH bpf-next 11/12] selftests/bpf: Add test missed counts of perf event link kprobe Jiri Olsa
@ 2023-09-05 3:14 ` Hou Tao
0 siblings, 0 replies; 34+ messages in thread
From: Hou Tao @ 2023-09-05 3:14 UTC (permalink / raw)
To: Jiri Olsa
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu,
Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> Adding test that puts kprobe on bpf_fentry_test1 that calls
> bpf_kfunc_common_test kfunc, which has also kprobe on.
>
> The latter won't get triggered due to kprobe recursion check
> and kprobe missed counter is incremented.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Hou Tao <houtao1@huawei.com>
^ permalink raw reply [flat|nested] 34+ messages in thread
* [PATCH bpf-next 12/12] selftests/bpf: Add test recursion stats of perf event link kprobe
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
` (10 preceding siblings ...)
2023-08-28 7:55 ` [PATCH bpf-next 11/12] selftests/bpf: Add test missed counts of perf event link kprobe Jiri Olsa
@ 2023-08-28 7:55 ` Jiri Olsa
2023-09-05 3:23 ` Hou Tao
2023-09-05 3:30 ` [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Hou Tao
12 siblings, 1 reply; 34+ messages in thread
From: Jiri Olsa @ 2023-08-28 7:55 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Hou Tao, Daniel Xu
Adding selftest that puts kprobe.multi on bpf_fentry_test1 that
calls bpf_kfunc_common_test kfunc which has 3 perf event kprobes
and 1 kprobe.multi attached.
Because fprobe (kprobe.multi attach layer) does not have strict
recursion check the kprobe's bpf_prog_active check is hit for test2-5.
Disabling this test for arm64, because there's no fprobe support yet.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
tools/testing/selftests/bpf/DENYLIST.aarch64 | 1 +
.../testing/selftests/bpf/prog_tests/missed.c | 50 +++++++++++++++++++
.../bpf/progs/missed_kprobe_recursion.c | 48 ++++++++++++++++++
3 files changed, 99 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 7f768d335698..3f2187c049db 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -15,3 +15,4 @@ fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_ma
fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95
+missed/kprobe_recursion # missed_kprobe_recursion__attach unexpected error: -95 (errno 95)
diff --git a/tools/testing/selftests/bpf/prog_tests/missed.c b/tools/testing/selftests/bpf/prog_tests/missed.c
index fc674258c81f..c65c34c84a28 100644
--- a/tools/testing/selftests/bpf/prog_tests/missed.c
+++ b/tools/testing/selftests/bpf/prog_tests/missed.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "missed_kprobe.skel.h"
+#include "missed_kprobe_recursion.skel.h"
/*
* Putting kprobe on bpf_fentry_test1 that calls bpf_kfunc_common_test
@@ -40,8 +41,57 @@ static void test_missed_perf_kprobe(void)
missed_kprobe__destroy(skel);
}
+static __u64 get_count(int fd)
+{
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+ int err;
+
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+ return (__u64) -1;
+ return info.recursion_misses;
+}
+
+/*
+ * Putting kprobe.multi on bpf_fentry_test1 that calls bpf_kfunc_common_test
+ * kfunc which has 3 perf event kprobes and 1 kprobe.multi attached.
+ *
+ * Because fprobe (kprobe.multi attach layer) does not have strict recursion
+ * check the kprobe's bpf_prog_active check is hit for test2-5.
+ */
+static void test_missed_kprobe_recursion(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct missed_kprobe_recursion *skel;
+ int err, prog_fd;
+
+ skel = missed_kprobe_recursion__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "missed_kprobe_recursion__open_and_load"))
+ goto cleanup;
+
+ err = missed_kprobe_recursion__attach(skel);
+ if (!ASSERT_OK(err, "missed_kprobe_recursion__attach"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.trigger);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
+ ASSERT_EQ(get_count(bpf_program__fd(skel->progs.test2)), 1, "test2_recursion_misses");
+ ASSERT_EQ(get_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses");
+ ASSERT_EQ(get_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses");
+ ASSERT_EQ(get_count(bpf_program__fd(skel->progs.test5)), 1, "test5_recursion_misses");
+
+cleanup:
+ missed_kprobe_recursion__destroy(skel);
+}
+
void serial_test_missed(void)
{
if (test__start_subtest("perf_kprobe"))
test_missed_perf_kprobe();
+ if (test__start_subtest("kprobe_recursion"))
+ test_missed_kprobe_recursion();
}
diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
new file mode 100644
index 000000000000..8ea71cbd6c45
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+/*
+ * No tests in here, just to trigger 'bpf_fentry_test*'
+ * through tracing test_run
+ */
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(trigger)
+{
+ return 0;
+}
+
+SEC("kprobe.multi/bpf_fentry_test1")
+int test1(struct pt_regs *ctx)
+{
+ bpf_kfunc_common_test();
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test2(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test3(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe/bpf_kfunc_common_test")
+int test4(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kprobe.multi/bpf_kfunc_common_test")
+int test5(struct pt_regs *ctx)
+{
+ return 0;
+}
--
2.41.0
^ permalink raw reply related [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 12/12] selftests/bpf: Add test recursion stats of perf event link kprobe
2023-08-28 7:55 ` [PATCH bpf-next 12/12] selftests/bpf: Add test recursion stats " Jiri Olsa
@ 2023-09-05 3:23 ` Hou Tao
0 siblings, 0 replies; 34+ messages in thread
From: Hou Tao @ 2023-09-05 3:23 UTC (permalink / raw)
To: Jiri Olsa
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu,
Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> Adding selftest that puts kprobe.multi on bpf_fentry_test1 that
> calls bpf_kfunc_common_test kfunc which has 3 perf event kprobes
> and 1 kprobe.multi attached.
>
> Because fprobe (kprobe.multi attach layer) does not have strict
> recursion check the kprobe's bpf_prog_active check is hit for test2-5.
>
> Disabling this test for arm64, because there's no fprobe support yet.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Hou Tao <houtao1@huawei.com>
^ permalink raw reply [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes
2023-08-28 7:55 [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Jiri Olsa
` (11 preceding siblings ...)
2023-08-28 7:55 ` [PATCH bpf-next 12/12] selftests/bpf: Add test recursion stats " Jiri Olsa
@ 2023-09-05 3:30 ` Hou Tao
2023-09-05 7:20 ` Jiri Olsa
12 siblings, 1 reply; 34+ messages in thread
From: Hou Tao @ 2023-09-05 3:30 UTC (permalink / raw)
To: Jiri Olsa
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu,
Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Hi,
On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> hi,
> at the moment we can't retrieve the number of missed kprobe
> executions and subsequent execution of BPF programs.
>
> This patchset adds:
> - counting of missed execution on attach layer for:
> . kprobes attached through perf link (kprobe/ftrace)
> . kprobes attached through kprobe.multi link (fprobe)
> - counting of recursion_misses for BPF kprobe programs
Because trace_call_bpf() is used for both kprobe and trace-point bpf
program, so I think it is better to add one selftest for missed counter
for trace-point program as well.
> - counting runtime stats (kernel.bpf_stats_enabled=1) for BPF programs
> executed through bpf_prog_run_array - kprobes, perf events, trace
> syscall probes
>
>
> It's still technically possible to create kprobe without perf link (using
> SET_BPF perf ioctl) in which case we don't have a way to retrieve the kprobe's
> 'missed' count. However both libbpf and cilium/ebpf libraries use perf link
> if it's available, and for old kernels without perf link support we can use
> BPF program to retrieve the kprobe missed count.
>
> Also available at:
> https://git.kernel.org/pub/scm/linux/kernel/git/jolsa/perf.git
> bpf/missed_stats
>
> thanks,
> jirka
>
>
> ---
> Jiri Olsa (12):
> bpf: Move update_prog_stats to syscall object
> bpf: Move bpf_prog_start_time to linux/filter.h
> bpf: Count stats for kprobe_multi programs
> bpf: Add missed value to kprobe_multi link info
> bpf: Add missed value to kprobe perf link info
> bpf: Count missed stats in trace_call_bpf
> bpf: Move bpf_prog_run_array down in the header file
> bpf: Count run stats in bpf_prog_run_array
> bpftool: Display missed count for kprobe_multi link
> bpftool: Display missed count for kprobe perf link
> selftests/bpf: Add test missed counts of perf event link kprobe
> > selftests/bpf: Add test recursion stats of perf event link kprobe
>
> include/linux/bpf.h | 106 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------
> include/linux/trace_events.h | 6 ++++--
> include/uapi/linux/bpf.h | 2 ++
> kernel/bpf/syscall.c | 36 +++++++++++++++++++++++++------
> kernel/bpf/trampoline.c | 45 +++++----------------------------------
> kernel/trace/bpf_trace.c | 17 ++++++++++++---
> kernel/trace/trace_kprobe.c | 5 ++++-
> tools/bpf/bpftool/link.c | 8 ++++++-
> tools/include/uapi/linux/bpf.h | 2 ++
> tools/testing/selftests/bpf/DENYLIST.aarch64 | 1 +
> tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 5 +++++
> tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h | 2 ++
> tools/testing/selftests/bpf/prog_tests/missed.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
> tools/testing/selftests/bpf/progs/missed_kprobe.c | 30 ++++++++++++++++++++++++++
> tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c | 48 +++++++++++++++++++++++++++++++++++++++++
> 15 files changed, 327 insertions(+), 83 deletions(-)
> create mode 100644 tools/testing/selftests/bpf/prog_tests/missed.c
> create mode 100644 tools/testing/selftests/bpf/progs/missed_kprobe.c
> create mode 100644 tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
>
> .
^ permalink raw reply [flat|nested] 34+ messages in thread
* Re: [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes
2023-09-05 3:30 ` [PATCH bpf-next 00/12] bpf: Add missed stats for kprobes Hou Tao
@ 2023-09-05 7:20 ` Jiri Olsa
0 siblings, 0 replies; 34+ messages in thread
From: Jiri Olsa @ 2023-09-05 7:20 UTC (permalink / raw)
To: Hou Tao
Cc: bpf, Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
KP Singh, Stanislav Fomichev, Hao Luo, Daniel Xu,
Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
On Tue, Sep 05, 2023 at 11:30:54AM +0800, Hou Tao wrote:
> Hi,
>
> On 8/28/2023 3:55 PM, Jiri Olsa wrote:
> > hi,
> > at the moment we can't retrieve the number of missed kprobe
> > executions and subsequent execution of BPF programs.
> >
> > This patchset adds:
> > - counting of missed execution on attach layer for:
> > . kprobes attached through perf link (kprobe/ftrace)
> > . kprobes attached through kprobe.multi link (fprobe)
> > - counting of recursion_misses for BPF kprobe programs
>
> Because trace_call_bpf() is used for both kprobe and trace-point bpf
> program, so I think it is better to add one selftest for missed counter
> for trace-point program as well.
ok, will try to add some
thanks,
jirka
> > - counting runtime stats (kernel.bpf_stats_enabled=1) for BPF programs
> > executed through bpf_prog_run_array - kprobes, perf events, trace
> > syscall probes
> >
> >
> > It's still technically possible to create kprobe without perf link (using
> > SET_BPF perf ioctl) in which case we don't have a way to retrieve the kprobe's
> > 'missed' count. However both libbpf and cilium/ebpf libraries use perf link
> > if it's available, and for old kernels without perf link support we can use
> > BPF program to retrieve the kprobe missed count.
> >
> > Also available at:
> > https://git.kernel.org/pub/scm/linux/kernel/git/jolsa/perf.git
> > bpf/missed_stats
> >
> > thanks,
> > jirka
> >
> >
> > ---
> > Jiri Olsa (12):
> > bpf: Move update_prog_stats to syscall object
> > bpf: Move bpf_prog_start_time to linux/filter.h
> > bpf: Count stats for kprobe_multi programs
> > bpf: Add missed value to kprobe_multi link info
> > bpf: Add missed value to kprobe perf link info
> > bpf: Count missed stats in trace_call_bpf
> > bpf: Move bpf_prog_run_array down in the header file
> > bpf: Count run stats in bpf_prog_run_array
> > bpftool: Display missed count for kprobe_multi link
> > bpftool: Display missed count for kprobe perf link
> > selftests/bpf: Add test missed counts of perf event link kprobe
> > > selftests/bpf: Add test recursion stats of perf event link kprobe
> >
> > include/linux/bpf.h | 106 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------
> > include/linux/trace_events.h | 6 ++++--
> > include/uapi/linux/bpf.h | 2 ++
> > kernel/bpf/syscall.c | 36 +++++++++++++++++++++++++------
> > kernel/bpf/trampoline.c | 45 +++++----------------------------------
> > kernel/trace/bpf_trace.c | 17 ++++++++++++---
> > kernel/trace/trace_kprobe.c | 5 ++++-
> > tools/bpf/bpftool/link.c | 8 ++++++-
> > tools/include/uapi/linux/bpf.h | 2 ++
> > tools/testing/selftests/bpf/DENYLIST.aarch64 | 1 +
> > tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 5 +++++
> > tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h | 2 ++
> > tools/testing/selftests/bpf/prog_tests/missed.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
> > tools/testing/selftests/bpf/progs/missed_kprobe.c | 30 ++++++++++++++++++++++++++
> > tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c | 48 +++++++++++++++++++++++++++++++++++++++++
> > 15 files changed, 327 insertions(+), 83 deletions(-)
> > create mode 100644 tools/testing/selftests/bpf/prog_tests/missed.c
> > create mode 100644 tools/testing/selftests/bpf/progs/missed_kprobe.c
> > create mode 100644 tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c
> >
> > .
>
^ permalink raw reply [flat|nested] 34+ messages in thread