From: Alexei Starovoitov <alexei.starovoitov@gmail.com>
To: Anton Protopopov <aspsk@isovalent.com>
Cc: Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
John Fastabend <john.fastabend@gmail.com>,
Andrii Nakryiko <andrii@kernel.org>,
Martin KaFai Lau <martin.lau@linux.dev>,
Song Liu <song@kernel.org>, Yonghong Song <yhs@fb.com>,
KP Singh <kpsingh@kernel.org>,
Stanislav Fomichev <sdf@google.com>, Hao Luo <haoluo@google.com>,
Jiri Olsa <jolsa@kernel.org>,
bpf@vger.kernel.org
Subject: Re: [RFC v2 PATCH bpf-next 1/4] bpf: add percpu stats for bpf_map elements insertions/deletions
Date: Thu, 22 Jun 2023 13:11:58 -0700 [thread overview]
Message-ID: <20230622201158.s56vbdas5rcilwbd@macbook-pro-8.dhcp.thefacebook.com> (raw)
In-Reply-To: <20230622095330.1023453-2-aspsk@isovalent.com>
On Thu, Jun 22, 2023 at 09:53:27AM +0000, Anton Protopopov wrote:
> Add a generic percpu stats for bpf_map elements insertions/deletions in order
> to keep track of both, the current (approximate) number of elements in a map
> and per-cpu statistics on update/delete operations.
>
> To expose these stats a particular map implementation should initialize the
> counter and adjust it as needed using the 'bpf_map_*_elements_counter' helpers
> provided by this commit. The counter can be read by an iterator program.
>
> A bpf_map_sum_elements_counter kfunc was added to simplify getting the sum of
> the per-cpu values. If a map doesn't implement the counter, then it will always
> return 0.
>
> Signed-off-by: Anton Protopopov <aspsk@isovalent.com>
> ---
> include/linux/bpf.h | 30 +++++++++++++++++++++++++++
> kernel/bpf/map_iter.c | 48 ++++++++++++++++++++++++++++++++++++++++++-
> 2 files changed, 77 insertions(+), 1 deletion(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index f58895830ada..20292a096188 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -275,6 +275,7 @@ struct bpf_map {
> } owner;
> bool bypass_spec_v1;
> bool frozen; /* write-once; write-protected by freeze_mutex */
> + s64 __percpu *elements_count;
> };
>
> static inline const char *btf_field_type_name(enum btf_field_type type)
> @@ -2040,6 +2041,35 @@ bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
> }
> #endif
>
> +static inline int
> +bpf_map_init_elements_counter(struct bpf_map *map)
> +{
> + size_t size = sizeof(*map->elements_count), align = size;
> + gfp_t flags = GFP_USER | __GFP_NOWARN;
> +
> + map->elements_count = bpf_map_alloc_percpu(map, size, align, flags);
> + if (!map->elements_count)
> + return -ENOMEM;
> +
> + return 0;
> +}
> +
> +static inline void
> +bpf_map_free_elements_counter(struct bpf_map *map)
> +{
> + free_percpu(map->elements_count);
> +}
> +
> +static inline void bpf_map_inc_elements_counter(struct bpf_map *map)
bpf_map_inc_elem_count() to match the existing inc_elem_count()?
> +{
> + this_cpu_inc(*map->elements_count);
> +}
> +
> +static inline void bpf_map_dec_elements_counter(struct bpf_map *map)
> +{
> + this_cpu_dec(*map->elements_count);
> +}
> +
> extern int sysctl_unprivileged_bpf_disabled;
>
> static inline bool bpf_allow_ptr_leaks(void)
> diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c
> index b0fa190b0979..26ca00dde962 100644
> --- a/kernel/bpf/map_iter.c
> +++ b/kernel/bpf/map_iter.c
> @@ -93,7 +93,7 @@ static struct bpf_iter_reg bpf_map_reg_info = {
> .ctx_arg_info_size = 1,
> .ctx_arg_info = {
> { offsetof(struct bpf_iter__bpf_map, map),
> - PTR_TO_BTF_ID_OR_NULL },
> + PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
This change and the one below should be split out into a separate patch.
> },
> .seq_info = &bpf_map_seq_info,
> };
> @@ -193,3 +193,49 @@ static int __init bpf_map_iter_init(void)
> }
>
> late_initcall(bpf_map_iter_init);
> +
> +__diag_push();
> +__diag_ignore_all("-Wmissing-prototypes",
> + "Global functions as their definitions will be in vmlinux BTF");
> +
> +__bpf_kfunc s64 bpf_map_sum_elements_counter(struct bpf_map *map)
> +{
> + s64 *pcount;
> + s64 ret = 0;
> + int cpu;
> +
> + if (!map || !map->elements_count)
> + return 0;
> +
> + for_each_possible_cpu(cpu) {
> + pcount = per_cpu_ptr(map->elements_count, cpu);
> + ret += READ_ONCE(*pcount);
> + }
> + return ret;
> +}
> +
> +__diag_pop();
> +
> +BTF_SET8_START(bpf_map_iter_kfunc_ids)
> +BTF_ID_FLAGS(func, bpf_map_sum_elements_counter, KF_TRUSTED_ARGS)
> +BTF_SET8_END(bpf_map_iter_kfunc_ids)
> +
> +static int tracing_iter_filter(const struct bpf_prog *prog, u32 kfunc_id)
> +{
> + if (btf_id_set8_contains(&bpf_map_iter_kfunc_ids, kfunc_id) &&
> + prog->expected_attach_type != BPF_TRACE_ITER)
Why restrict this to BPF_TRACE_ITER programs?
> + return -EACCES;
> + return 0;
> +}
> +
> +static const struct btf_kfunc_id_set bpf_map_iter_kfunc_set = {
> + .owner = THIS_MODULE,
> + .set = &bpf_map_iter_kfunc_ids,
> + .filter = tracing_iter_filter,
> +};
> +
> +static int init_subsystem(void)
> +{
> + return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_map_iter_kfunc_set);
> +}
> +late_initcall(init_subsystem);
> --
> 2.34.1
>
next prev parent reply other threads:[~2023-06-22 20:12 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-06-22 9:53 [RFC v2 PATCH bpf-next 0/4] bpf: add percpu stats for bpf_map Anton Protopopov
2023-06-22 9:53 ` [RFC v2 PATCH bpf-next 1/4] bpf: add percpu stats for bpf_map elements insertions/deletions Anton Protopopov
2023-06-22 20:11 ` Alexei Starovoitov [this message]
2023-06-23 12:47 ` Anton Protopopov
2023-06-23 10:51 ` Daniel Borkmann
2023-06-23 12:35 ` Anton Protopopov
2023-06-22 9:53 ` [RFC v2 PATCH bpf-next 2/4] bpf: populate the per-cpu insertions/deletions counters for hashmaps Anton Protopopov
2023-06-22 20:18 ` Alexei Starovoitov
2023-06-22 9:53 ` [RFC v2 PATCH bpf-next 3/4] bpf: make preloaded map iterators to display map elements count Anton Protopopov
2023-06-22 9:58 ` [RFC v2 PATCH bpf-next 4/4] selftests/bpf: test map percpu stats Anton Protopopov
2023-06-22 20:20 ` Alexei Starovoitov
2023-06-26 14:37 ` Anton Protopopov
2023-06-23 9:53 ` [RFC v2 PATCH bpf-next 0/4] bpf: add percpu stats for bpf_map Daniel Borkmann
2023-06-24 0:17 ` Alexei Starovoitov
2023-06-26 8:50 ` Daniel Borkmann
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230622201158.s56vbdas5rcilwbd@macbook-pro-8.dhcp.thefacebook.com \
--to=alexei.starovoitov@gmail.com \
--cc=andrii@kernel.org \
--cc=aspsk@isovalent.com \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=haoluo@google.com \
--cc=john.fastabend@gmail.com \
--cc=jolsa@kernel.org \
--cc=kpsingh@kernel.org \
--cc=martin.lau@linux.dev \
--cc=sdf@google.com \
--cc=song@kernel.org \
--cc=yhs@fb.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).