* [RFC PATCH] libbpf: Support setting map max_entries at runtime
From: Barret Rhoden @ 2020-08-31 22:49 UTC
  To: Alexei Starovoitov, Daniel Borkmann, Martin KaFai Lau, Song Liu,
	Yonghong Song, Andrii Nakryiko, John Fastabend, KP Singh
  Cc: netdev, bpf, linux-kernel

The max_entries for a BPF map may depend on runtime parameters.
Currently, we need to know the maximum value at BPF compile time.  For
instance, if you want an array map with NR_CPUS entries, you would
hard-code your architecture's largest supported CONFIG_NR_CPUS value.
This wastes memory at runtime on machines with fewer CPUs.
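
For example, sized for the worst case, such a map definition might look
like this (the 8192 value is illustrative):

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 8192);      /* worst-case CONFIG_NR_CPUS */
        __type(key, u32);
        __type(value, struct cpu_data);
} cpu_blobs SEC(".maps");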

For the NR_CPUS case, one could use a PERCPU map type, but those maps
are limited in functionality.  For instance, a BPF program can only
access its own CPU's slot of a PERCPU map, and such maps are not
mmappable.

This commit allows the use of sentinel values in BPF map definitions,
which libbpf patches at runtime.

For starters, we support NUM_POSSIBLE_CPUS, e.g.:

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, NUM_POSSIBLE_CPUS);
        __type(key, u32);
        __type(value, struct cpu_data);
} cpu_blobs SEC(".maps");

This can be extended to other runtime-dependent values, such as the
maximum number of threads (/proc/sys/kernel/threads-max).
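
As an illustrative sketch only (not part of this patch), a future
sentinel for that value could be used the same way; MAX_THREADS and
struct task_data here are hypothetical names:

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, MAX_THREADS);       /* hypothetical sentinel */
        __type(key, u32);
        __type(value, struct task_data);
} task_blobs SEC(".maps");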

Signed-off-by: Barret Rhoden <brho@google.com>
---
 tools/lib/bpf/bpf_helpers.h |  4 ++++
 tools/lib/bpf/libbpf.c      | 40 ++++++++++++++++++++++++++++++-------
 tools/lib/bpf/libbpf.h      |  4 ++++
 3 files changed, 41 insertions(+), 7 deletions(-)

diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index f67dce2af802..38b431d85ac6 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -74,6 +74,10 @@ enum libbpf_tristate {
 	TRI_MODULE = 2,
 };
 
+enum libbpf_max_entries {
+	NUM_POSSIBLE_CPUS = (unsigned int)-1,
+};
+
 #define __kconfig __attribute__((section(".kconfig")))
 
 #endif
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 11e4725b8b1c..7d0e9792e015 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1868,36 +1868,55 @@ resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
  * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
  * type definition, while using only sizeof(void *) space in ELF data section.
  */
-static bool get_map_field_int(const char *map_name, const struct btf *btf,
-			      const struct btf_member *m, __u32 *res)
+static struct btf_array *get_map_field_arr_info(const char *map_name,
+						const struct btf *btf,
+						const struct btf_member *m)
 {
 	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
 	const char *name = btf__name_by_offset(btf, m->name_off);
-	const struct btf_array *arr_info;
 	const struct btf_type *arr_t;
 
 	if (!btf_is_ptr(t)) {
 		pr_warn("map '%s': attr '%s': expected PTR, got %u.\n",
 			map_name, name, btf_kind(t));
-		return false;
+		return NULL;
 	}
 
 	arr_t = btf__type_by_id(btf, t->type);
 	if (!arr_t) {
 		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
 			map_name, name, t->type);
-		return false;
+		return NULL;
 	}
 	if (!btf_is_array(arr_t)) {
 		pr_warn("map '%s': attr '%s': expected ARRAY, got %u.\n",
 			map_name, name, btf_kind(arr_t));
-		return false;
+		return NULL;
 	}
-	arr_info = btf_array(arr_t);
+	return btf_array(arr_t);
+}
+
+static bool get_map_field_int(const char *map_name, const struct btf *btf,
+			      const struct btf_member *m, __u32 *res)
+{
+	const struct btf_array *arr_info;
+
+	arr_info = get_map_field_arr_info(map_name, btf, m);
+	if (arr_info == NULL)
+		return false;
 	*res = arr_info->nelems;
 	return true;
 }
 
+static void set_map_field_int(const char *map_name, const struct btf *btf,
+			      const struct btf_member *m, __u32 val)
+{
+	struct btf_array *arr_info;
+
+	arr_info = get_map_field_arr_info(map_name, btf, m);
+	arr_info->nelems = val;
+}
+
 static int build_map_pin_path(struct bpf_map *map, const char *path)
 {
 	char buf[PATH_MAX];
@@ -1951,6 +1970,13 @@ static int parse_btf_map_def(struct bpf_object *obj,
 				return -EINVAL;
 			pr_debug("map '%s': found max_entries = %u.\n",
 				 map->name, map->def.max_entries);
+			if (map->def.max_entries == NUM_POSSIBLE_CPUS) {
+				map->def.max_entries = libbpf_num_possible_cpus();
+				set_map_field_int(map->name, obj->btf, m,
+						  map->def.max_entries);
+				pr_debug("map '%s': adjusting max_entries = %u.\n",
+					 map->name, map->def.max_entries);
+			}
 		} else if (strcmp(name, "map_flags") == 0) {
 			if (!get_map_field_int(map->name, obj->btf, m,
 					       &map->def.map_flags))
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 334437af3014..42cba5bb1b04 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -717,6 +717,10 @@ enum libbpf_tristate {
 	TRI_MODULE = 2,
 };
 
+enum libbpf_max_entries {
+	NUM_POSSIBLE_CPUS = -1,
+};
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
-- 
2.28.0.402.g5ffc5be6b7-goog


* Re: [RFC PATCH] libbpf: Support setting map max_entries at runtime
From: Andrii Nakryiko @ 2020-09-03  5:28 UTC
  To: Barret Rhoden
  Cc: Alexei Starovoitov, Daniel Borkmann, Martin KaFai Lau, Song Liu,
	Yonghong Song, Andrii Nakryiko, John Fastabend, KP Singh,
	Networking, bpf, open list

On Mon, Aug 31, 2020 at 4:03 PM Barret Rhoden <brho@google.com> wrote:
>
> The max_entries for a BPF map may depend on runtime parameters.
> Currently, we need to know the maximum value at BPF compile time.  For
> instance, if you want an array map with NR_CPUS entries, you would
> hard-code your architecture's largest supported CONFIG_NR_CPUS value.
> This wastes memory at runtime on machines with fewer CPUs.
>
> For the NR_CPUS case, one could use a PERCPU map type, but those maps
> are limited in functionality.  For instance, a BPF program can only
> access its own CPU's slot of a PERCPU map, and such maps are not
> mmappable.
>
> This commit allows the use of sentinel values in BPF map definitions,
> which libbpf patches at runtime.
>
> For starters, we support NUM_POSSIBLE_CPUS, e.g.:
>
> struct {
>         __uint(type, BPF_MAP_TYPE_ARRAY);
>         __uint(max_entries, NUM_POSSIBLE_CPUS);
>         __type(key, u32);
>         __type(value, struct cpu_data);
> } cpu_blobs SEC(".maps");
>
> This can be extended to other runtime-dependent values, such as the
> maximum number of threads (/proc/sys/kernel/threads-max).
>
> Signed-off-by: Barret Rhoden <brho@google.com>
> ---

libbpf provides bpf_map__set_max_entries() API exactly for such use
cases, please use that.
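
Roughly, a minimal sketch (assuming the object file is prog.bpf.o and
it defines the cpu_blobs map from your example):

	struct bpf_object *obj;
	struct bpf_map *map;

	obj = bpf_object__open_file("prog.bpf.o", NULL);
	if (libbpf_get_error(obj))
		return -1;

	map = bpf_object__find_map_by_name(obj, "cpu_blobs");
	if (!map)
		return -1;

	/* must be done before bpf_object__load() creates the map */
	bpf_map__set_max_entries(map, libbpf_num_possible_cpus());

	if (bpf_object__load(obj))
		return -1;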

>  tools/lib/bpf/bpf_helpers.h |  4 ++++
>  tools/lib/bpf/libbpf.c      | 40 ++++++++++++++++++++++++++++++-------
>  tools/lib/bpf/libbpf.h      |  4 ++++
>  3 files changed, 41 insertions(+), 7 deletions(-)
>

[...]
