* [PATCH v4 bpf 1/2] bpf: fix map leak in HASH_OF_MAPS map
@ 2020-07-29  4:09 Andrii Nakryiko
  2020-07-29  4:09 ` [PATCH v4 bpf 2/2] selftests/bpf: extend map-in-map selftest to detect memory leaks Andrii Nakryiko
  2020-07-29 23:44 ` [PATCH v4 bpf 1/2] bpf: fix map leak in HASH_OF_MAPS map Daniel Borkmann
  0 siblings, 2 replies; 7+ messages in thread
From: Andrii Nakryiko @ 2020-07-29  4:09 UTC (permalink / raw)
  To: bpf, netdev, ast, daniel
  Cc: andrii.nakryiko, kernel-team, Andrii Nakryiko, Song Liu, stable

Fix a HASH_OF_MAPS bug where the inner map pointer is not put on a
bpf_map_update_elem() operation. This is due to the per-cpu extra_elems
optimization, which bypasses the free_htab_elem() logic that does the proper
cleanup. Make sure the inner map is put properly in the optimized case as
well.
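
For illustration only (not part of the patch), a minimal userspace sketch of
the leaking pattern; map sizes, the loop count, and the flags value are
arbitrary assumptions, error handling is omitted, and the libbpf calls mirror
the ones used in the selftest in patch 2/2:

	/* Before this fix, every update of an already-populated
	 * HASH_OF_MAPS key went through the per-cpu extra_elems path,
	 * which never put the reference of the inner map being
	 * replaced, so replaced inner maps outlived their FDs.
	 */
	#include <unistd.h>
	#include <bpf/bpf.h>

	int main(void)
	{
		int inner1, inner2, outer, key = 0, i;

		inner1 = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
		inner2 = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
		outer = bpf_create_map_in_map(BPF_MAP_TYPE_HASH_OF_MAPS, NULL,
					      sizeof(int), inner1, 1, 0);

		/* alternate the stored inner map; each replacement used to
		 * leak one reference to the inner map being replaced
		 */
		for (i = 0; i < 16; i++)
			bpf_map_update_elem(outer, &key,
					    i % 2 ? &inner2 : &inner1, 0);

		close(inner1);
		close(inner2);
		close(outer);
		return 0;
	}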

Fixes: 8c290e60fa2a ("bpf: fix hashmap extra_elems logic")
Acked-by: Song Liu <songliubraving@fb.com>
Cc: <stable@vger.kernel.org> # v4.14+
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
---
 kernel/bpf/hashtab.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index b4b288a3c3c9..b32cc8ce8ff6 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -779,15 +779,20 @@ static void htab_elem_free_rcu(struct rcu_head *head)
 	htab_elem_free(htab, l);
 }
 
-static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
 {
 	struct bpf_map *map = &htab->map;
+	void *ptr;
 
 	if (map->ops->map_fd_put_ptr) {
-		void *ptr = fd_htab_map_get_ptr(map, l);
-
+		ptr = fd_htab_map_get_ptr(map, l);
 		map->ops->map_fd_put_ptr(ptr);
 	}
+}
+
+static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+{
+	htab_put_fd_value(htab, l);
 
 	if (htab_is_prealloc(htab)) {
 		__pcpu_freelist_push(&htab->freelist, &l->fnode);
@@ -839,6 +844,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			 */
 			pl_new = this_cpu_ptr(htab->extra_elems);
 			l_new = *pl_new;
+			htab_put_fd_value(htab, old_elem);
 			*pl_new = old_elem;
 		} else {
 			struct pcpu_freelist_node *l;
-- 
2.24.1



* [PATCH v4 bpf 2/2] selftests/bpf: extend map-in-map selftest to detect memory leaks
  2020-07-29  4:09 [PATCH v4 bpf 1/2] bpf: fix map leak in HASH_OF_MAPS map Andrii Nakryiko
@ 2020-07-29  4:09 ` Andrii Nakryiko
  2020-07-29 14:29   ` Jakub Sitnicki
  2020-07-29 23:44 ` [PATCH v4 bpf 1/2] bpf: fix map leak in HASH_OF_MAPS map Daniel Borkmann
  1 sibling, 1 reply; 7+ messages in thread
From: Andrii Nakryiko @ 2020-07-29  4:09 UTC (permalink / raw)
  To: bpf, netdev, ast, daniel
  Cc: andrii.nakryiko, kernel-team, Andrii Nakryiko, Song Liu, stable

Add test validating that all inner maps are released properly after skeleton
is destroyed. To ensure determinism, trigger kernel-side synchronize_rcu()
before checking map existence by their IDs.
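
In outline, the check boils down to the following hedged sketch (the helper
names are the ones added by this patch, error handling is omitted): record the
inner map's ID while it is alive, drop the last user references, force an RCU
grace period, and expect the ID to no longer resolve to an FD:

	__u32 id;
	int fd;

	id = bpf_map_id(skel->maps.inner_map1);	/* while the map is alive */
	test_btf_map_in_map__destroy(skel);	/* drop last user references */
	kern_sync_rcu();			/* force an RCU grace period */

	fd = bpf_map_get_fd_by_id(id);
	if (fd >= 0) {
		/* still resolvable by ID -> the inner map leaked */
		close(fd);
	}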

Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
---
 .../selftests/bpf/prog_tests/btf_map_in_map.c | 124 ++++++++++++++++--
 1 file changed, 110 insertions(+), 14 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
index f7ee8fa377ad..f6eee3fb933c 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
@@ -5,10 +5,60 @@
 
 #include "test_btf_map_in_map.skel.h"
 
+static int duration;
+
+static __u32 bpf_map_id(struct bpf_map *map)
+{
+	struct bpf_map_info info;
+	__u32 info_len = sizeof(info);
+	int err;
+
+	memset(&info, 0, info_len);
+	err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
+	if (err)
+		return 0;
+	return info.id;
+}
+
+/*
+ * Trigger synchronize_cpu() in kernel.
+ *
+ * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger
+ * synchronize_rcu(), if looking up/updating non-NULL element. Use this fact
+ * to trigger synchronize_cpu(): create map-in-map, create a trivial ARRAY
+ * map, update map-in-map with ARRAY inner map. Then cleanup. At the end, at
+ * least one synchronize_rcu() would be called.
+ */
+static int kern_sync_rcu(void)
+{
+	int inner_map_fd, outer_map_fd, err, zero = 0;
+
+	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
+	if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno))
+		return -1;
+
+	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
+					     sizeof(int), inner_map_fd, 1, 0);
+	if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) {
+		close(inner_map_fd);
+		return -1;
+	}
+
+	err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0);
+	if (err)
+		err = -errno;
+	CHECK(err, "outer_map_update", "failed %d\n", err);
+	close(inner_map_fd);
+	close(outer_map_fd);
+	return err;
+}
+
 void test_btf_map_in_map(void)
 {
-	int duration = 0, err, key = 0, val;
-	struct test_btf_map_in_map* skel;
+	int err, key = 0, val, i;
+	struct test_btf_map_in_map *skel;
+	int outer_arr_fd, outer_hash_fd;
+	int fd, map1_fd, map2_fd, map1_id, map2_id;
 
 	skel = test_btf_map_in_map__open_and_load();
 	if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
@@ -18,32 +68,78 @@ void test_btf_map_in_map(void)
 	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
 		goto cleanup;
 
+	map1_fd = bpf_map__fd(skel->maps.inner_map1);
+	map2_fd = bpf_map__fd(skel->maps.inner_map2);
+	outer_arr_fd = bpf_map__fd(skel->maps.outer_arr);
+	outer_hash_fd = bpf_map__fd(skel->maps.outer_hash);
+
 	/* inner1 = input, inner2 = input + 1 */
-	val = bpf_map__fd(skel->maps.inner_map1);
-	bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
-	val = bpf_map__fd(skel->maps.inner_map2);
-	bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
+	map1_fd = bpf_map__fd(skel->maps.inner_map1);
+	bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0);
+	map2_fd = bpf_map__fd(skel->maps.inner_map2);
+	bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0);
 	skel->bss->input = 1;
 	usleep(1);
 
-	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
+	bpf_map_lookup_elem(map1_fd, &key, &val);
 	CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1);
-	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
+	bpf_map_lookup_elem(map2_fd, &key, &val);
 	CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2);
 
 	/* inner1 = input + 1, inner2 = input */
-	val = bpf_map__fd(skel->maps.inner_map2);
-	bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
-	val = bpf_map__fd(skel->maps.inner_map1);
-	bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
+	bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0);
+	bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0);
 	skel->bss->input = 3;
 	usleep(1);
 
-	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
+	bpf_map_lookup_elem(map1_fd, &key, &val);
 	CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4);
-	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
+	bpf_map_lookup_elem(map2_fd, &key, &val);
 	CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3);
 
+	for (i = 0; i < 5; i++) {
+		val = i % 2 ? map1_fd : map2_fd;
+		err = bpf_map_update_elem(outer_hash_fd, &key, &val, 0);
+		if (CHECK_FAIL(err)) {
+			printf("failed to update hash_of_maps on iter #%d\n", i);
+			goto cleanup;
+		}
+		err = bpf_map_update_elem(outer_arr_fd, &key, &val, 0);
+		if (CHECK_FAIL(err)) {
+			printf("failed to update hash_of_maps on iter #%d\n", i);
+			goto cleanup;
+		}
+	}
+
+	map1_id = bpf_map_id(skel->maps.inner_map1);
+	map2_id = bpf_map_id(skel->maps.inner_map2);
+	CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
+	CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
+
+	test_btf_map_in_map__destroy(skel);
+	skel = NULL;
+
+	/* we need to either wait for or force synchronize_rcu(), before
+	 * checking for "still exists" condition, otherwise map could still be
+	 * resolvable by ID, causing false positives.
+	 *
+	 * Older kernels (5.8 and earlier) freed map only after two
+	 * synchronize_rcu()s, so trigger two, to be entirely sure.
+	 */
+	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+
+	fd = bpf_map_get_fd_by_id(map1_id);
+	if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
+		close(fd);
+		goto cleanup;
+	}
+	fd = bpf_map_get_fd_by_id(map2_id);
+	if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
+		close(fd);
+		goto cleanup;
+	}
+
 cleanup:
 	test_btf_map_in_map__destroy(skel);
 }
-- 
2.24.1



* Re: [PATCH v4 bpf 2/2] selftests/bpf: extend map-in-map selftest to detect memory leaks
  2020-07-29  4:09 ` [PATCH v4 bpf 2/2] selftests/bpf: extend map-in-map selftest to detect memory leaks Andrii Nakryiko
@ 2020-07-29 14:29   ` Jakub Sitnicki
  2020-07-29 17:48     ` Andrii Nakryiko
  0 siblings, 1 reply; 7+ messages in thread
From: Jakub Sitnicki @ 2020-07-29 14:29 UTC (permalink / raw)
  To: Andrii Nakryiko
  Cc: bpf, netdev, ast, daniel, andrii.nakryiko, kernel-team, Song Liu, stable

On Wed, Jul 29, 2020 at 06:09 AM CEST, Andrii Nakryiko wrote:
> Add test validating that all inner maps are released properly after skeleton
> is destroyed. To ensure determinism, trigger kernel-side synchronize_rcu()
> before checking map existence by their IDs.
>
> Acked-by: Song Liu <songliubraving@fb.com>
> Signed-off-by: Andrii Nakryiko <andriin@fb.com>
> ---
>  .../selftests/bpf/prog_tests/btf_map_in_map.c | 124 ++++++++++++++++--
>  1 file changed, 110 insertions(+), 14 deletions(-)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
> index f7ee8fa377ad..f6eee3fb933c 100644
> --- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
> +++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
> @@ -5,10 +5,60 @@
>
>  #include "test_btf_map_in_map.skel.h"
>
> +static int duration;
> +
> +static __u32 bpf_map_id(struct bpf_map *map)
> +{
> +	struct bpf_map_info info;
> +	__u32 info_len = sizeof(info);
> +	int err;
> +
> +	memset(&info, 0, info_len);
> +	err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
> +	if (err)
> +		return 0;
> +	return info.id;
> +}
> +
> +/*
> + * Trigger synchronize_cpu() in kernel.

Nit: synchronize_*r*cu().

> + *
> + * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger
> + * synchronize_rcu(), if looking up/updating non-NULL element. Use this fact
> + * to trigger synchronize_cpu(): create map-in-map, create a trivial ARRAY
> + * map, update map-in-map with ARRAY inner map. Then cleanup. At the end, at
> + * least one synchronize_rcu() would be called.
> + */

That's a cool trick. I'm a bit confused by "looking up/updating non-NULL
element". It looks like you're updating an element that is NULL/unset in
the code below. What am I missing?

> +static int kern_sync_rcu(void)
> +{
> +	int inner_map_fd, outer_map_fd, err, zero = 0;
> +
> +	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
> +	if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno))
> +		return -1;
> +
> +	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
> +					     sizeof(int), inner_map_fd, 1, 0);
> +	if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) {
> +		close(inner_map_fd);
> +		return -1;
> +	}
> +
> +	err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0);
> +	if (err)
> +		err = -errno;
> +	CHECK(err, "outer_map_update", "failed %d\n", err);
> +	close(inner_map_fd);
> +	close(outer_map_fd);
> +	return err;
> +}
> +
>  void test_btf_map_in_map(void)
>  {
> -	int duration = 0, err, key = 0, val;
> -	struct test_btf_map_in_map* skel;
> +	int err, key = 0, val, i;
> +	struct test_btf_map_in_map *skel;
> +	int outer_arr_fd, outer_hash_fd;
> +	int fd, map1_fd, map2_fd, map1_id, map2_id;
>
>  	skel = test_btf_map_in_map__open_and_load();
>  	if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
> @@ -18,32 +68,78 @@ void test_btf_map_in_map(void)
>  	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
>  		goto cleanup;
>
> +	map1_fd = bpf_map__fd(skel->maps.inner_map1);
> +	map2_fd = bpf_map__fd(skel->maps.inner_map2);
> +	outer_arr_fd = bpf_map__fd(skel->maps.outer_arr);
> +	outer_hash_fd = bpf_map__fd(skel->maps.outer_hash);
> +
>  	/* inner1 = input, inner2 = input + 1 */
> -	val = bpf_map__fd(skel->maps.inner_map1);
> -	bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
> -	val = bpf_map__fd(skel->maps.inner_map2);
> -	bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
> +	map1_fd = bpf_map__fd(skel->maps.inner_map1);
> +	bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0);
> +	map2_fd = bpf_map__fd(skel->maps.inner_map2);
> +	bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0);
>  	skel->bss->input = 1;
>  	usleep(1);
>
> -	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
> +	bpf_map_lookup_elem(map1_fd, &key, &val);
>  	CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1);
> -	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
> +	bpf_map_lookup_elem(map2_fd, &key, &val);
>  	CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2);
>
>  	/* inner1 = input + 1, inner2 = input */
> -	val = bpf_map__fd(skel->maps.inner_map2);
> -	bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
> -	val = bpf_map__fd(skel->maps.inner_map1);
> -	bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
> +	bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0);
> +	bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0);
>  	skel->bss->input = 3;
>  	usleep(1);
>
> -	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
> +	bpf_map_lookup_elem(map1_fd, &key, &val);
>  	CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4);
> -	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
> +	bpf_map_lookup_elem(map2_fd, &key, &val);
>  	CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3);
>
> +	for (i = 0; i < 5; i++) {
> +		val = i % 2 ? map1_fd : map2_fd;
> +		err = bpf_map_update_elem(outer_hash_fd, &key, &val, 0);
> +		if (CHECK_FAIL(err)) {
> +			printf("failed to update hash_of_maps on iter #%d\n", i);
> +			goto cleanup;
> +		}
> +		err = bpf_map_update_elem(outer_arr_fd, &key, &val, 0);
> +		if (CHECK_FAIL(err)) {
> +			printf("failed to update hash_of_maps on iter #%d\n", i);
> +			goto cleanup;
> +		}
> +	}
> +
> +	map1_id = bpf_map_id(skel->maps.inner_map1);
> +	map2_id = bpf_map_id(skel->maps.inner_map2);
> +	CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
> +	CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
> +
> +	test_btf_map_in_map__destroy(skel);
> +	skel = NULL;
> +
> +	/* we need to either wait for or force synchronize_rcu(), before
> +	 * checking for "still exists" condition, otherwise map could still be
> +	 * resolvable by ID, causing false positives.
> +	 *
> +	 * Older kernels (5.8 and earlier) freed map only after two
> +	 * synchronize_rcu()s, so trigger two, to be entirely sure.
> +	 */
> +	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
> +	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
> +
> +	fd = bpf_map_get_fd_by_id(map1_id);
> +	if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
> +		close(fd);
> +		goto cleanup;
> +	}
> +	fd = bpf_map_get_fd_by_id(map2_id);
> +	if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
> +		close(fd);
> +		goto cleanup;
> +	}
> +
>  cleanup:
>  	test_btf_map_in_map__destroy(skel);
>  }


* Re: [PATCH v4 bpf 2/2] selftests/bpf: extend map-in-map selftest to detect memory leaks
  2020-07-29 14:29   ` Jakub Sitnicki
@ 2020-07-29 17:48     ` Andrii Nakryiko
  2020-07-29 20:29       ` Jakub Sitnicki
  2020-07-29 21:17       ` Daniel Borkmann
  0 siblings, 2 replies; 7+ messages in thread
From: Andrii Nakryiko @ 2020-07-29 17:48 UTC (permalink / raw)
  To: Jakub Sitnicki
  Cc: Andrii Nakryiko, bpf, Networking, Alexei Starovoitov,
	Daniel Borkmann, Kernel Team, Song Liu, linux-stable

On Wed, Jul 29, 2020 at 7:29 AM Jakub Sitnicki <jakub@cloudflare.com> wrote:
>
> On Wed, Jul 29, 2020 at 06:09 AM CEST, Andrii Nakryiko wrote:
> > Add test validating that all inner maps are released properly after skeleton
> > is destroyed. To ensure determinism, trigger kernel-side synchronize_rcu()
> > before checking map existence by their IDs.
> >
> > Acked-by: Song Liu <songliubraving@fb.com>
> > Signed-off-by: Andrii Nakryiko <andriin@fb.com>
> > ---
> >  .../selftests/bpf/prog_tests/btf_map_in_map.c | 124 ++++++++++++++++--
> >  1 file changed, 110 insertions(+), 14 deletions(-)
> >
> > diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
> > index f7ee8fa377ad..f6eee3fb933c 100644
> > --- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
> > +++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
> > @@ -5,10 +5,60 @@
> >
> >  #include "test_btf_map_in_map.skel.h"
> >
> > +static int duration;
> > +
> > +static __u32 bpf_map_id(struct bpf_map *map)
> > +{
> > +     struct bpf_map_info info;
> > +     __u32 info_len = sizeof(info);
> > +     int err;
> > +
> > +     memset(&info, 0, info_len);
> > +     err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
> > +     if (err)
> > +             return 0;
> > +     return info.id;
> > +}
> > +
> > +/*
> > + * Trigger synchronize_cpu() in kernel.
>
> Nit: synchronize_*r*cu().

welp, yeah

>
> > + *
> > + * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger
> > + * synchronize_rcu(), if looking up/updating non-NULL element. Use this fact
> > + * to trigger synchronize_cpu(): create map-in-map, create a trivial ARRAY
> > + * map, update map-in-map with ARRAY inner map. Then cleanup. At the end, at
> > + * least one synchronize_rcu() would be called.
> > + */
>
> That's a cool trick. I'm a bit confused by "looking up/updating non-NULL
> element". It looks like you're updating an element that is NULL/unset in
> the code below. What am I missing?

I was basically trying to say that it has to be a successful lookup or
update. For lookup that means looking up non-NULL (existing) entry.
For update -- setting valid inner map FD.

Not sure fixing this and typo above is worth it to post v5.

>
> > +static int kern_sync_rcu(void)
> > +{
> > +     int inner_map_fd, outer_map_fd, err, zero = 0;
> > +
> > +     inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
> > +     if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno))
> > +             return -1;
> > +
> > +     outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
> > +                                          sizeof(int), inner_map_fd, 1, 0);
> > +     if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) {
> > +             close(inner_map_fd);
> > +             return -1;
> > +     }
> > +
> > +     err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0);
> > +     if (err)
> > +             err = -errno;
> > +     CHECK(err, "outer_map_update", "failed %d\n", err);
> > +     close(inner_map_fd);
> > +     close(outer_map_fd);
> > +     return err;
> > +}
> > +

[...]

trimming's good ;)


* Re: [PATCH v4 bpf 2/2] selftests/bpf: extend map-in-map selftest to detect memory leaks
  2020-07-29 17:48     ` Andrii Nakryiko
@ 2020-07-29 20:29       ` Jakub Sitnicki
  2020-07-29 21:17       ` Daniel Borkmann
  1 sibling, 0 replies; 7+ messages in thread
From: Jakub Sitnicki @ 2020-07-29 20:29 UTC (permalink / raw)
  To: Andrii Nakryiko
  Cc: Andrii Nakryiko, bpf, Networking, Alexei Starovoitov,
	Daniel Borkmann, Kernel Team, Song Liu, linux-stable

On Wed, Jul 29, 2020 at 07:48 PM CEST, Andrii Nakryiko wrote:
> On Wed, Jul 29, 2020 at 7:29 AM Jakub Sitnicki <jakub@cloudflare.com> wrote:
>>
>> On Wed, Jul 29, 2020 at 06:09 AM CEST, Andrii Nakryiko wrote:

[...]

>> > +/*
>> > + * Trigger synchronize_cpu() in kernel.
>>
>> Nit: synchronize_*r*cu().
>
> welp, yeah
>
>>
>> > + *
>> > + * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger
>> > + * synchronize_rcu(), if looking up/updating non-NULL element. Use this fact
>> > + * to trigger synchronize_cpu(): create map-in-map, create a trivial ARRAY
>> > + * map, update map-in-map with ARRAY inner map. Then cleanup. At the end, at
>> > + * least one synchronize_rcu() would be called.
>> > + */
>>
>> That's a cool trick. I'm a bit confused by "looking up/updating non-NULL
>> element". It looks like you're updating an element that is NULL/unset in
>> the code below. What am I missing?
>
> I was basically trying to say that it has to be a successful lookup or
> update. For lookup that means looking up non-NULL (existing) entry.
> For update -- setting valid inner map FD.
>
> Not sure fixing this and typo above is worth it to post v5.

I just wanted to understand that the helper is working as intended. It
seems handy. I agree that it's not worth respinning the patches just for
this.

>
>>
>> > +static int kern_sync_rcu(void)
>> > +{
>> > +     int inner_map_fd, outer_map_fd, err, zero = 0;
>> > +
>> > +     inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
>> > +     if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno))
>> > +             return -1;
>> > +
>> > +     outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
>> > +                                          sizeof(int), inner_map_fd, 1, 0);
>> > +     if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) {
>> > +             close(inner_map_fd);
>> > +             return -1;
>> > +     }
>> > +
>> > +     err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0);
>> > +     if (err)
>> > +             err = -errno;
>> > +     CHECK(err, "outer_map_update", "failed %d\n", err);
>> > +     close(inner_map_fd);
>> > +     close(outer_map_fd);
>> > +     return err;
>> > +}
>> > +
>
> [...]
>
> trimming's good ;)

You caught me. Just being lazy. No excuses :-)


* Re: [PATCH v4 bpf 2/2] selftests/bpf: extend map-in-map selftest to detect memory leaks
  2020-07-29 17:48     ` Andrii Nakryiko
  2020-07-29 20:29       ` Jakub Sitnicki
@ 2020-07-29 21:17       ` Daniel Borkmann
  1 sibling, 0 replies; 7+ messages in thread
From: Daniel Borkmann @ 2020-07-29 21:17 UTC (permalink / raw)
  To: Andrii Nakryiko, Jakub Sitnicki
  Cc: Andrii Nakryiko, bpf, Networking, Alexei Starovoitov,
	Kernel Team, Song Liu, linux-stable

On 7/29/20 7:48 PM, Andrii Nakryiko wrote:
> On Wed, Jul 29, 2020 at 7:29 AM Jakub Sitnicki <jakub@cloudflare.com> wrote:
>>
>> On Wed, Jul 29, 2020 at 06:09 AM CEST, Andrii Nakryiko wrote:
>>> Add test validating that all inner maps are released properly after skeleton
>>> is destroyed. To ensure determinism, trigger kernel-side synchronize_rcu()
>>> before checking map existence by their IDs.
>>>
>>> Acked-by: Song Liu <songliubraving@fb.com>
>>> Signed-off-by: Andrii Nakryiko <andriin@fb.com>
[...]
>>> +/*
>>> + * Trigger synchronize_cpu() in kernel.
>>
>> Nit: synchronize_*r*cu().
> 
> welp, yeah
> 
>>
>>> + *
>>> + * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger
>>> + * synchronize_rcu(), if looking up/updating non-NULL element. Use this fact
>>> + * to trigger synchronize_cpu(): create map-in-map, create a trivial ARRAY
>>> + * map, update map-in-map with ARRAY inner map. Then cleanup. At the end, at
>>> + * least one synchronize_rcu() would be called.
>>> + */
>>
>> That's a cool trick. I'm a bit confused by "looking up/updating non-NULL
>> element". It looks like you're updating an element that is NULL/unset in
>> the code below. What am I missing?
> 
> I was basically trying to say that it has to be a successful lookup or
> update. For lookup that means looking up non-NULL (existing) entry.
> For update -- setting valid inner map FD.
> 
> Not sure fixing this and typo above is worth it to post v5.

Nope, I'll fix it up while applying.


* Re: [PATCH v4 bpf 1/2] bpf: fix map leak in HASH_OF_MAPS map
  2020-07-29  4:09 [PATCH v4 bpf 1/2] bpf: fix map leak in HASH_OF_MAPS map Andrii Nakryiko
  2020-07-29  4:09 ` [PATCH v4 bpf 2/2] selftests/bpf: extend map-in-map selftest to detect memory leaks Andrii Nakryiko
@ 2020-07-29 23:44 ` Daniel Borkmann
  1 sibling, 0 replies; 7+ messages in thread
From: Daniel Borkmann @ 2020-07-29 23:44 UTC (permalink / raw)
  To: Andrii Nakryiko, bpf, netdev, ast
  Cc: andrii.nakryiko, kernel-team, Song Liu, stable

On 7/29/20 6:09 AM, Andrii Nakryiko wrote:
> Fix a HASH_OF_MAPS bug where the inner map pointer is not put on a
> bpf_map_update_elem() operation. This is due to the per-cpu extra_elems
> optimization, which bypasses the free_htab_elem() logic that does the proper
> cleanup. Make sure the inner map is put properly in the optimized case as
> well.
> 
> Fixes: 8c290e60fa2a ("bpf: fix hashmap extra_elems logic")
> Acked-by: Song Liu <songliubraving@fb.com>
> Cc: <stable@vger.kernel.org> # v4.14+
> Signed-off-by: Andrii Nakryiko <andriin@fb.com>

Both applied, thanks!


