bpf.vger.kernel.org archive mirror
* [PATCH bpf] selftest/bpf: Validate initial values of per-cpu hash elems
@ 2020-10-29 11:17 David Verbeiren
  2020-10-29 18:36 ` Song Liu
  0 siblings, 1 reply; 6+ messages in thread
From: David Verbeiren @ 2020-10-29 11:17 UTC (permalink / raw)
  To: bpf; +Cc: netdev, David Verbeiren, Andrii Nakryiko

Test that when per-cpu hash map or LRU hash map elements are
re-used as a result of a BPF program inserting elements, the
element values for CPUs other than the one executing the
BPF code are reset to 0.

This validates the fix proposed in:
https://lkml.kernel.org/bpf/20201027221324.27894-1-david.verbeiren@tessares.net/

Change-Id: I38bc7b3744ed40704a7b2cc6efa179fb344c4bee
Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: David Verbeiren <david.verbeiren@tessares.net>
---
 .../selftests/bpf/prog_tests/map_init.c       | 204 ++++++++++++++++++
 1 file changed, 204 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/map_init.c

diff --git a/tools/testing/selftests/bpf/prog_tests/map_init.c b/tools/testing/selftests/bpf/prog_tests/map_init.c
new file mode 100644
index 000000000000..9640cf925908
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_init.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2020 Tessares SA <http://www.tessares.net>
+
+#include <test_progs.h>
+
+#define TEST_VALUE 0x1234
+
+static int nr_cpus;
+static int duration;
+static char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
+typedef unsigned long long map_key_t;
+typedef unsigned long long map_value_t;
+typedef struct {
+	map_value_t v; /* padding */
+} __bpf_percpu_val_align pcpu_map_value_t;
+
+/* executes bpf program that updates map with key, value */
+static int bpf_prog_insert_elem(int fd, map_key_t key, map_value_t value)
+{
+	struct bpf_load_program_attr prog;
+	struct bpf_insn insns[] = {
+		BPF_LD_IMM64(BPF_REG_8, key),
+		BPF_LD_IMM64(BPF_REG_9, value),
+
+		/* update: R1=fd, R2=&key, R3=&value, R4=flags */
+		BPF_LD_MAP_FD(BPF_REG_1, fd),
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),	/* r2 = fp - 8 */
+		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_8, 0),	/* key @ fp-8 */
+		BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),	/* r3 = fp - 16 */
+		BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_9, 0),	/* value @ fp-16 */
+		BPF_MOV64_IMM(BPF_REG_4, 0),
+		BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
+
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+	};
+	char buf[64] = {};
+	int pfd, err;
+	__u32 retval = 0;
+
+	memset(&prog, 0, sizeof(prog));
+	prog.prog_type = BPF_PROG_TYPE_SCHED_CLS;
+	prog.insns = insns;
+	prog.insns_cnt = ARRAY_SIZE(insns);
+	prog.license = "GPL";
+
+	pfd = bpf_load_program_xattr(&prog, bpf_log_buf, BPF_LOG_BUF_SIZE);
+	if (CHECK(pfd < 0, "bpf_load_program_xattr", "failed: %s\n%s\n",
+		  strerror(errno), bpf_log_buf))
+		return -1;
+
+	err = bpf_prog_test_run(pfd, 1, buf, sizeof(buf), NULL, NULL,
+				&retval, NULL);
+	if (CHECK(err || retval, "bpf_prog_test_run",
+		  "err=%d retval=%d errno=%d\n", err, retval, errno))
+		err = -1;
+
+	close(pfd);
+
+	return err;
+}
+
+static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
+{
+	int i, nzCnt = 0;
+	map_value_t val;
+
+	for (i = 0; i < nr_cpus; i++) {
+		val = bpf_percpu(value, i);
+		if (val) {
+			if (val != expected) {
+				PRINT_FAIL("Unexpected value (cpu %d): 0x%llx\n",
+					   i, val);
+				return -1;
+			}
+			nzCnt++;
+		}
+	}
+
+	if (CHECK(nzCnt != 1, "non-zero-count",
+		   "Map value set for %d CPUs instead of 1!\n", nzCnt))
+		return -1;
+
+	return 0;
+}
+
+static int map_setup(int map_type, int max, int num)
+{
+	pcpu_map_value_t value[nr_cpus];
+	int map_fd, i, err;
+	map_key_t key;
+
+	map_fd = bpf_create_map(map_type, sizeof(key),
+				sizeof(pcpu_map_value_t), max, 0);
+	if (CHECK(map_fd < 0, "bpf_create_map", "failed: %s\n",
+		  strerror(errno)))
+		return -1;
+
+	for (i = 0; i < nr_cpus; i++)
+		bpf_percpu(value, i) = 0xdeadbeef;
+
+	for (key = 1; key <= num; key++) {
+		err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
+		if (CHECK(err, "bpf_map_update_elem", "(key=%llu) failed: %s\n",
+			  key, strerror(errno))) {
+			close(map_fd);
+			return -1;
+		}
+	}
+
+	return map_fd;
+}
+
+/* Add key=1 elem with values set for all CPUs
+ * Delete elem key=1
+ * Run bpf prog that inserts new key=1 elem with value=0x1234
+ *   (bpf prog can only set value for current CPU)
+ * Lookup key=1 and check value is as expected for all CPUs:
+ *   value set by bpf prog for one CPU, 0 for all others
+ */
+static void test_pcpu_map_init(void)
+{
+	pcpu_map_value_t value[nr_cpus];
+	int map_fd, err;
+	map_key_t key;
+
+	/* set up map with 1 element, value filled for all CPUs */
+	map_fd = map_setup(BPF_MAP_TYPE_PERCPU_HASH, 2, 1);
+	if (CHECK(map_fd < 0, "map_setup", "failed\n"))
+		return;
+
+	/* delete key=1 element so it will later be re-used */
+	key = 1;
+	err = bpf_map_delete_elem(map_fd, &key);
+	if (CHECK(err, "bpf_map_delete_elem", "failed: %s\n", strerror(errno)))
+		goto error_map;
+
+	/* run bpf prog that inserts new elem, re-using the slot just freed */
+	err = bpf_prog_insert_elem(map_fd, key, TEST_VALUE);
+	if (!ASSERT_OK(err, "bpf_prog_insert_elem"))
+		goto error_map;
+
+	/* check that key=1 was re-created by bpf prog */
+	err = bpf_map_lookup_elem(map_fd, &key, value);
+	if (CHECK(err, "bpf_map_lookup_elem", "failed: %s\n", strerror(errno)))
+		goto error_map;
+
+	/* and has expected value for just a single CPU, 0 for all others */
+	check_values_one_cpu(value, TEST_VALUE);
+
+error_map:
+	close(map_fd);
+}
+
+/* Add key=1 and key=2 elems with values set for all CPUs
+ * Run bpf prog that inserts new key=3 elem
+ *   (only for current cpu; other cpus should have initial value = 0)
+ * Lookup key=3 and check value is as expected for all CPUs
+ */
+static void test_pcpu_lru_map_init(void)
+{
+	pcpu_map_value_t value[nr_cpus];
+	int map_fd, err;
+	map_key_t key;
+
+	/* Set up LRU map with 2 elements, values filled for all CPUs.
+	 * With these 2 elements, the LRU map is full
+	 */
+	map_fd = map_setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, 2);
+	if (CHECK(map_fd < 0, "map_setup", "failed\n"))
+		return;
+
+	/* run bpf prog that inserts new key=3 element, re-using LRU slot */
+	key = 3;
+	err = bpf_prog_insert_elem(map_fd, key, TEST_VALUE);
+	if (!ASSERT_OK(err, "bpf_prog_insert_elem"))
+		goto error_map;
+
+	/* check that key=3 is present */
+	err = bpf_map_lookup_elem(map_fd, &key, value);
+	if (CHECK(err, "bpf_map_lookup_elem", "failed: %s\n", strerror(errno)))
+		goto error_map;
+
+	/* and has expected value for just a single CPU, 0 for all others */
+	check_values_one_cpu(value, TEST_VALUE);
+
+error_map:
+	close(map_fd);
+}
+
+void test_map_init(void)
+{
+	nr_cpus = bpf_num_possible_cpus();
+	if (CHECK(nr_cpus <= 1, "nr_cpus", "> 1 needed for this test"))
+		return;
+
+	if (test__start_subtest("pcpu_map_init"))
+		test_pcpu_map_init();
+	if (test__start_subtest("pcpu_lru_map_init"))
+		test_pcpu_lru_map_init();
+}
-- 
2.29.0


* Re: [PATCH bpf] selftest/bpf: Validate initial values of per-cpu hash elems
  2020-10-29 11:17 [PATCH bpf] selftest/bpf: Validate initial values of per-cpu hash elems David Verbeiren
@ 2020-10-29 18:36 ` Song Liu
  2020-10-29 22:37   ` Andrii Nakryiko
  0 siblings, 1 reply; 6+ messages in thread
From: Song Liu @ 2020-10-29 18:36 UTC (permalink / raw)
  To: David Verbeiren; +Cc: bpf, Networking, Andrii Nakryiko

On Thu, Oct 29, 2020 at 4:19 AM David Verbeiren
<david.verbeiren@tessares.net> wrote:
>
> Test that when per-cpu hash map or LRU hash map elements are
> re-used as a result of a BPF program inserting elements, the
> element values for CPUs other than the one executing the
> BPF code are reset to 0.
>
> This validates the fix proposed in:
> https://lkml.kernel.org/bpf/20201027221324.27894-1-david.verbeiren@tessares.net/
>
> Change-Id: I38bc7b3744ed40704a7b2cc6efa179fb344c4bee
> Suggested-by: Andrii Nakryiko <andrii@kernel.org>
> Signed-off-by: David Verbeiren <david.verbeiren@tessares.net>
> ---
>  .../selftests/bpf/prog_tests/map_init.c       | 204 ++++++++++++++++++
>  1 file changed, 204 insertions(+)
>  create mode 100644 tools/testing/selftests/bpf/prog_tests/map_init.c
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/map_init.c b/tools/testing/selftests/bpf/prog_tests/map_init.c
> new file mode 100644
> index 000000000000..9640cf925908
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/prog_tests/map_init.c
> @@ -0,0 +1,204 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +// Copyright (c) 2020 Tessares SA <http://www.tessares.net>
> +
> +#include <test_progs.h>
> +
> +#define TEST_VALUE 0x1234
> +
> +static int nr_cpus;
> +static int duration;
> +static char bpf_log_buf[BPF_LOG_BUF_SIZE];
> +
> +typedef unsigned long long map_key_t;
> +typedef unsigned long long map_value_t;
> +typedef struct {
> +       map_value_t v; /* padding */
> +} __bpf_percpu_val_align pcpu_map_value_t;
> +
> +/* executes bpf program that updates map with key, value */
> +static int bpf_prog_insert_elem(int fd, map_key_t key, map_value_t value)
> +{
> +       struct bpf_load_program_attr prog;
> +       struct bpf_insn insns[] = {
> +               BPF_LD_IMM64(BPF_REG_8, key),
> +               BPF_LD_IMM64(BPF_REG_9, value),
> +
> +               /* update: R1=fd, R2=&key, R3=&value, R4=flags */
> +               BPF_LD_MAP_FD(BPF_REG_1, fd),
> +               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
> +               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
> +               BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_8, 0),
> +               BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
> +               BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
> +               BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_9, 0),
> +               BPF_MOV64_IMM(BPF_REG_4, 0),
> +               BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
> +
> +               BPF_MOV64_IMM(BPF_REG_0, 0),
> +               BPF_EXIT_INSN(),
> +       };

Impressive hand-written assembly. ;-) I would recommend using a
skeleton for future work. For example:

    BPF program: selftests/bpf/progs/bpf_iter_bpf_map.c
    Use the program in tests:
selftests/bpf/prog_tests/bpf_iter.c:#include "bpf_iter_bpf_map.skel.h"

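To make it concrete, the BPF side of such a test could be as small as
this (rough, untested sketch; file and map names are made up):

    /* progs/test_map_init.c */
    // SPDX-License-Identifier: GPL-2.0-only
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
            __uint(max_entries, 2);
            __type(key, __u64);
            __type(value, __u64);
    } hashmap1 SEC(".maps");

    /* set by userspace through the skeleton before triggering */
    __u64 inKey = 0;
    __u64 inValue = 0;

    SEC("tp/syscalls/sys_enter_getpgid")
    int sysenter_getpgid(const void *ctx)
    {
            /* updates only the current CPU's slot; the test then
             * checks that every other CPU reads back 0
             */
            bpf_map_update_elem(&hashmap1, &inKey, &inValue, BPF_NOEXIST);
            return 0;
    }

    char _license[] SEC("license") = "GPL";

The userspace side then reduces to test_map_init__open_and_load(),
attach, trigger the syscall once, and bpf_map_lookup_elem() on the
map FD taken from the skeleton.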

> +       char buf[64] = {};
> +       int pfd, err;
> +       __u32 retval = 0;
> +
> +       memset(&prog, 0, sizeof(prog));
> +       prog.prog_type = BPF_PROG_TYPE_SCHED_CLS;
> +       prog.insns = insns;
> +       prog.insns_cnt = ARRAY_SIZE(insns);
> +       prog.license = "GPL";
> +
> +       pfd = bpf_load_program_xattr(&prog, bpf_log_buf, BPF_LOG_BUF_SIZE);
> +       if (CHECK(pfd < 0, "bpf_load_program_xattr", "failed: %s\n%s\n",
> +                 strerror(errno), bpf_log_buf))
> +               return -1;
> +
> +       err = bpf_prog_test_run(pfd, 1, buf, sizeof(buf), NULL, NULL,
> +                               &retval, NULL);
> +       if (CHECK(err || retval, "bpf_prog_test_run",
> +                 "err=%d retval=%d errno=%d\n", err, retval, errno))
> +               err = -1;
> +
> +       close(pfd);
> +
> +       return err;
> +}
> +
> +static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
> +{
> +       int i, nzCnt = 0;
> +       map_value_t val;
> +
> +       for (i = 0; i < nr_cpus; i++) {
> +               val = bpf_percpu(value, i);
> +               if (val) {
> +                       if (val != expected) {
> +                               PRINT_FAIL("Unexpected value (cpu %d): 0x%llx\n",
> +                                          i, val);

I guess we can also use CHECK() here?

> +                               return -1;
> +                       }
[...]

> +
> +       /* delete key=1 element so it will later be re-used*/
> +       key = 1;
> +       err = bpf_map_delete_elem(map_fd, &key);
> +       if (CHECK(err, "bpf_map_delete_elem", "failed: %s\n", strerror(errno)))
> +               goto error_map;
> +
> +       /* run bpf prog that inserts new elem, re-using the slot just freed */
> +       err = bpf_prog_insert_elem(map_fd, key, TEST_VALUE);
> +       if (!ASSERT_OK(err, "bpf_prog_insert_elem"))
> +               goto error_map;

What's the reason to use ASSERT_OK() instead of CHECK()?

> +
> +       /* check that key=1 was re-created by bpf prog */
> +       err = bpf_map_lookup_elem(map_fd, &key, value);
> +       if (CHECK(err, "bpf_map_lookup_elem", "failed: %s\n", strerror(errno)))
> +               goto error_map;
> +
> +       /* and has expected value for just a single CPU, 0 for all others */
> +       check_values_one_cpu(value, TEST_VALUE);
> +
> +error_map:
> +       close(map_fd);
> +}
> +
> +/* Add key=1 and key=2 elems with values set for all CPUs
> + * Run bpf prog that inserts new key=3 elem
> + *   (only for current cpu; other cpus should have initial value = 0)
> + * Lookup Key=1 and check value is as expected for all CPUs
> + */
> +static void test_pcpu_lru_map_init(void)
> +{
> +       pcpu_map_value_t value[nr_cpus];
> +       int map_fd, err;
> +       map_key_t key;
> +
> +       /* Set up LRU map with 2 elements, values filled for all CPUs.
> +        * With these 2 elements, the LRU map is full
> +        */
> +       map_fd = map_setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, 2);
> +       if (CHECK(map_fd < 0, "map_setup", "failed\n"))
> +               return;
> +
> +       /* run bpf prog that inserts new key=3 element, re-using LRU slot */
> +       key = 3;
> +       err = bpf_prog_insert_elem(map_fd, key, TEST_VALUE);
> +       if (!ASSERT_OK(err, "bpf_prog_insert_elem"))
> +               goto error_map;

ditto

> +
> +       /* check that key=3 present */
> +       err = bpf_map_lookup_elem(map_fd, &key, value);
> +       if (CHECK(err, "bpf_map_lookup_elem", "failed: %s\n", strerror(errno)))
> +               goto error_map;
> +
> +       /* and has expected value for just a single CPU, 0 for all others */
> +       check_values_one_cpu(value, TEST_VALUE);
> +
> +error_map:
> +       close(map_fd);
> +}
> +
> +void test_map_init(void)
> +{
> +       nr_cpus = bpf_num_possible_cpus();
> +       if (CHECK(nr_cpus <= 1, "nr_cpus", "> 1 needed for this test"))
> +               return;

Instead of failing the test, let's skip it with something like:

                printf("%s:SKIP: >1 cpu needed for this test\n", __func__);
                test__skip();
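
i.e., keeping the subtest structure but marking it skipped (sketch):

                if (nr_cpus <= 1) {
                        printf("%s:SKIP: >1 cpu needed for this test\n",
                               __func__);
                        test__skip();
                        return;
                }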

> +
> +       if (test__start_subtest("pcpu_map_init"))
> +               test_pcpu_map_init();
> +       if (test__start_subtest("pcpu_lru_map_init"))
> +               test_pcpu_lru_map_init();
> +}
> --
> 2.29.0
>

* Re: [PATCH bpf] selftest/bpf: Validate initial values of per-cpu hash elems
  2020-10-29 18:36 ` Song Liu
@ 2020-10-29 22:37   ` Andrii Nakryiko
  2020-10-29 23:37     ` Song Liu
  2020-11-02 11:41     ` David Verbeiren
  0 siblings, 2 replies; 6+ messages in thread
From: Andrii Nakryiko @ 2020-10-29 22:37 UTC (permalink / raw)
  To: Song Liu; +Cc: David Verbeiren, bpf, Networking, Andrii Nakryiko

On Thu, Oct 29, 2020 at 11:36 AM Song Liu <song@kernel.org> wrote:
>
> On Thu, Oct 29, 2020 at 4:19 AM David Verbeiren
> <david.verbeiren@tessares.net> wrote:
> >
> > Test that when per-cpu hash map or LRU hash map elements are
> > re-used as a result of a BPF program inserting elements, the
> > element values for CPUs other than the one executing the
> > BPF code are reset to 0.
> >
> > This validates the fix proposed in:
> > https://lkml.kernel.org/bpf/20201027221324.27894-1-david.verbeiren@tessares.net/
> >
> > Change-Id: I38bc7b3744ed40704a7b2cc6efa179fb344c4bee
> > Suggested-by: Andrii Nakryiko <andrii@kernel.org>
> > Signed-off-by: David Verbeiren <david.verbeiren@tessares.net>
> > ---
> >  .../selftests/bpf/prog_tests/map_init.c       | 204 ++++++++++++++++++
> >  1 file changed, 204 insertions(+)
> >  create mode 100644 tools/testing/selftests/bpf/prog_tests/map_init.c
> >
> > diff --git a/tools/testing/selftests/bpf/prog_tests/map_init.c b/tools/testing/selftests/bpf/prog_tests/map_init.c
> > new file mode 100644
> > index 000000000000..9640cf925908
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/prog_tests/map_init.c
> > @@ -0,0 +1,204 @@
> > +// SPDX-License-Identifier: GPL-2.0-only
> > +// Copyright (c) 2020 Tessares SA <http://www.tessares.net>
> > +
> > +#include <test_progs.h>
> > +
> > +#define TEST_VALUE 0x1234
> > +
> > +static int nr_cpus;
> > +static int duration;
> > +static char bpf_log_buf[BPF_LOG_BUF_SIZE];
> > +
> > +typedef unsigned long long map_key_t;
> > +typedef unsigned long long map_value_t;
> > +typedef struct {
> > +       map_value_t v; /* padding */
> > +} __bpf_percpu_val_align pcpu_map_value_t;
> > +
> > +/* executes bpf program that updates map with key, value */
> > +static int bpf_prog_insert_elem(int fd, map_key_t key, map_value_t value)
> > +{
> > +       struct bpf_load_program_attr prog;
> > +       struct bpf_insn insns[] = {
> > +               BPF_LD_IMM64(BPF_REG_8, key),
> > +               BPF_LD_IMM64(BPF_REG_9, value),
> > +
> > +               /* update: R1=fd, R2=&key, R3=&value, R4=flags */
> > +               BPF_LD_MAP_FD(BPF_REG_1, fd),
> > +               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
> > +               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
> > +               BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_8, 0),
> > +               BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
> > +               BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
> > +               BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_9, 0),
> > +               BPF_MOV64_IMM(BPF_REG_4, 0),
> > +               BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
> > +
> > +               BPF_MOV64_IMM(BPF_REG_0, 0),
> > +               BPF_EXIT_INSN(),
> > +       };
>
> Impressive hand-written assembly. ;-) I would recommend using a
> skeleton for future work. For example:
>
>     BPF program: selftests/bpf/progs/bpf_iter_bpf_map.c
>     Use the program in tests:
> selftests/bpf/prog_tests/bpf_iter.c:#include "bpf_iter_bpf_map.skel.h"
>

Let's keep manually-constructed assembly for test_verifier tests only.

David, please also check progs/test_endian.c and prog_tests/endian.c
as one of the most minimal self-tests with no added complexity, but
complete end-to-end setup.


>
> > +       char buf[64] = {};
> > +       int pfd, err;
> > +       __u32 retval = 0;
> > +
> > +       memset(&prog, 0, sizeof(prog));
> > +       prog.prog_type = BPF_PROG_TYPE_SCHED_CLS;
> > +       prog.insns = insns;
> > +       prog.insns_cnt = ARRAY_SIZE(insns);
> > +       prog.license = "GPL";
> > +
> > +       pfd = bpf_load_program_xattr(&prog, bpf_log_buf, BPF_LOG_BUF_SIZE);
> > +       if (CHECK(pfd < 0, "bpf_load_program_xattr", "failed: %s\n%s\n",
> > +                 strerror(errno), bpf_log_buf))
> > +               return -1;
> > +
> > +       err = bpf_prog_test_run(pfd, 1, buf, sizeof(buf), NULL, NULL,
> > +                               &retval, NULL);
> > +       if (CHECK(err || retval, "bpf_prog_test_run",
> > +                 "err=%d retval=%d errno=%d\n", err, retval, errno))
> > +               err = -1;
> > +
> > +       close(pfd);
> > +
> > +       return err;
> > +}
> > +
> > +static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
> > +{
> > +       int i, nzCnt = 0;
> > +       map_value_t val;
> > +
> > +       for (i = 0; i < nr_cpus; i++) {
> > +               val = bpf_percpu(value, i);
> > +               if (val) {
> > +                       if (val != expected) {
> > +                               PRINT_FAIL("Unexpected value (cpu %d): 0x%llx\n",
> > +                                          i, val);
>
> I guess we can also use CHECK() here?
>
> > +                               return -1;
> > +                       }
> [...]
>
> > +
> > +       /* delete key=1 element so it will later be re-used*/
> > +       key = 1;
> > +       err = bpf_map_delete_elem(map_fd, &key);
> > +       if (CHECK(err, "bpf_map_delete_elem", "failed: %s\n", strerror(errno)))
> > +               goto error_map;
> > +
> > +       /* run bpf prog that inserts new elem, re-using the slot just freed */
> > +       err = bpf_prog_insert_elem(map_fd, key, TEST_VALUE);
> > +       if (!ASSERT_OK(err, "bpf_prog_insert_elem"))
> > +               goto error_map;
>
> What's the reason to use ASSERT_OK() instead of CHECK()?

I've recently added the ASSERT_xxx() family of macros to accommodate
the most common checks and provide sensible printing of details. So I
now always prefer the ASSERT() macros; they save a bunch of typing and
time.
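
For example, the map-delete check in this patch could be written either
way; ASSERT_OK() prints the unexpected error and errno for you:

    /* CHECK() needs an explicit message (and a `duration` variable) */
    if (CHECK(err, "bpf_map_delete_elem", "failed: %s\n", strerror(errno)))
            goto error_map;

    /* ASSERT_OK() is equivalent here, with less boilerplate */
    if (!ASSERT_OK(err, "bpf_map_delete_elem"))
            goto error_map;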

>
> > +
> > +       /* check that key=1 was re-created by bpf prog */
> > +       err = bpf_map_lookup_elem(map_fd, &key, value);
> > +       if (CHECK(err, "bpf_map_lookup_elem", "failed: %s\n", strerror(errno)))
> > +               goto error_map;
> > +
> > +       /* and has expected value for just a single CPU, 0 for all others */
> > +       check_values_one_cpu(value, TEST_VALUE);
> > +
> > +error_map:
> > +       close(map_fd);
> > +}
> > +
> > +/* Add key=1 and key=2 elems with values set for all CPUs
> > + * Run bpf prog that inserts new key=3 elem
> > + *   (only for current cpu; other cpus should have initial value = 0)
> > + * Lookup Key=1 and check value is as expected for all CPUs
> > + */
> > +static void test_pcpu_lru_map_init(void)
> > +{
> > +       pcpu_map_value_t value[nr_cpus];
> > +       int map_fd, err;
> > +       map_key_t key;
> > +
> > +       /* Set up LRU map with 2 elements, values filled for all CPUs.
> > +        * With these 2 elements, the LRU map is full
> > +        */
> > +       map_fd = map_setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, 2);
> > +       if (CHECK(map_fd < 0, "map_setup", "failed\n"))
> > +               return;
> > +
> > +       /* run bpf prog that inserts new key=3 element, re-using LRU slot */
> > +       key = 3;
> > +       err = bpf_prog_insert_elem(map_fd, key, TEST_VALUE);
> > +       if (!ASSERT_OK(err, "bpf_prog_insert_elem"))
> > +               goto error_map;
>
> ditto
>
> > +
> > +       /* check that key=3 present */
> > +       err = bpf_map_lookup_elem(map_fd, &key, value);
> > +       if (CHECK(err, "bpf_map_lookup_elem", "failed: %s\n", strerror(errno)))
> > +               goto error_map;
> > +
> > +       /* and has expected value for just a single CPU, 0 for all others */
> > +       check_values_one_cpu(value, TEST_VALUE);
> > +
> > +error_map:
> > +       close(map_fd);
> > +}
> > +
> > +void test_map_init(void)
> > +{
> > +       nr_cpus = bpf_num_possible_cpus();
> > +       if (CHECK(nr_cpus <= 1, "nr_cpus", "> 1 needed for this test"))
> > +               return;
>
> Instead of failing the test, let's skip it with something like:
>
>                 printf("%s:SKIP: >1 cpu needed for this test\n", __func__);
>                 test__skip();
>

+1

> > +
> > +       if (test__start_subtest("pcpu_map_init"))
> > +               test_pcpu_map_init();
> > +       if (test__start_subtest("pcpu_lru_map_init"))
> > +               test_pcpu_lru_map_init();
> > +}
> > --
> > 2.29.0
> >

* Re: [PATCH bpf] selftest/bpf: Validate initial values of per-cpu hash elems
  2020-10-29 22:37   ` Andrii Nakryiko
@ 2020-10-29 23:37     ` Song Liu
  2020-11-02 11:41     ` David Verbeiren
  1 sibling, 0 replies; 6+ messages in thread
From: Song Liu @ 2020-10-29 23:37 UTC (permalink / raw)
  To: Andrii Nakryiko; +Cc: David Verbeiren, bpf, Networking, Andrii Nakryiko

On Thu, Oct 29, 2020 at 3:37 PM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Thu, Oct 29, 2020 at 11:36 AM Song Liu <song@kernel.org> wrote:
> >
> > On Thu, Oct 29, 2020 at 4:19 AM David Verbeiren
> > <david.verbeiren@tessares.net> wrote:
> > >
> > > Test that when per-cpu hash map or LRU hash map elements are
> > > re-used as a result of a BPF program inserting elements, the
> > > element values for CPUs other than the one executing the
> > > BPF code are reset to 0.

[...]

> >
> > > +                               return -1;
> > > +                       }
> > [...]
> >
> > > +
> > > +       /* delete key=1 element so it will later be re-used*/
> > > +       key = 1;
> > > +       err = bpf_map_delete_elem(map_fd, &key);
> > > +       if (CHECK(err, "bpf_map_delete_elem", "failed: %s\n", strerror(errno)))
> > > +               goto error_map;
> > > +
> > > +       /* run bpf prog that inserts new elem, re-using the slot just freed */
> > > +       err = bpf_prog_insert_elem(map_fd, key, TEST_VALUE);
> > > +       if (!ASSERT_OK(err, "bpf_prog_insert_elem"))
> > > +               goto error_map;
> >
> > What's the reason to use ASSERT_OK() instead of CHECK()?
>
> I've recently added the ASSERT_xxx() family of macros to accommodate
> the most common checks and provide sensible printing of details. So I
> now always prefer the ASSERT() macros; they save a bunch of typing and
> time.

I see. It is definitely less typing. :)

Thanks,
Song

[...]

* Re: [PATCH bpf] selftest/bpf: Validate initial values of per-cpu hash elems
  2020-10-29 22:37   ` Andrii Nakryiko
  2020-10-29 23:37     ` Song Liu
@ 2020-11-02 11:41     ` David Verbeiren
  2020-11-03 18:28       ` Andrii Nakryiko
  1 sibling, 1 reply; 6+ messages in thread
From: David Verbeiren @ 2020-11-02 11:41 UTC (permalink / raw)
  To: Andrii Nakryiko; +Cc: Song Liu, bpf, Networking, Andrii Nakryiko

On Thu, Oct 29, 2020 at 11:37 PM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Thu, Oct 29, 2020 at 11:36 AM Song Liu <song@kernel.org> wrote:
> >
> > On Thu, Oct 29, 2020 at 4:19 AM David Verbeiren
> > <david.verbeiren@tessares.net> wrote:
> > >
> > > Test that when per-cpu hash map or LRU hash map elements are
> > > re-used as a result of a BPF program inserting elements, the
> > > element values for CPUs other than the one executing the
> > > BPF code are reset to 0.
> > >
> > > This validates the fix proposed in:
> > > https://lkml.kernel.org/bpf/20201027221324.27894-1-david.verbeiren@tessares.net/
[...]
> > > ---
> > > +
> > > +/* executes bpf program that updates map with key, value */
> > > +static int bpf_prog_insert_elem(int fd, map_key_t key, map_value_t value)
> > > +{
> > > +       struct bpf_load_program_attr prog;
> > > +       struct bpf_insn insns[] = {
> > > +               BPF_LD_IMM64(BPF_REG_8, key),
> > > +               BPF_LD_IMM64(BPF_REG_9, value),
> > > +
> > > +               /* update: R1=fd, R2=&key, R3=&value, R4=flags */
> > > +               BPF_LD_MAP_FD(BPF_REG_1, fd),
> > > +               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
> > > +               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
> > > +               BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_8, 0),
> > > +               BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
> > > +               BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
> > > +               BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_9, 0),
> > > +               BPF_MOV64_IMM(BPF_REG_4, 0),
> > > +               BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
> > > +
> > > +               BPF_MOV64_IMM(BPF_REG_0, 0),
> > > +               BPF_EXIT_INSN(),
> > > +       };
> >
> > Impressive hand-written assembly. ;-) I would recommend using a
> > skeleton for future work. For example:
> >
> >     BPF program: selftests/bpf/progs/bpf_iter_bpf_map.c
> >     Use the program in tests:
> > selftests/bpf/prog_tests/bpf_iter.c:#include "bpf_iter_bpf_map.skel.h"
> >
>
> Let's keep manually-constructed assembly for test_verifier tests only.
>
> David, please also check progs/test_endian.c and prog_tests/endian.c
> as one of the most minimal self-tests with no added complexity, but
> complete end-to-end setup.

Thanks for the suggestion, Andrii. I tried using the same simple setup
as prog_tests/endian.c, but unfortunately, when using the sys_enter
tracepoint, the bpf program runs several times, on various CPUs.
This invalidates the userspace check that the value was updated for
only one CPU and initialized to 0 for all the others. I tried to
change the bpf program so it would only run once, but I bumped into
the limitation that the return value of __sync_fetch_and_add() (and
family) cannot be used. Any suggestion for this? Can I combine a
skeleton with bpf_prog_test_run()?

* Re: [PATCH bpf] selftest/bpf: Validate initial values of per-cpu hash elems
  2020-11-02 11:41     ` David Verbeiren
@ 2020-11-03 18:28       ` Andrii Nakryiko
  0 siblings, 0 replies; 6+ messages in thread
From: Andrii Nakryiko @ 2020-11-03 18:28 UTC (permalink / raw)
  To: David Verbeiren; +Cc: Song Liu, bpf, Networking, Andrii Nakryiko

On Mon, Nov 2, 2020 at 3:41 AM David Verbeiren
<david.verbeiren@tessares.net> wrote:
>
> On Thu, Oct 29, 2020 at 11:37 PM Andrii Nakryiko
> <andrii.nakryiko@gmail.com> wrote:
> >
> > On Thu, Oct 29, 2020 at 11:36 AM Song Liu <song@kernel.org> wrote:
> > >
> > > On Thu, Oct 29, 2020 at 4:19 AM David Verbeiren
> > > <david.verbeiren@tessares.net> wrote:
> > > >
> > > > Test that when per-cpu hash map or LRU hash map elements are
> > > > re-used as a result of a BPF program inserting elements, the
> > > > element values for CPUs other than the one executing the
> > > > BPF code are reset to 0.
> > > >
> > > > This validates the fix proposed in:
> > > > https://lkml.kernel.org/bpf/20201027221324.27894-1-david.verbeiren@tessares.net/
> [...]
> > > > ---
> > > > +
> > > > +/* executes bpf program that updates map with key, value */
> > > > +static int bpf_prog_insert_elem(int fd, map_key_t key, map_value_t value)
> > > > +{
> > > > +       struct bpf_load_program_attr prog;
> > > > +       struct bpf_insn insns[] = {
> > > > +               BPF_LD_IMM64(BPF_REG_8, key),
> > > > +               BPF_LD_IMM64(BPF_REG_9, value),
> > > > +
> > > > +               /* update: R1=fd, R2=&key, R3=&value, R4=flags */
> > > > +               BPF_LD_MAP_FD(BPF_REG_1, fd),
> > > > +               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
> > > > +               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
> > > > +               BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_8, 0),
> > > > +               BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
> > > > +               BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
> > > > +               BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_9, 0),
> > > > +               BPF_MOV64_IMM(BPF_REG_4, 0),
> > > > +               BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
> > > > +
> > > > +               BPF_MOV64_IMM(BPF_REG_0, 0),
> > > > +               BPF_EXIT_INSN(),
> > > > +       };
> > >
> > > Impressive hand-written assembly. ;-) I would recommend using a
> > > skeleton for future work. For example:
> > >
> > >     BPF program: selftests/bpf/progs/bpf_iter_bpf_map.c
> > >     Use the program in tests:
> > > selftests/bpf/prog_tests/bpf_iter.c:#include "bpf_iter_bpf_map.skel.h"
> > >
> >
> > Let's keep manually-constructed assembly for test_verifier tests only.
> >
> > David, please also check progs/test_endian.c and prog_tests/endian.c
> > as one of the most minimal self-tests with no added complexity, but
> > complete end-to-end setup.
>
> Thanks for the suggestion, Andrii. I tried using the same simple setup
> as prog_tests/endian.c, but unfortunately, when using the sys_enter
> tracepoint, the bpf program runs several times, on various CPUs.
> This invalidates the userspace check that the value was updated for
> only one CPU and initialized to 0 for all the others. I tried to
> change the bpf program so it would only run once, but I bumped into
> the limitation that the return value of __sync_fetch_and_add() (and
> family) cannot be used. Any suggestion for this? Can I combine a
> skeleton with bpf_prog_test_run()?

Replied to the new version of the patch. You can use a more selective
tracepoint (so the test won't accidentally trigger it multiple times)
and filter on thread ID. And yes, of course you can use
bpf_prog_test_run() with a skeleton. In the end it's all about FDs,
which you can easily get from the skeleton.
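
E.g., on top of a skeleton program like the one sketched earlier in
the thread, the filtering could look like this (untested, names are
just for illustration):

    /* set from userspace via skel->bss before attaching */
    __u32 my_tid = 0;

    SEC("tp/syscalls/sys_enter_getpgid")
    int sysenter_getpgid(const void *ctx)
    {
            /* pid_tgid is tgid << 32 | tid; act only for our thread */
            if ((__u32)bpf_get_current_pid_tgid() != my_tid)
                    return 0;
            /* ... do the single map update here ... */
            return 0;
    }

and in the test:

    skel->bss->my_tid = syscall(SYS_gettid);
    /* attach, then trigger the tracepoint exactly once */
    syscall(__NR_getpgid);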
