From: Andrii Nakryiko <andrii@kernel.org>
To: <bpf@vger.kernel.org>, <ast@kernel.org>, <daniel@iogearbox.net>
Cc: <andrii@kernel.org>, <kernel-team@fb.com>,
	Yucong Sun <sunyucong@gmail.com>
Subject: [PATCH bpf-next 4/4] selftests/bpf: split out bpf_verif_scale selftests into multiple tests
Date: Fri, 22 Oct 2021 15:32:28 -0700	[thread overview]
Message-ID: <20211022223228.99920-5-andrii@kernel.org> (raw)
In-Reply-To: <20211022223228.99920-1-andrii@kernel.org>

Instead of using subtests in the bpf_verif_scale selftest, turn each scale
sub-test into its own test. Each test is completely independent and just
reuses a bit of common test running logic, so the conversion is trivial.
For convenience, keep all of the BPF verifier scale tests in one file.

This conversion shaves off a significant amount of time when running
test_progs in parallel mode. E.g., just running scale tests (-t verif_scale):

BEFORE
======
Summary: 24/0 PASSED, 0 SKIPPED, 0 FAILED

real    0m22.894s
user    0m0.012s
sys     0m22.797s

AFTER
=====
Summary: 24/0 PASSED, 0 SKIPPED, 0 FAILED

real    0m12.044s
user    0m0.024s
sys     0m27.869s

A ten-second saving right there. test_progs -j is not yet ready to be
turned on by default, unfortunately, as some tests fail almost every
time, but this is a good improvement nevertheless. Ignoring a few
failures, here are the sequential vs parallel run times when running all
tests now:

SEQUENTIAL
==========
Summary: 206/953 PASSED, 4 SKIPPED, 0 FAILED

real    1m5.625s
user    0m4.211s
sys     0m31.650s

PARALLEL
========
Summary: 204/952 PASSED, 4 SKIPPED, 2 FAILED

real    0m35.550s
user    0m4.998s
sys     0m39.890s
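
For reference, the numbers above correspond to invocations along these
lines (a sketch; root privileges and the exact environment are assumed):

  # sequential run of all tests
  time sudo ./test_progs

  # parallel run across multiple worker processes
  time sudo ./test_progs -j

The scale-only timings earlier in the message used -t verif_scale to
select just these tests.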

Cc: Yucong Sun <sunyucong@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 .../bpf/prog_tests/bpf_verif_scale.c          | 220 ++++++++++++------
 1 file changed, 152 insertions(+), 68 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index 3d002c245d2b..867349e4ed9e 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -39,82 +39,166 @@ struct scale_test_def {
 	bool fails;
 };
 
-void test_bpf_verif_scale(void)
-{
-	struct scale_test_def tests[] = {
-		{ "loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */ },
-
-		{ "test_verif_scale1.o", BPF_PROG_TYPE_SCHED_CLS },
-		{ "test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS },
-		{ "test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS },
-
-		{ "pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-		{ "pyperf_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
-		/* full unroll by llvm */
-		{ "pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-		{ "pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-		{ "pyperf180.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
-		/* partial unroll. llvm will unroll loop ~150 times.
-		 * C loop count -> 600.
-		 * Asm loop count -> 4.
-		 * 16k insns in loop body.
-		 * Total of 5 such loops. Total program size ~82k insns.
-		 */
-		{ "pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
-		/* no unroll at all.
-		 * C loop count -> 600.
-		 * ASM loop count -> 600.
-		 * ~110 insns in loop body.
-		 * Total of 5 such loops. Total program size ~1500 insns.
-		 */
-		{ "pyperf600_nounroll.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
-		{ "loop1.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-		{ "loop2.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-		{ "loop4.o", BPF_PROG_TYPE_SCHED_CLS },
-		{ "loop5.o", BPF_PROG_TYPE_SCHED_CLS },
-		{ "loop6.o", BPF_PROG_TYPE_KPROBE },
-
-		/* partial unroll. 19k insn in a loop.
-		 * Total program size 20.8k insn.
-		 * ~350k processed_insns
-		 */
-		{ "strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
-		/* no unroll, tiny loops */
-		{ "strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-		{ "strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
-		/* non-inlined subprogs */
-		{ "strobemeta_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
-
-		{ "test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL },
-		{ "test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL },
-
-		{ "test_xdp_loop.o", BPF_PROG_TYPE_XDP },
-		{ "test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL },
-	};
+static void scale_test(const char *file,
+		       enum bpf_prog_type attach_type,
+		       bool should_fail)
+{
 	libbpf_print_fn_t old_print_fn = NULL;
-	int err, i;
+	int err;
 
 	if (env.verifier_stats) {
 		test__force_log();
 		old_print_fn = libbpf_set_print(libbpf_debug_print);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(tests); i++) {
-		const struct scale_test_def *test = &tests[i];
-
-		if (!test__start_subtest(test->file))
-			continue;
-
-		err = check_load(test->file, test->attach_type);
-		CHECK_FAIL(err && !test->fails);
-	}
+	err = check_load(file, attach_type);
+	if (should_fail)
+		ASSERT_ERR(err, "expect_error");
+	else
+		ASSERT_OK(err, "expect_success");
 
 	if (env.verifier_stats)
 		libbpf_set_print(old_print_fn);
 }
+
+void test_verif_scale1()
+{
+	scale_test("test_verif_scale1.o", BPF_PROG_TYPE_SCHED_CLS, false);
+}
+
+void test_verif_scale2()
+{
+	scale_test("test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS, false);
+}
+
+void test_verif_scale3()
+{
+	scale_test("test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS, false);
+}
+
+void test_verif_scale_pyperf_global()
+{
+	scale_test("pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf_subprogs()
+{
+	scale_test("pyperf_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf50()
+{
+	/* full unroll by llvm */
+	scale_test("pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf100()
+{
+	/* full unroll by llvm */
+	scale_test("pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf180()
+{
+	/* full unroll by llvm */
+	scale_test("pyperf180.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf600()
+{
+	/* partial unroll. llvm will unroll loop ~150 times.
+	 * C loop count -> 600.
+	 * Asm loop count -> 4.
+	 * 16k insns in loop body.
+	 * Total of 5 such loops. Total program size ~82k insns.
+	 */
+	scale_test("pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_pyperf600_nounroll()
+{
+	/* no unroll at all.
+	 * C loop count -> 600.
+	 * ASM loop count -> 600.
+	 * ~110 insns in loop body.
+	 * Total of 5 such loops. Total program size ~1500 insns.
+	 */
+	scale_test("pyperf600_nounroll.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_loop1()
+{
+	scale_test("loop1.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_loop2()
+{
+	scale_test("loop2.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_loop3_fail()
+{
+	scale_test("loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */);
+}
+
+void test_verif_scale_loop4()
+{
+	scale_test("loop4.o", BPF_PROG_TYPE_SCHED_CLS, false);
+}
+
+void test_verif_scale_loop5()
+{
+	scale_test("loop5.o", BPF_PROG_TYPE_SCHED_CLS, false);
+}
+
+void test_verif_scale_loop6()
+{
+	scale_test("loop6.o", BPF_PROG_TYPE_KPROBE, false);
+}
+
+void test_verif_scale_strobemeta()
+{
+	/* partial unroll. 19k insn in a loop.
+	 * Total program size 20.8k insn.
+	 * ~350k processed_insns
+	 */
+	scale_test("strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_strobemeta_nounroll1()
+{
+	/* no unroll, tiny loops */
+	scale_test("strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_strobemeta_nounroll2()
+{
+	/* no unroll, tiny loops */
+	scale_test("strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_strobemeta_subprogs()
+{
+	/* non-inlined subprogs */
+	scale_test("strobemeta_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
+}
+
+void test_verif_scale_sysctl_loop1()
+{
+	scale_test("test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false);
+}
+
+void test_verif_scale_sysctl_loop2()
+{
+	scale_test("test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false);
+}
+
+void test_verif_scale_xdp_loop()
+{
+	scale_test("test_xdp_loop.o", BPF_PROG_TYPE_XDP, false);
+}
+
+void test_verif_scale_seg6_loop()
+{
+	scale_test("test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL, false);
+}
-- 
2.30.2
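
After this split, each scale test can be run on its own via test_progs'
name filter. A couple of illustrative invocations (assuming test_progs'
usual substring matching on test names, with the test name being the
function name minus its test_ prefix):

  # run a single split-out test
  sudo ./test_progs -t verif_scale_loop1

  # -t verif_scale still matches the whole group; -j runs it in parallel
  sudo ./test_progs -t verif_scale -j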


Thread overview: 11+ messages
2021-10-22 22:32 [PATCH bpf-next 0/4] Parallelize verif_scale selftests Andrii Nakryiko
2021-10-22 22:32 ` [PATCH bpf-next 1/4] selftests/bpf: normalize selftest entry points Andrii Nakryiko
2021-10-22 22:32 ` [PATCH bpf-next 2/4] selftests/bpf: support multiple tests per file Andrii Nakryiko
2021-10-25 20:12   ` sunyucong
2021-10-25 20:39     ` Andrii Nakryiko
2021-10-25 20:55       ` sunyucong
2021-10-25 21:09         ` Andrii Nakryiko
2021-10-22 22:32 ` [PATCH bpf-next 3/4] selftests/bpf: mark tc_redirect selftest as serial Andrii Nakryiko
2021-10-22 22:32 ` Andrii Nakryiko [this message]
2021-10-25 20:15 ` [PATCH bpf-next 0/4] Parallelize verif_scale selftests sunyucong
2021-10-26  1:12   ` Alexei Starovoitov
