From: Dave Marchevsky <davemarchevsky@fb.com>
To: Yonghong Song <yhs@fb.com>, bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Andrii Nakryiko <andrii@kernel.org>,
	Kernel Team <kernel-team@fb.com>, Tejun Heo <tj@kernel.org>
Subject: Re: [RFC PATCH bpf-next 11/11] selftests/bpf: Add rbtree map tests
Date: Wed, 10 Aug 2022 13:48:32 -0400
Message-ID: <e34cd3ef-81da-4993-da6c-ff104b585423@fb.com>
In-Reply-To: <be9fa91f-820e-27d0-f66b-9c2e1164681c@fb.com>

On 7/28/22 3:18 AM, Yonghong Song wrote:
> 
> 
> On 7/22/22 11:34 AM, Dave Marchevsky wrote:
>> Add tests demonstrating happy path of rbtree map usage as well as
>> exercising numerous failure paths and conditions. Structure of failing
>> test runner is based on dynptr tests.
>>
>> Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
>> ---
>>   .../selftests/bpf/prog_tests/rbtree_map.c     | 164 ++++++++++++
>>   .../testing/selftests/bpf/progs/rbtree_map.c  | 111 ++++++++
>>   .../selftests/bpf/progs/rbtree_map_fail.c     | 236 ++++++++++++++++++
>>   .../bpf/progs/rbtree_map_load_fail.c          |  24 ++
>>   4 files changed, 535 insertions(+)
>>   create mode 100644 tools/testing/selftests/bpf/prog_tests/rbtree_map.c
>>   create mode 100644 tools/testing/selftests/bpf/progs/rbtree_map.c
>>   create mode 100644 tools/testing/selftests/bpf/progs/rbtree_map_fail.c
>>   create mode 100644 tools/testing/selftests/bpf/progs/rbtree_map_load_fail.c
>>
>> diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree_map.c b/tools/testing/selftests/bpf/prog_tests/rbtree_map.c
>> new file mode 100644
>> index 000000000000..17cadcd05ee4
>> --- /dev/null
>> +++ b/tools/testing/selftests/bpf/prog_tests/rbtree_map.c
>> @@ -0,0 +1,164 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
>> +
>> +#include <sys/syscall.h>
>> +#include <test_progs.h>
>> +#include "rbtree_map.skel.h"
>> +#include "rbtree_map_fail.skel.h"
>> +#include "rbtree_map_load_fail.skel.h"
>> +
>> +static size_t log_buf_sz = 1048576; /* 1 MB */
>> +static char obj_log_buf[1048576];
>> +
>> +static struct {
>> +    const char *prog_name;
>> +    const char *expected_err_msg;
>> +} rbtree_prog_load_fail_tests[] = {
>> +    {"rb_node__field_store", "only read is supported"},
>> +    {"rb_node__alloc_no_add", "Unreleased reference id=2 alloc_insn=3"},
>> +    {"rb_node__two_alloc_one_add", "Unreleased reference id=2 alloc_insn=3"},
>> +    {"rb_node__remove_no_free", "Unreleased reference id=5 alloc_insn=28"},
>> +    {"rb_tree__add_wrong_type", "rbtree: R2 is of type task_struct but node_data is expected"},
>> +    {"rb_tree__conditional_release_helper_usage",
>> +        "R2 type=ptr_cond_rel_ expected=ptr_"},
>> +};
>> +
>> +void test_rbtree_map_load_fail(void)
>> +{
>> +    struct rbtree_map_load_fail *skel;
>> +
>> +    skel = rbtree_map_load_fail__open_and_load();
>> +    if (!ASSERT_ERR_PTR(skel, "rbtree_map_load_fail__open_and_load"))
>> +        rbtree_map_load_fail__destroy(skel);
>> +}
>> +
>> +static void verify_fail(const char *prog_name, const char *expected_err_msg)
>> +{
>> +    LIBBPF_OPTS(bpf_object_open_opts, opts);
>> +    struct rbtree_map_fail *skel;
>> +    struct bpf_program *prog;
>> +    int err;
>> +
>> +    opts.kernel_log_buf = obj_log_buf;
>> +    opts.kernel_log_size = log_buf_sz;
>> +    opts.kernel_log_level = 1;
>> +
>> +    skel = rbtree_map_fail__open_opts(&opts);
>> +    if (!ASSERT_OK_PTR(skel, "rbtree_map_fail__open_opts"))
>> +        goto cleanup;
>> +
>> +    prog = bpf_object__find_program_by_name(skel->obj, prog_name);
>> +    if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
>> +        goto cleanup;
>> +
>> +    bpf_program__set_autoload(prog, true);
>> +    err = rbtree_map_fail__load(skel);
>> +    if (!ASSERT_ERR(err, "unexpected load success"))
>> +        goto cleanup;
>> +
>> +    if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
>> +        fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
>> +        fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
>> +    }
>> +
>> +cleanup:
>> +    rbtree_map_fail__destroy(skel);
>> +}
>> +
>> +void test_rbtree_map_alloc_node__size_too_small(void)
>> +{
>> +    struct rbtree_map_fail *skel;
>> +    struct bpf_program *prog;
>> +    struct bpf_link *link;
>> +    int err;
>> +
>> +    skel = rbtree_map_fail__open();
>> +    if (!ASSERT_OK_PTR(skel, "rbtree_map_fail__open"))
>> +        goto cleanup;
>> +
>> +    prog = skel->progs.alloc_node__size_too_small;
>> +    bpf_program__set_autoload(prog, true);
>> +
>> +    err = rbtree_map_fail__load(skel);
>> +    if (!ASSERT_OK(err, "unexpected load fail"))
>> +        goto cleanup;
>> +
>> +    link = bpf_program__attach(skel->progs.alloc_node__size_too_small);
>> +    if (!ASSERT_OK_PTR(link, "link"))
>> +        goto cleanup;
>> +
>> +    syscall(SYS_getpgid);
>> +
>> +    ASSERT_EQ(skel->bss->size_too_small__alloc_fail, 1, "alloc_fail");
>> +
>> +    bpf_link__destroy(link);
>> +cleanup:
>> +    rbtree_map_fail__destroy(skel);
>> +}
>> +
>> +void test_rbtree_map_add_node__no_lock(void)
>> +{
>> +    struct rbtree_map_fail *skel;
>> +    struct bpf_program *prog;
>> +    struct bpf_link *link;
>> +    int err;
>> +
>> +    skel = rbtree_map_fail__open();
>> +    if (!ASSERT_OK_PTR(skel, "rbtree_map_fail__open"))
>> +        goto cleanup;
>> +
>> +    prog = skel->progs.add_node__no_lock;
>> +    bpf_program__set_autoload(prog, true);
>> +
>> +    err = rbtree_map_fail__load(skel);
>> +    if (!ASSERT_OK(err, "unexpected load fail"))
>> +        goto cleanup;
>> +
>> +    link = bpf_program__attach(skel->progs.add_node__no_lock);
>> +    if (!ASSERT_OK_PTR(link, "link"))
>> +        goto cleanup;
>> +
>> +    syscall(SYS_getpgid);
>> +
>> +    ASSERT_EQ(skel->bss->no_lock_add__fail, 1, "no_lock_add_fail");
>> +
>> +    bpf_link__destroy(link);
>> +cleanup:
>> +    rbtree_map_fail__destroy(skel);
>> +}
>> +
>> +void test_rbtree_map_prog_load_fail(void)
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < ARRAY_SIZE(rbtree_prog_load_fail_tests); i++) {
>> +        if (!test__start_subtest(rbtree_prog_load_fail_tests[i].prog_name))
>> +            continue;
>> +
>> +        verify_fail(rbtree_prog_load_fail_tests[i].prog_name,
>> +                rbtree_prog_load_fail_tests[i].expected_err_msg);
>> +    }
>> +}
>> +
>> +void test_rbtree_map(void)
>> +{
>> +    struct rbtree_map *skel;
>> +    struct bpf_link *link;
>> +
>> +    skel = rbtree_map__open_and_load();
>> +    if (!ASSERT_OK_PTR(skel, "rbtree_map__open_and_load"))
>> +        goto cleanup;
>> +
>> +    link = bpf_program__attach(skel->progs.check_rbtree);
>> +    if (!ASSERT_OK_PTR(link, "link"))
>> +        goto cleanup;
>> +
>> +    for (int i = 0; i < 100; i++)
>> +        syscall(SYS_getpgid);
>> +
>> +    ASSERT_EQ(skel->bss->calls, 100, "calls_equal");
>> +
>> +    bpf_link__destroy(link);
>> +cleanup:
>> +    rbtree_map__destroy(skel);
>> +}
>> diff --git a/tools/testing/selftests/bpf/progs/rbtree_map.c b/tools/testing/selftests/bpf/progs/rbtree_map.c
>> new file mode 100644
>> index 000000000000..0cd467838f6e
>> --- /dev/null
>> +++ b/tools/testing/selftests/bpf/progs/rbtree_map.c
>> @@ -0,0 +1,111 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
>> +
>> +#include "vmlinux.h"
>> +#include <bpf/bpf_helpers.h>
>> +#include "bpf_misc.h"
>> +
>> +struct node_data {
>> +    struct rb_node node;
>> +    __u32 one;
>> +    __u32 two;
>> +};
>> +
>> +struct {
>> +    __uint(type, BPF_MAP_TYPE_RBTREE);
>> +    __type(value, struct node_data);
>> +} rbtree SEC(".maps");
>> +
>> +long calls;
>> +
>> +static bool less(struct rb_node *a, const struct rb_node *b)
>> +{
>> +    struct node_data *node_a;
>> +    struct node_data *node_b;
>> +
>> +    node_a = container_of(a, struct node_data, node);
>> +    node_b = container_of(b, struct node_data, node);
>> +
>> +    return node_a->one < node_b->one;
>> +}
>> +
>> +// Key = node_data
>> +static int cmp(const void *key, const struct rb_node *b)
>> +{
>> +    struct node_data *node_a;
>> +    struct node_data *node_b;
>> +
>> +    node_a = container_of(key, struct node_data, node);
>> +    node_b = container_of(b, struct node_data, node);
>> +
>> +    return node_b->one - node_a->one;
>> +}
>> +
>> +// Key = just node_data.one
>> +static int cmp2(const void *key, const struct rb_node *b)
>> +{
>> +    __u32 one;
>> +    struct node_data *node_b;
>> +
>> +    one = *(__u32 *)key;
>> +    node_b = container_of(b, struct node_data, node);
>> +
>> +    return node_b->one - one;
>> +}
>> +
>> +SEC("fentry/" SYS_PREFIX "sys_getpgid")
>> +int check_rbtree(void *ctx)
>> +{
>> +    struct node_data *node, *found, *ret;
>> +    struct node_data popped;
>> +    struct node_data search;
>> +    __u32 search2;
>> +
>> +    node = bpf_rbtree_alloc_node(&rbtree, sizeof(struct node_data));
> 
> If I understand correctly, bpf_rbtree_alloc_node() may reschedule
> inside the function, so the program should be sleepable, right?
> 

My mistake, I'll change the alloc flag to GFP_NOWAIT so the allocation
never sleeps and the program doesn't need to be sleepable.
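
For reference, roughly the shape of the change I have in mind on the
kernel side (a minimal sketch only; bpf_map_kmalloc_node() is the
existing map-accounted allocator, but the wrapper name and signature
here are illustrative, not the actual code from this series):

	/* Sketch (assumed names): allocate an rbtree node without
	 * sleeping. GFP_NOWAIT never reschedules, unlike GFP_KERNEL,
	 * which may sleep for memory reclaim, so the calling BPF
	 * program no longer has to be sleepable. __GFP_NOWARN
	 * suppresses allocation-failure warnings, since the program
	 * already checks for a NULL return.
	 */
	static void *rbtree_map_alloc_node(struct bpf_map *map, u32 sz)
	{
		return bpf_map_kmalloc_node(map, sz,
					    GFP_NOWAIT | __GFP_NOWARN,
					    NUMA_NO_NODE);
	}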

>> +    if (!node)
>> +        return 0;
>> +
>> +    node->one = calls;
>> +    node->two = 6;
>> +    bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
>> +
>> +    ret = (struct node_data *)bpf_rbtree_add(&rbtree, node, less);
>> +    if (!ret) {
>> +        bpf_rbtree_free_node(&rbtree, node);
>> +        goto unlock_ret;
>> +    }
>> +
>> +    bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
>> +
>> +    bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
>> +
>> +    search.one = calls;
>> +    found = (struct node_data *)bpf_rbtree_find(&rbtree, &search, cmp);
>> +    if (!found)
>> +        goto unlock_ret;
>> +
>> +    int node_ct = 0;
>> +    struct node_data *iter = (struct node_data *)bpf_rbtree_first(&rbtree);
>> +
>> +    while (iter) {
>> +        node_ct++;
>> +        iter = (struct node_data *)bpf_rbtree_next(&rbtree, iter);
>> +    }
>> +
>> +    ret = (struct node_data *)bpf_rbtree_remove(&rbtree, found);
>> +    if (!ret)
>> +        goto unlock_ret;
>> +
>> +    bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
>> +
>> +    bpf_rbtree_free_node(&rbtree, ret);
>> +
>> +    __sync_fetch_and_add(&calls, 1);
>> +    return 0;
>> +
>> +unlock_ret:
>> +    bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
>> +    return 0;
>> +}
>> +
> [...]

