From: Hou Tao <houtao@huaweicloud.com>
To: bpf@vger.kernel.org
Cc: linux-fsdevel@vger.kernel.org,
Alexei Starovoitov <alexei.starovoitov@gmail.com>,
Yonghong Song <yhs@fb.com>,
Andrii Nakryiko <andrii.nakryiko@gmail.com>,
Viacheslav Dubeyko <slava@dubeyko.com>,
Amir Goldstein <amir73il@gmail.com>,
houtao1@huawei.com
Subject: [RFC PATCH bpf-next 4/4] selftests/bpf: Add test cases for bpf file-system iterator
Date: Sun, 7 May 2023 12:01:07 +0800 [thread overview]
Message-ID: <20230507040107.3755166-5-houtao@huaweicloud.com> (raw)
In-Reply-To: <20230507040107.3755166-1-houtao@huaweicloud.com>
From: Hou Tao <houtao1@huawei.com>
Add three test cases to demonstrate the basic functionality of the bpf
file-system iterator:
1) dump_raw_inode. Use bpf_seq_printf_btf to dump the content of the
passed inode and its super_block
2) dump_inode. Use bpf_filemap_{cachestat,find_present,get_order} to
dump the details of the inode page cache.
3) dump_mnt. Dump the basic information of the passed mount.
Signed-off-by: Hou Tao <houtao1@huawei.com>
---
.../selftests/bpf/prog_tests/bpf_iter_fs.c | 184 ++++++++++++++++++
.../testing/selftests/bpf/progs/bpf_iter_fs.c | 122 ++++++++++++
2 files changed, 306 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/bpf_iter_fs.c
create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_fs.c
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_fs.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_fs.c
new file mode 100644
index 000000000000..e26d736001b4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_fs.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <test_progs.h>
+#include "bpf_iter_fs.skel.h"
+
+/* Dump the raw content of one inode and its super_block via
+ * bpf_seq_printf_btf() and print whatever the iterator produced.
+ */
+static void test_bpf_iter_raw_inode(void)
+{
+ const char *fpath = "/tmp/raw_inode.test";
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ int ino_fd, iter_fd, err;
+ struct bpf_iter_fs *skel;
+ struct bpf_link *link;
+ char buf[8192];
+ ssize_t nr;
+
+ /* Create a file with a non-zero i_size for the iterator to dump */
+ ino_fd = open(fpath, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (!ASSERT_GE(ino_fd, 0, "open file"))
+  return;
+ err = ftruncate(ino_fd, 4095);
+ if (!ASSERT_OK(err, "ftruncate"))
+  goto close_ino;
+
+ skel = bpf_iter_fs__open();
+ if (!ASSERT_OK_PTR(skel, "open"))
+  goto close_ino;
+
+ /* Only load the program under test */
+ bpf_program__set_autoload(skel->progs.dump_raw_inode, true);
+
+ err = bpf_iter_fs__load(skel);
+ if (!ASSERT_OK(err, "load"))
+  goto free_skel;
+
+ /* Pin the iterator to the inode referenced by ino_fd */
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.fs.type = BPF_FS_ITER_INODE;
+ linfo.fs.fd = ino_fd;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+ link = bpf_program__attach_iter(skel->progs.dump_raw_inode, &opts);
+ if (!ASSERT_OK_PTR(link, "attach iter"))
+  goto free_skel;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "create iter"))
+  goto free_link;
+
+ nr = read(iter_fd, buf, sizeof(buf));
+ if (!ASSERT_GT(nr, 0, "read iter"))
+  goto close_iter;
+
+ /* Replace the trailing newline with a NUL terminator for puts() */
+ buf[nr - 1] = 0;
+ puts(buf);
+
+close_iter:
+ close(iter_fd);
+free_link:
+ bpf_link__destroy(link);
+free_skel:
+ bpf_iter_fs__destroy(skel);
+close_ino:
+ close(ino_fd);
+ unlink(fpath);
+}
+
+/* Exercise the bpf_filemap_*() kfuncs: populate two separated ranges of
+ * the file's page cache, then dump cachestat numbers and folio orders.
+ */
+static void test_bpf_iter_inode(void)
+{
+ const char *fpath = "/tmp/inode.test";
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ int ino_fd, iter_fd, err;
+ struct bpf_iter_fs *skel;
+ struct bpf_link *link;
+ char buf[8192];
+ ssize_t nr;
+
+ /* The fd is kept open until reading of the iterator completes */
+ ino_fd = open(fpath, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (!ASSERT_GE(ino_fd, 0, "open file"))
+  return;
+ /* Don't write uninitialized stack memory into the file */
+ memset(buf, 0, sizeof(buf));
+ /* Two extents with a hole in between to exercise find_present */
+ nr = pwrite(ino_fd, buf, sizeof(buf), 0);
+ if (!ASSERT_EQ(nr, sizeof(buf), "first pwrite"))
+  goto close_ino;
+ nr = pwrite(ino_fd, buf, sizeof(buf), sizeof(buf) * 2);
+ if (!ASSERT_EQ(nr, sizeof(buf), "second pwrite"))
+  goto close_ino;
+
+ skel = bpf_iter_fs__open();
+ if (!ASSERT_OK_PTR(skel, "open"))
+  goto close_ino;
+
+ /* Only load the program under test */
+ bpf_program__set_autoload(skel->progs.dump_inode, true);
+
+ err = bpf_iter_fs__load(skel);
+ if (!ASSERT_OK(err, "load"))
+  goto free_skel;
+
+ /* Pin the iterator to the inode referenced by ino_fd */
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.fs.type = BPF_FS_ITER_INODE;
+ linfo.fs.fd = ino_fd;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+ link = bpf_program__attach_iter(skel->progs.dump_inode, &opts);
+ if (!ASSERT_OK_PTR(link, "attach iter"))
+  goto free_skel;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "create iter"))
+  goto free_link;
+
+ nr = read(iter_fd, buf, sizeof(buf));
+ if (!ASSERT_GT(nr, 0, "read iter"))
+  goto close_iter;
+
+ /* Replace the trailing newline with a NUL terminator for puts() */
+ buf[nr - 1] = 0;
+ puts(buf);
+
+close_iter:
+ close(iter_fd);
+free_link:
+ bpf_link__destroy(link);
+free_skel:
+ bpf_iter_fs__destroy(skel);
+close_ino:
+ close(ino_fd);
+ unlink(fpath);
+}
+
+/* Dump basic information of the mount containing fpath.  Any fd opened
+ * on the mount is enough to identify it for BPF_FS_ITER_MNT.
+ */
+static void test_bpf_iter_mnt(void)
+{
+ const char *fpath = "/tmp/mnt.test";
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ int mnt_fd, iter_fd, err;
+ struct bpf_iter_fs *skel;
+ struct bpf_link *link;
+ char buf[8192];
+ ssize_t nr;
+
+ /* The fd is kept open until reading of the iterator completes */
+ mnt_fd = open(fpath, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (!ASSERT_GE(mnt_fd, 0, "open file"))
+  return;
+
+ skel = bpf_iter_fs__open();
+ if (!ASSERT_OK_PTR(skel, "open"))
+  goto close_mnt;
+
+ /* Only load the program under test */
+ bpf_program__set_autoload(skel->progs.dump_mnt, true);
+
+ err = bpf_iter_fs__load(skel);
+ if (!ASSERT_OK(err, "load"))
+  goto free_skel;
+
+ /* Pin the iterator to the mount referenced by mnt_fd */
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.fs.type = BPF_FS_ITER_MNT;
+ linfo.fs.fd = mnt_fd;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+ link = bpf_program__attach_iter(skel->progs.dump_mnt, &opts);
+ if (!ASSERT_OK_PTR(link, "attach iter"))
+  goto free_skel;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "create iter"))
+  goto free_link;
+
+ nr = read(iter_fd, buf, sizeof(buf));
+ if (!ASSERT_GT(nr, 0, "read iter"))
+  goto close_iter;
+
+ /* Replace the trailing newline with a NUL terminator for puts() */
+ buf[nr - 1] = 0;
+ puts(buf);
+
+close_iter:
+ close(iter_fd);
+free_link:
+ bpf_link__destroy(link);
+free_skel:
+ bpf_iter_fs__destroy(skel);
+close_mnt:
+ close(mnt_fd);
+ unlink(fpath);
+}
+
+/* Selftest entry point: run each iterator demo as its own subtest so
+ * they can pass/fail (and be selected) independently.
+ */
+void test_bpf_iter_fs(void)
+{
+ if (test__start_subtest("dump_raw_inode"))
+  test_bpf_iter_raw_inode();
+ if (test__start_subtest("dump_inode"))
+  test_bpf_iter_inode();
+ if (test__start_subtest("dump_mnt"))
+  test_bpf_iter_mnt();
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_fs.c b/tools/testing/selftests/bpf/progs/bpf_iter_fs.c
new file mode 100644
index 000000000000..e238446b6ddf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_fs.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include "bpf_iter.h"
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* State threaded through the bpf_loop() callback dump_page_order() */
+struct dump_ctx {
+ struct seq_file *seq; /* destination seq_file of the iterator */
+ struct inode *inode; /* inode whose page cache is scanned */
+ unsigned long from; /* next page index to probe */
+ unsigned long max; /* upper bound passed as 'last' to find_present */
+};
+
+/* kfuncs provided by the kernel-side fs-iterator patches in this series */
+void bpf_filemap_cachestat(struct inode *inode, unsigned long from, unsigned long last,
+       struct cachestat *cs) __ksym;
+long bpf_filemap_find_present(struct inode *inode, unsigned long from, unsigned long last) __ksym;
+long bpf_filemap_get_order(struct inode *inode, unsigned long index) __ksym;
+
+/* bpf_loop() callback: print the index and order of the next present
+ * page-cache folio at or after dump->from, then advance past it.
+ * Returns 1 to stop the loop when no page is present, 0 to continue.
+ */
+static u64 dump_page_order(unsigned int i, void *ctx)
+{
+ struct dump_ctx *dump = ctx;
+ unsigned long index;
+ unsigned int order;
+
+ /* -1 from find_present is treated as "no present page in range" */
+ index = bpf_filemap_find_present(dump->inode, dump->from, dump->max);
+ if (index == -1UL)
+  return 1;
+ order = bpf_filemap_get_order(dump->inode, index);
+
+ BPF_SEQ_PRINTF(dump->seq, " page offset %lu order %u\n", index, order);
+ /* skip the whole folio: it covers 1 << order page indexes.
+  * NOTE(review): assumes order stays well below the width of int.
+  */
+ dump->from = index + (1 << order);
+ return 0;
+}
+
+/* Dump the complete content of the passed inode and its super_block
+ * using the kernel's BTF-based pretty printer.
+ */
+SEC("?iter/fs_inode")
+int dump_raw_inode(struct bpf_iter__fs_inode *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct inode *inode = ctx->inode;
+ struct btf_ptr ptr;
+
+ /* presumably a NULL inode marks the final call of the iteration,
+  * as with other bpf_iter program types -- TODO confirm
+  */
+ if (inode == NULL)
+  return 0;
+
+ /* memset (rather than an initializer) also zeroes any padding */
+ memset(&ptr, 0, sizeof(ptr));
+ ptr.type_id = bpf_core_type_id_kernel(struct inode);
+ ptr.ptr = inode;
+ bpf_seq_printf_btf(seq, &ptr, sizeof(ptr), 0);
+
+ memset(&ptr, 0, sizeof(ptr));
+ ptr.type_id = bpf_core_type_id_kernel(struct super_block);
+ ptr.ptr = inode->i_sb;
+ bpf_seq_printf_btf(seq, &ptr, sizeof(ptr), 0);
+
+ return 0;
+}
+
+/* Print a human-readable summary of the inode: super_block details,
+ * link count/inum/size, optional dentry name, cachestat counters and
+ * the order of every present page-cache folio.
+ */
+SEC("?iter/fs_inode")
+int dump_inode(struct bpf_iter__fs_inode *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct inode *inode = ctx->inode;
+ struct cachestat cs = {};
+ struct super_block *sb;
+ struct dentry *dentry;
+ struct dump_ctx dump;
+
+ if (inode == NULL)
+  return 0;
+
+ sb = inode->i_sb;
+ BPF_SEQ_PRINTF(seq, "sb: bsize %lu s_op %ps s_type %ps name %s\n",
+ sb->s_blocksize, sb->s_op, sb->s_type, sb->s_type->name);
+
+ BPF_SEQ_PRINTF(seq, "ino: inode nlink %d inum %lu size %llu",
+ inode->i_nlink, inode->i_ino, inode->i_size);
+ /* NOTE(review): ctx->dentry can be NULL; confirm in which attach
+  * modes a dentry is supplied by the kernel side.
+  */
+ dentry = ctx->dentry;
+ if (dentry)
+  BPF_SEQ_PRINTF(seq, ", name %s\n", dentry->d_name.name);
+ else
+  BPF_SEQ_PRINTF(seq, "\n");
+
+ /* whole-file range: page indexes 0 .. ~0UL */
+ bpf_filemap_cachestat(inode, 0, ~0UL, &cs);
+ BPF_SEQ_PRINTF(seq, "cache: cached %llu dirty %llu wb %llu evicted %llu\n",
+ cs.nr_cache, cs.nr_dirty, cs.nr_writeback, cs.nr_evicted);
+
+ dump.seq = seq;
+ dump.inode = inode;
+ dump.from = 0;
+ /* TODO: handle BPF_MAX_LOOPS */
+ /* NOTE(review): 4096/4095 hard-code a 4K PAGE_SIZE -- confirm on
+  * architectures with larger pages.
+  */
+ dump.max = ((unsigned long)inode->i_size + 4095) / 4096;
+ BPF_SEQ_PRINTF(seq, "orders:\n");
+ bpf_loop(dump.max, dump_page_order, &dump, 0);
+
+ return 0;
+}
+
+/* Print basic information of the passed mount: device number, mount id,
+ * parent mount id, mount flags and the peer-group id when shared.
+ */
+SEC("?iter/fs_mnt")
+int dump_mnt(struct bpf_iter__fs_mnt *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct mount *mnt = ctx->mnt;
+ struct super_block *sb;
+
+ if (mnt == NULL)
+  return 0;
+
+ sb = mnt->mnt.mnt_sb;
+ /* NOTE(review): the shift assumes the kernel's MINORBITS == 20
+  * dev_t encoding (major = dev >> 20, minor = low 20 bits) -- keep
+  * in sync with include/linux/kdev_t.h.
+  */
+ BPF_SEQ_PRINTF(seq, "dev %u:%u ",
+ sb->s_dev >> 20, sb->s_dev & ((1 << 20) - 1));
+
+ BPF_SEQ_PRINTF(seq, "id %d parent_id %d mnt_flags 0x%x",
+ mnt->mnt_id, mnt->mnt_parent->mnt_id, mnt->mnt.mnt_flags);
+ /* NOTE(review): 0x1000 is assumed to be MNT_SHARED -- keep in sync
+  * with include/linux/mount.h.
+  */
+ if (mnt->mnt.mnt_flags & 0x1000)
+  BPF_SEQ_PRINTF(seq, " shared:%d", mnt->mnt_group_id);
+ BPF_SEQ_PRINTF(seq, "\n");
+
+ return 0;
+}
--
2.29.2
prev parent reply other threads:[~2023-05-07 3:30 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-05-07 4:01 [RFC PATCH bpf-next 0/4] Introduce bpf iterators for file-system Hou Tao
2023-05-07 4:01 ` [RFC PATCH bpf-next 1/4] bpf: Introduce bpf iterator for file-system inode Hou Tao
2023-05-07 4:01 ` [RFC PATCH bpf-next 2/4] bpf: Add three kfunc helpers for bpf fs inode iterator Hou Tao
2023-05-07 4:01 ` [RFC PATCH bpf-next 3/4] bpf: Introduce bpf iterator for file system mount Hou Tao
2023-05-07 4:01 ` Hou Tao [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230507040107.3755166-5-houtao@huaweicloud.com \
--to=houtao@huaweicloud.com \
--cc=alexei.starovoitov@gmail.com \
--cc=amir73il@gmail.com \
--cc=andrii.nakryiko@gmail.com \
--cc=bpf@vger.kernel.org \
--cc=houtao1@huawei.com \
--cc=linux-fsdevel@vger.kernel.org \
--cc=slava@dubeyko.com \
--cc=yhs@fb.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).