From: Yonghong Song <yhs@fb.com>
To: <bpf@vger.kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>,
	Cong Wang <xiyou.wangcong@gmail.com>,
	Daniel Borkmann <daniel@iogearbox.net>, <kernel-team@fb.com>
Subject: [PATCH bpf-next v2 05/11] bpf: add hashtab support for bpf_for_each_map_elem() helper
Date: Wed, 17 Feb 2021 10:18:08 -0800
Message-ID: <20210217181808.3190262-1-yhs@fb.com>
In-Reply-To: <20210217181803.3189437-1-yhs@fb.com>

This patch adds bpf_for_each_map_elem() support for the hashmap,
percpu hashmap, lru hashmap and percpu lru hashmap map types.
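
For illustration only, below is a minimal sketch of how a bpf program
could walk a hash map with the helper once this series (including the
libbpf callback relocation support in later patches) is in place. The
map, section and function names are made up; the shape roughly follows
the hashmap selftest added later in the series:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 64);
        __type(key, __u32);
        __type(value, __u64);
} hashmap SEC(".maps");

struct callback_ctx {
        __u64 sum;
};

/* called once per element; return 0 to continue, 1 to stop early */
static __u64 sum_elem(struct bpf_map *map, __u32 *key, __u64 *val,
                      struct callback_ctx *data)
{
        data->sum += *val;
        return 0;
}

SEC("tc")
int sum_hash_map(struct __sk_buff *skb)
{
        struct callback_ctx data = {};

        /* returns the number of elements visited, or a negative error */
        bpf_for_each_map_elem(&hashmap, sum_elem, &data, 0);
        return 0;
}

char _license[] SEC("license") = "GPL";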

Signed-off-by: Yonghong Song <yhs@fb.com>
---
 include/linux/bpf.h   |  4 +++
 kernel/bpf/hashtab.c  | 63 +++++++++++++++++++++++++++++++++++++++++++
 kernel/bpf/verifier.c | 27 +++++++++++++++++++
 3 files changed, 94 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 40f41a9b40f9..34277ab1eda5 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1392,6 +1392,10 @@ void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
 				struct bpf_link_info *info);
 
+int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+				   struct bpf_func_state *caller,
+				   struct bpf_func_state *callee);
+
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index d63912e73ad9..f652b92ca79f 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1869,6 +1869,61 @@ static const struct bpf_iter_seq_info iter_seq_info = {
 	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
 };
 
+static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn,
+				  void *callback_ctx, u64 flags)
+{
+	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+	struct hlist_nulls_head *head;
+	long ret = 0, num_calls = 0;
+	struct hlist_nulls_node *n;
+	struct htab_elem *elem;
+	u32 roundup_key_size;
+	void __percpu *pptr;
+	struct bucket *b;
+	void *key, *val;
+	bool is_percpu;
+	int i;
+
+	if (flags != 0)
+		return -EINVAL;
+
+	is_percpu = htab_is_percpu(htab);
+
+	roundup_key_size = round_up(map->key_size, 8);
+	/* disable migration so percpu value prepared here will be the
+	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
+	 */
+	migrate_disable();
+	for (i = 0; i < htab->n_buckets; i++) {
+		b = &htab->buckets[i];
+		rcu_read_lock();
+		head = &b->head;
+		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
+			key = elem->key;
+			if (!is_percpu) {
+				val = elem->key + roundup_key_size;
+			} else {
+				/* current cpu value for percpu map */
+				pptr = htab_elem_get_ptr(elem, map->key_size);
+				val = this_cpu_ptr(pptr);
+			}
+			num_calls++;
+			ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
+					(u64)(long)key, (u64)(long)val,
+					(u64)(long)callback_ctx, 0);
+			if (ret) {
+				rcu_read_unlock();
+				ret = (ret == 1) ? 0 : -EINVAL;
+				goto out;
+			}
+		}
+		rcu_read_unlock();
+	}
+out:
+	migrate_enable();
+	return ret ?: num_calls;
+}
+
 static int htab_map_btf_id;
 const struct bpf_map_ops htab_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
@@ -1881,6 +1936,8 @@ const struct bpf_map_ops htab_map_ops = {
 	.map_delete_elem = htab_map_delete_elem,
 	.map_gen_lookup = htab_map_gen_lookup,
 	.map_seq_show_elem = htab_map_seq_show_elem,
+	.map_set_for_each_callback_args = map_set_for_each_callback_args,
+	.map_for_each_callback = bpf_for_each_hash_elem,
 	BATCH_OPS(htab),
 	.map_btf_name = "bpf_htab",
 	.map_btf_id = &htab_map_btf_id,
@@ -1900,6 +1957,8 @@ const struct bpf_map_ops htab_lru_map_ops = {
 	.map_delete_elem = htab_lru_map_delete_elem,
 	.map_gen_lookup = htab_lru_map_gen_lookup,
 	.map_seq_show_elem = htab_map_seq_show_elem,
+	.map_set_for_each_callback_args = map_set_for_each_callback_args,
+	.map_for_each_callback = bpf_for_each_hash_elem,
 	BATCH_OPS(htab_lru),
 	.map_btf_name = "bpf_htab",
 	.map_btf_id = &htab_lru_map_btf_id,
@@ -2019,6 +2078,8 @@ const struct bpf_map_ops htab_percpu_map_ops = {
 	.map_update_elem = htab_percpu_map_update_elem,
 	.map_delete_elem = htab_map_delete_elem,
 	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
+	.map_set_for_each_callback_args = map_set_for_each_callback_args,
+	.map_for_each_callback = bpf_for_each_hash_elem,
 	BATCH_OPS(htab_percpu),
 	.map_btf_name = "bpf_htab",
 	.map_btf_id = &htab_percpu_map_btf_id,
@@ -2036,6 +2097,8 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
 	.map_update_elem = htab_lru_percpu_map_update_elem,
 	.map_delete_elem = htab_lru_map_delete_elem,
 	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
+	.map_set_for_each_callback_args = map_set_for_each_callback_args,
+	.map_for_each_callback = bpf_for_each_hash_elem,
 	BATCH_OPS(htab_lru_percpu),
 	.map_btf_name = "bpf_htab",
 	.map_btf_id = &htab_lru_percpu_map_btf_id,
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f80386d094b7..2ce8ed8aca70 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5415,6 +5415,33 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 	return 0;
 }
 
+int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+				   struct bpf_func_state *caller,
+				   struct bpf_func_state *callee)
+{
+	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
+	 *      void *callback_ctx, u64 flags);
+	 * callback_fn(struct bpf_map *map, void *key, void *value,
+	 *      void *callback_ctx);
+	 */
+	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
+
+	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
+	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
+	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
+
+	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
+	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
+	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
+
+	/* pointer to stack or null */
+	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
+
+	/* unused */
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
+	return 0;
+}
+
 static int set_callee_state(struct bpf_verifier_env *env,
 			    struct bpf_func_state *caller,
 			    struct bpf_func_state *callee, int insn_idx)
-- 
2.24.1



Thread overview: 29+ messages
2021-02-17 18:18 [PATCH bpf-next v2 00/11] bpf: add bpf_for_each_map_elem() helper Yonghong Song
2021-02-17 18:18 ` [PATCH bpf-next v2 01/11] bpf: factor out visit_func_call_insn() in check_cfg() Yonghong Song
2021-02-17 18:18 ` [PATCH bpf-next v2 02/11] bpf: factor out verbose_invalid_scalar() Yonghong Song
2021-02-17 18:18 ` [PATCH bpf-next v2 03/11] bpf: refactor check_func_call() to allow callback function Yonghong Song
2021-02-17 18:18 ` [PATCH bpf-next v2 04/11] bpf: add bpf_for_each_map_elem() helper Yonghong Song
2021-02-22 20:59   ` Alexei Starovoitov
2021-02-23 18:39     ` Yonghong Song
2021-02-23 18:46       ` Alexei Starovoitov
2021-02-23 19:37         ` Yonghong Song
2021-02-17 18:18 ` Yonghong Song [this message]
2021-02-22 22:56   ` [PATCH bpf-next v2 05/11] bpf: add hashtab support for " Alexei Starovoitov
2021-02-23 18:41     ` Yonghong Song
2021-02-17 18:18 ` [PATCH bpf-next v2 06/11] bpf: add arraymap " Yonghong Song
2021-02-17 18:18 ` [PATCH bpf-next v2 07/11] libbpf: move function is_ldimm64() earlier in libbpf.c Yonghong Song
2021-02-23  8:06   ` Andrii Nakryiko
2021-02-17 18:18 ` [PATCH bpf-next v2 08/11] libbpf: support local function pointer relocation Yonghong Song
2021-02-23  8:03   ` Andrii Nakryiko
2021-02-23 18:55     ` Yonghong Song
2021-02-23 19:07       ` Alexei Starovoitov
2021-02-23 19:21         ` Andrii Nakryiko
2021-02-23 19:19       ` Andrii Nakryiko
2021-02-23 19:47         ` Yonghong Song
2021-02-23 21:24           ` Andrii Nakryiko
2021-02-17 18:18 ` [PATCH bpf-next v2 09/11] bpftool: print local function pointer properly Yonghong Song
2021-02-23  8:06   ` Andrii Nakryiko
2021-02-23 19:00     ` Yonghong Song
2021-02-17 18:18 ` [PATCH bpf-next v2 10/11] selftests/bpf: add hashmap test for bpf_for_each_map_elem() helper Yonghong Song
2021-02-17 18:18 ` [PATCH bpf-next v2 11/11] selftests/bpf: add arraymap " Yonghong Song
2021-02-17 18:29 ` [PATCH bpf-next v2 00/11] bpf: add " Yonghong Song
