* Re: [PATCH v2 bpf-next 2/5] bpf: af_unix: Use batching algorithm in bpf unix iter.
@ 2022-01-13 17:20 kernel test robot
  0 siblings, 0 replies; 2+ messages in thread
From: kernel test robot @ 2022-01-13 17:20 UTC (permalink / raw)
  To: kbuild


CC: kbuild-all@lists.01.org
In-Reply-To: <20220113002849.4384-3-kuniyu@amazon.co.jp>
References: <20220113002849.4384-3-kuniyu@amazon.co.jp>
TO: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
TO: "David S. Miller" <davem@davemloft.net>
CC: netdev@vger.kernel.org
TO: Jakub Kicinski <kuba@kernel.org>
TO: Alexei Starovoitov <ast@kernel.org>
TO: Daniel Borkmann <daniel@iogearbox.net>
TO: Andrii Nakryiko <andrii@kernel.org>
CC: Martin KaFai Lau <kafai@fb.com>
CC: Benjamin Herrenschmidt <benh@amazon.com>
CC: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
CC: bpf@vger.kernel.org

Hi Kuniyuki,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on bpf-next/master]

url:    https://github.com/0day-ci/linux/commits/Kuniyuki-Iwashima/bpf-Batching-iter-for-AF_UNIX-sockets/20220113-083151
base:   https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
:::::: branch date: 17 hours ago
:::::: commit date: 17 hours ago
config: arm-randconfig-m031-20220113 (https://download.01.org/0day-ci/archive/20220114/202201140137.dztxZIjJ-lkp@intel.com/config)
compiler: arm-linux-gnueabi-gcc (GCC) 11.2.0

If you fix the issue, kindly add the following tags as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>

New smatch warnings:
net/unix/af_unix.c:3464 bpf_iter_unix_batch() warn: sleeping in atomic context

Old smatch warnings:
net/unix/af_unix.c:1586 unix_stream_connect() warn: variable dereferenced before check 'other' (see line 1469)
net/unix/af_unix.c:2652 manage_oob() warn: returning freed memory 'skb'

vim +3464 net/unix/af_unix.c

a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3436  
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3437  static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3438  					loff_t *pos)
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3439  {
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3440  	struct bpf_unix_iter_state *iter = seq->private;
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3441  	unsigned int expected;
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3442  	bool resized = false;
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3443  	struct sock *sk;
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3444  
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3445  	if (iter->st_bucket_done)
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3446  		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3447  
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3448  again:
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3449  	/* Get a new batch */
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3450  	iter->cur_sk = 0;
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3451  	iter->end_sk = 0;
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3452  
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3453  	sk = unix_get_first(seq, pos);
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3454  	if (!sk)
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3455  		return NULL; /* Done */
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3456  
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3457  	expected = bpf_iter_unix_hold_batch(seq, sk);
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3458  
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3459  	if (iter->end_sk == expected) {
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3460  		iter->st_bucket_done = true;
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3461  		return sk;
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3462  	}
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3463  
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13 @3464  	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3465  		resized = true;
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3466  		goto again;
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3467  	}
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3468  
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3469  	return sk;
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3470  }
a8f4be17743b80 Kuniyuki Iwashima 2022-01-13  3471  
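
The warning flags the kvmalloc(GFP_USER) reached from line 3464: GFP_USER allocations may sleep, and smatch evidently believes a spinlock can still be held at that point. Judging from the series, unix_get_first() returns with the bucket's unix_table_locks entry held, and the matching spin_unlock() sits inside bpf_iter_unix_hold_batch() rather than in bpf_iter_unix_batch() itself -- a take-here/release-there pattern that cross-function static analysis often mistracks. A minimal user-space model of that hand-off, with hypothetical names and pthread primitives standing in for the kernel ones:

#include <pthread.h>
#include <stdlib.h>

static pthread_spinlock_t bucket_lock;

/* Returns with bucket_lock held, mirroring how unix_get_first()
 * appears to behave in this series. */
static void *get_first(void)
{
	pthread_spin_lock(&bucket_lock);
	return (void *)1;	/* pretend the bucket is non-empty */
}

/* Releases the lock its *caller* took -- the cross-function
 * asymmetry a checker must track to see that the later
 * allocation is safe. */
static void hold_batch(void)
{
	pthread_spin_unlock(&bucket_lock);
}

int main(void)
{
	pthread_spin_init(&bucket_lock, PTHREAD_PROCESS_PRIVATE);
	if (get_first()) {
		hold_batch();
		/* A sleeping allocation is legal here only because
		 * hold_batch() already dropped the spinlock. */
		free(malloc(64));
	}
	pthread_spin_destroy(&bucket_lock);
	return 0;
}

Whether smatch sees a real path that skips the unlock, or simply loses track of the hand-off, is something the author would need to confirm against the full series.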

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org


* [PATCH v2 bpf-next 2/5] bpf: af_unix: Use batching algorithm in bpf unix iter.
  2022-01-13  0:28 [PATCH v2 bpf-next 0/5] bpf: Batching iter for AF_UNIX sockets Kuniyuki Iwashima
@ 2022-01-13  0:28 ` Kuniyuki Iwashima
  0 siblings, 0 replies; 2+ messages in thread
From: Kuniyuki Iwashima @ 2022-01-13  0:28 UTC (permalink / raw)
  To: David S. Miller, Jakub Kicinski, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko
  Cc: Martin KaFai Lau, Benjamin Herrenschmidt, Kuniyuki Iwashima,
	Kuniyuki Iwashima, bpf, netdev

Commit 04c7820b776f ("bpf: tcp: Bpf iter batching and lock_sock")
introduced a batching algorithm that iterates over TCP sockets with
more consistency.

This patch uses the same algorithm to iterate AF_UNIX sockets.

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
---
 net/unix/af_unix.c | 184 +++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 177 insertions(+), 7 deletions(-)
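
The heart of the change is the batch-then-iterate pattern borrowed from the TCP iterator: snapshot one hash bucket under its spinlock, walk the snapshot with no lock held, and if the preallocated array proved too small, grow it to 3/2 of the observed bucket size and retake the bucket once. Below is a condensed user-space model of that control flow, with simplified types and hypothetical helper names; the real helpers appear in the hunks that follow.

#include <stdbool.h>
#include <stdlib.h>

struct batch_iter {
	void **batch;			/* snapshot of one bucket */
	unsigned int cur, end, max;
	bool bucket_done;		/* whole bucket captured? */
};

/* Stand-in for bpf_iter_unix_hold_batch(): copy up to iter->max
 * entries out of the bucket, but report how many entries the bucket
 * really had (which may exceed iter->max). */
static unsigned int hold_batch(struct batch_iter *iter,
			       void **bucket, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (iter->end < iter->max)
			iter->batch[iter->end++] = bucket[i];
	return n;
}

static bool realloc_batch(struct batch_iter *iter, unsigned int n)
{
	void **nb = malloc(sizeof(*nb) * n);

	if (!nb)
		return false;
	free(iter->batch);
	iter->batch = nb;
	iter->max = n;
	return true;
}

/* Mirrors bpf_iter_unix_batch(): one resize attempt, then settle
 * for a partial batch if the bucket keeps growing under us. */
static bool next_batch(struct batch_iter *iter,
		       void **bucket, unsigned int n)
{
	bool resized = false;
	unsigned int expected;

again:
	iter->cur = iter->end = 0;
	expected = hold_batch(iter, bucket, n);
	if (!expected)
		return false;			/* empty: done */
	if (iter->end == expected) {
		iter->bucket_done = true;	/* got it all */
		return true;
	}
	if (!resized && realloc_batch(iter, expected * 3 / 2)) {
		resized = true;
		goto again;			/* retry once, bigger */
	}
	return true;				/* best-effort partial */
}

int main(void)
{
	void *bucket[40] = { NULL };
	struct batch_iter it = {
		.batch = malloc(16 * sizeof(void *)),
		.max = 16,
	};

	return next_batch(&it, bucket, 40) ? 0 : 1;
}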

diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e1c4082accdb..d383d5f63b6b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -3356,6 +3356,15 @@ static const struct seq_operations unix_seq_ops = {
 };
 
 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL)
+struct bpf_unix_iter_state {
+	struct seq_net_private p;
+	unsigned int cur_sk;
+	unsigned int end_sk;
+	unsigned int max_sk;
+	struct sock **batch;
+	bool st_bucket_done;
+};
+
 struct bpf_iter__unix {
 	__bpf_md_ptr(struct bpf_iter_meta *, meta);
 	__bpf_md_ptr(struct unix_sock *, unix_sk);
@@ -3374,24 +3383,156 @@ static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
 	return bpf_iter_run_prog(prog, &ctx);
 }
 
+static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
+{
+	struct bpf_unix_iter_state *iter = seq->private;
+	unsigned int expected = 1;
+	struct sock *sk;
+
+	sock_hold(start_sk);
+	iter->batch[iter->end_sk++] = start_sk;
+
+	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
+		if (sock_net(sk) != seq_file_net(seq))
+			continue;
+
+		if (iter->end_sk < iter->max_sk) {
+			sock_hold(sk);
+			iter->batch[iter->end_sk++] = sk;
+		}
+
+		expected++;
+	}
+
+	spin_unlock(&unix_table_locks[start_sk->sk_hash]);
+
+	return expected;
+}
+
+static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
+{
+	while (iter->cur_sk < iter->end_sk)
+		sock_put(iter->batch[iter->cur_sk++]);
+}
+
+static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
+				       unsigned int new_batch_sz)
+{
+	struct sock **new_batch;
+
+	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
+			     GFP_USER | __GFP_NOWARN);
+	if (!new_batch)
+		return -ENOMEM;
+
+	bpf_iter_unix_put_batch(iter);
+	kvfree(iter->batch);
+	iter->batch = new_batch;
+	iter->max_sk = new_batch_sz;
+
+	return 0;
+}
+
+static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
+					loff_t *pos)
+{
+	struct bpf_unix_iter_state *iter = seq->private;
+	unsigned int expected;
+	bool resized = false;
+	struct sock *sk;
+
+	if (iter->st_bucket_done)
+		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
+
+again:
+	/* Get a new batch */
+	iter->cur_sk = 0;
+	iter->end_sk = 0;
+
+	sk = unix_get_first(seq, pos);
+	if (!sk)
+		return NULL; /* Done */
+
+	expected = bpf_iter_unix_hold_batch(seq, sk);
+
+	if (iter->end_sk == expected) {
+		iter->st_bucket_done = true;
+		return sk;
+	}
+
+	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
+		resized = true;
+		goto again;
+	}
+
+	return sk;
+}
+
+static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	if (!*pos)
+		return SEQ_START_TOKEN;
+
+	/* bpf iter does not support lseek, so it always
+	 * continues from where it was stop()-ped.
+	 */
+	return bpf_iter_unix_batch(seq, pos);
+}
+
+static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct bpf_unix_iter_state *iter = seq->private;
+	struct sock *sk;
+
+	/* Whenever seq_next() is called, the iter->cur_sk is
+	 * done with seq_show(), so advance to the next sk in
+	 * the batch.
+	 */
+	if (iter->cur_sk < iter->end_sk)
+		sock_put(iter->batch[iter->cur_sk++]);
+
+	++*pos;
+
+	if (iter->cur_sk < iter->end_sk)
+		sk = iter->batch[iter->cur_sk];
+	else
+		sk = bpf_iter_unix_batch(seq, pos);
+
+	return sk;
+}
+
 static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
 {
 	struct bpf_iter_meta meta;
 	struct bpf_prog *prog;
 	struct sock *sk = v;
 	uid_t uid;
+	bool slow;
+	int ret;
 
 	if (v == SEQ_START_TOKEN)
 		return 0;
 
+	slow = lock_sock_fast(sk);
+
+	if (unlikely(sk_unhashed(sk))) {
+		ret = SEQ_SKIP;
+		goto unlock;
+	}
+
 	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
 	meta.seq = seq;
 	prog = bpf_iter_get_info(&meta, false);
-	return unix_prog_seq_show(prog, &meta, v, uid);
+	ret = unix_prog_seq_show(prog, &meta, v, uid);
+unlock:
+	unlock_sock_fast(sk, slow);
+	return ret;
 }
 
 static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
 {
+	struct bpf_unix_iter_state *iter = seq->private;
 	struct bpf_iter_meta meta;
 	struct bpf_prog *prog;
 
@@ -3402,12 +3543,13 @@ static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
 			(void)unix_prog_seq_show(prog, &meta, v, 0);
 	}
 
-	unix_seq_stop(seq, v);
+	if (iter->cur_sk < iter->end_sk)
+		bpf_iter_unix_put_batch(iter);
 }
 
 static const struct seq_operations bpf_iter_unix_seq_ops = {
-	.start	= unix_seq_start,
-	.next	= unix_seq_next,
+	.start	= bpf_iter_unix_seq_start,
+	.next	= bpf_iter_unix_seq_next,
 	.stop	= bpf_iter_unix_seq_stop,
 	.show	= bpf_iter_unix_seq_show,
 };
@@ -3456,11 +3598,39 @@ static struct pernet_operations unix_net_ops = {
 DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
 		     struct unix_sock *unix_sk, uid_t uid)
 
+#define INIT_BATCH_SZ 16
+
+static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
+{
+	struct bpf_unix_iter_state *iter = priv_data;
+	int err;
+
+	err = bpf_iter_init_seq_net(priv_data, aux);
+	if (err)
+		return err;
+
+	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
+	if (err) {
+		bpf_iter_fini_seq_net(priv_data);
+		return err;
+	}
+
+	return 0;
+}
+
+static void bpf_iter_fini_unix(void *priv_data)
+{
+	struct bpf_unix_iter_state *iter = priv_data;
+
+	bpf_iter_fini_seq_net(priv_data);
+	kvfree(iter->batch);
+}
+
 static const struct bpf_iter_seq_info unix_seq_info = {
 	.seq_ops		= &bpf_iter_unix_seq_ops,
-	.init_seq_private	= bpf_iter_init_seq_net,
-	.fini_seq_private	= bpf_iter_fini_seq_net,
-	.seq_priv_size		= sizeof(struct seq_net_private),
+	.init_seq_private	= bpf_iter_init_unix,
+	.fini_seq_private	= bpf_iter_fini_unix,
+	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
 };
 
 static struct bpf_iter_reg unix_reg_info = {
-- 
2.30.2
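
For reference, a BPF program attaching to this iterator would look roughly like the sketch below. The "iter/unix" section name and the bpf_iter__unix context layout follow the DEFINE_BPF_ITER_FUNC() declaration in this patch; the chosen output fields and the vmlinux.h/libbpf setup are illustrative, not taken from the series' selftests.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("iter/unix")
int dump_unix(struct bpf_iter__unix *ctx)
{
	struct unix_sock *unix_sk = ctx->unix_sk;
	struct seq_file *seq = ctx->meta->seq;

	/* unix_sk is NULL on the SEQ_START_TOKEN pass. */
	if (!unix_sk)
		return 0;

	/* One line per socket; the batching above lets each bucket
	 * be dumped from a consistent snapshot. */
	BPF_SEQ_PRINTF(seq, "state=%u uid=%u\n",
		       unix_sk->sk.__sk_common.skc_state, ctx->uid);
	return 0;
}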

