From: Pavel Begunkov <asml.silence@gmail.com>
To: io-uring@vger.kernel.org, netdev@vger.kernel.org,
	bpf@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>, Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Andrii Nakryiko <andrii@kernel.org>,
	Martin KaFai Lau <kafai@fb.com>, Song Liu <songliubraving@fb.com>,
	Yonghong Song <yhs@fb.com>,
	John Fastabend <john.fastabend@gmail.com>,
	KP Singh <kpsingh@kernel.org>,
	Horst Schirmeier <horst.schirmeier@tu-dortmund.de>,
	"Franz-B . Tuneke" <franz-bernhard.tuneke@tu-dortmund.de>,
	Christian Dietrich <stettberger@dokucode.de>
Subject: [PATCH 23/23] io_uring: enable bpf reqs to wait for CQs
Date: Wed, 19 May 2021 15:13:34 +0100
Message-ID: <a3d5ac5539a8d9f0423fea051a038e8bbfe10c99.1621424513.git.asml.silence@gmail.com>
In-Reply-To: <cover.1621424513.git.asml.silence@gmail.com>

Add experimental support for bpf requests waiting for a number of CQEs
in a specified CQ. The bpf program fills wait_nr and wait_idx in its
io_uring_bpf_ctx; if wait_nr is non-zero when the program returns, the
request is parked on the ring's waitqueue instead of completing, and is
re-executed once the target CQ has at least wait_nr CQEs pending or the
ring is going away.
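
A rough sketch of the bpf side, for illustration only: the section
name, program name and includes are assumptions (the actual loading
convention comes from the libbpf patch earlier in the series), but the
context fields match this patch.

	#include <linux/io_uring.h>
	#include <bpf/bpf_helpers.h>

	/* "iouring" is an assumed section name, see the libbpf patch */
	SEC("iouring")
	int wait_then_resume(struct io_uring_bpf_ctx *ctx)
	{
		/* park this request until CQ index 1 holds 8+ CQEs */
		ctx->wait_idx = 1;
		ctx->wait_nr = 8;
		return 0;
	}

The ctx fields are zeroed before every run, so a program that leaves
wait_nr at 0 keeps the usual complete-immediately behaviour. On wakeup
the request is re-queued through io_queue_async_work() and the program
runs again with a fresh context.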

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c                 | 91 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 include/uapi/linux/io_uring.h |  2 +
 2 files changed, 90 insertions(+), 3 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 805c10be7ea4..cf02389747b5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -687,6 +687,12 @@ struct io_bpf {
 	struct bpf_prog			*prog;
 };
 
+struct io_async_bpf {
+	struct wait_queue_entry		wqe;
+	unsigned int			wait_nr;
+	unsigned int			wait_idx;
+};
+
 struct io_completion {
 	struct file			*file;
 	struct list_head		list;
@@ -1050,7 +1056,9 @@ static const struct io_op_def io_op_defs[] = {
 	},
 	[IORING_OP_RENAMEAT] = {},
 	[IORING_OP_UNLINKAT] = {},
-	[IORING_OP_BPF] = {},
+	[IORING_OP_BPF] = {
+		.async_size		= sizeof(struct io_async_bpf),
+	},
 };
 
 static bool io_disarm_next(struct io_kiocb *req);
@@ -9148,6 +9156,8 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 			}
 		}
 
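+		/* wake bpf requests parked on CQ waits so they see cancellation */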
+		wake_up_all(&ctx->wait);
 		ret |= io_cancel_defer_files(ctx, task, files);
 		ret |= io_poll_remove_all(ctx, task, files);
 		ret |= io_kill_timeouts(ctx, task, files);
@@ -10492,6 +10502,10 @@ static bool io_bpf_is_valid_access(int off, int size,
 	switch (off) {
 	case offsetof(struct io_uring_bpf_ctx, user_data):
 		return size == sizeof_field(struct io_uring_bpf_ctx, user_data);
+	case offsetof(struct io_uring_bpf_ctx, wait_nr):
+		return size == sizeof_field(struct io_uring_bpf_ctx, wait_nr);
+	case offsetof(struct io_uring_bpf_ctx, wait_idx):
+		return size == sizeof_field(struct io_uring_bpf_ctx, wait_idx);
 	}
 	return false;
 }
@@ -10503,6 +10517,69 @@ const struct bpf_verifier_ops bpf_io_uring_verifier_ops = {
 	.is_valid_access	= io_bpf_is_valid_access,
 };
 
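+/*
+ * A parked bpf request is woken when its target CQ has enough CQEs, or
+ * when the ring/task is dying and the wait must be aborted.
+ */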
+static inline bool io_bpf_need_wake(struct io_async_bpf *abpf)
+{
+	struct io_kiocb *req = abpf->wqe.private;
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (unlikely(percpu_ref_is_dying(&ctx->refs)) ||
+		     atomic_read(&req->task->io_uring->in_idle))
+		return true;
+	return __io_cqring_events(&ctx->cqs[abpf->wait_idx]) >= abpf->wait_nr;
+}
+
+static int io_bpf_wait_func(struct wait_queue_entry *wqe, unsigned mode,
+			       int sync, void *key)
+{
+	struct io_async_bpf *abpf = container_of(wqe, struct io_async_bpf, wqe);
+	bool wake = io_bpf_need_wake(abpf);
+
+	if (wake) {
+		list_del_init_careful(&wqe->entry);
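+		/* hold a request reference for the queued async work */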
+		req_ref_get(wqe->private);
+		io_queue_async_work(wqe->private);
+	}
+	return wake;
+}
+
+static int io_bpf_wait_cq_async(struct io_kiocb *req, unsigned int nr,
+				unsigned int idx)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct wait_queue_head *wq;
+	struct wait_queue_entry *wqe;
+	struct io_async_bpf *abpf;
+
+	if (unlikely(idx >= ctx->cq_nr))
+		return -EINVAL;
+	if (!req->async_data && io_alloc_async_data(req))
+		return -ENOMEM;
+
+	abpf = req->async_data;
+	abpf->wait_nr = nr;
+	abpf->wait_idx = idx;
+	wqe = &abpf->wqe;
+	init_waitqueue_func_entry(wqe, io_bpf_wait_func);
+	wqe->private = req;
+	wq = &ctx->wait;
+
+	spin_lock_irq(&wq->lock);
+	__add_wait_queue(wq, wqe);
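+	/*
+	 * Make the new wait entry visible before re-checking the CQ, so a
+	 * CQE posted concurrently is either seen below or wakes us up.
+	 */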
+	smp_mb();
+	io_bpf_wait_func(wqe, 0, 0, NULL);
+	spin_unlock_irq(&wq->lock);
+	return 0;
+}
+
 static void io_bpf_run(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -10512,8 +10589,8 @@ static void io_bpf_run(struct io_kiocb *req, unsigned int issue_flags)
 
 	lockdep_assert_held(&req->ctx->uring_lock);
 
-	if (unlikely(percpu_ref_is_dying(&ctx->refs) ||
-		     atomic_read(&req->task->io_uring->in_idle)))
+	if (unlikely(percpu_ref_is_dying(&ctx->refs)) ||
+		     atomic_read(&req->task->io_uring->in_idle))
 		goto done;
 
 	memset(&bpf_ctx.u, 0, sizeof(bpf_ctx.u));
@@ -10531,6 +10608,14 @@ static void io_bpf_run(struct io_kiocb *req, unsigned int issue_flags)
 	}
 	io_submit_state_end(&ctx->submit_state, ctx);
 	ret = 0;
+
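+	/* the bpf program asked to wait on a CQ; park instead of completing */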
+	if (bpf_ctx.u.wait_nr) {
+		ret = io_bpf_wait_cq_async(req, bpf_ctx.u.wait_nr,
+					   bpf_ctx.u.wait_idx);
+		if (!ret)
+			return;
+	}
 done:
 	__io_req_complete(req, issue_flags, ret, 0);
 }
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index d7b1713bcfb0..95c04af3afd4 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -405,6 +405,8 @@ struct io_uring_getevents_arg {
 
 struct io_uring_bpf_ctx {
 	__u64	user_data;
+	__u32	wait_nr;
+	__u32	wait_idx;
 };
 
 #endif
-- 
2.31.1


Thread overview: 39+ messages
2021-05-19 14:13 [RFC v2 00/23] io_uring BPF requests Pavel Begunkov
2021-05-19 14:13 ` [PATCH 01/23] io_uring: shuffle rarely used ctx fields Pavel Begunkov
2021-05-20 21:46   ` Song Liu
2021-05-20 22:46     ` Pavel Begunkov
2021-05-19 14:13 ` [PATCH 02/23] io_uring: localise fixed resources fields Pavel Begunkov
2021-05-19 14:13 ` [PATCH 03/23] io_uring: remove dependency on ring->sq/cq_entries Pavel Begunkov
2021-05-19 14:13 ` [PATCH 04/23] io_uring: deduce cq_mask from cq_entries Pavel Begunkov
2021-05-19 14:13 ` [PATCH 05/23] io_uring: kill cached_cq_overflow Pavel Begunkov
2021-05-19 14:13 ` [PATCH 06/23] io_uring: rename io_get_cqring Pavel Begunkov
2021-05-19 14:13 ` [PATCH 07/23] io_uring: extract struct for CQ Pavel Begunkov
2021-05-19 14:13 ` [PATCH 08/23] io_uring: internally pass CQ indexes Pavel Begunkov
2021-05-19 14:13 ` [PATCH 09/23] io_uring: extract cq size helper Pavel Begunkov
2021-05-19 14:13 ` [PATCH 10/23] io_uring: add support for multiple CQs Pavel Begunkov
2021-05-19 14:13 ` [PATCH 11/23] io_uring: enable mmap'ing additional CQs Pavel Begunkov
2021-05-19 14:13 ` [PATCH 12/23] bpf: add IOURING program type Pavel Begunkov
2021-05-20 23:34   ` Song Liu
2021-05-21  0:56     ` Pavel Begunkov
2021-05-19 14:13 ` [PATCH 13/23] io_uring: implement bpf prog registration Pavel Begunkov
2021-05-20 23:45   ` Song Liu
2021-05-21  0:43     ` Pavel Begunkov
2021-05-19 14:13 ` [PATCH 14/23] io_uring: add support for bpf requests Pavel Begunkov
2021-05-21  0:42   ` Pavel Begunkov
2021-05-19 14:13 ` [PATCH 15/23] io_uring: enable BPF to submit SQEs Pavel Begunkov
2021-05-21  0:06   ` Song Liu
2021-05-21  1:07   ` Alexei Starovoitov
2021-05-21  9:33     ` Pavel Begunkov
2021-05-19 14:13 ` [PATCH 16/23] io_uring: enable bpf to submit CQEs Pavel Begunkov
2021-05-19 14:13 ` [PATCH 17/23] io_uring: enable bpf to reap CQEs Pavel Begunkov
2021-05-19 14:13 ` [PATCH 18/23] libbpf: support io_uring Pavel Begunkov
2021-05-19 17:38   ` Andrii Nakryiko
2021-05-20  9:58     ` Pavel Begunkov
2021-05-20 17:23       ` Andrii Nakryiko
2021-05-19 14:13 ` [PATCH 19/23] io_uring: pass user_data to bpf executor Pavel Begunkov
2021-05-19 14:13 ` [PATCH 20/23] bpf: Add bpf_copy_to_user() helper Pavel Begunkov
2021-05-19 14:13 ` [PATCH 21/23] io_uring: wire bpf copy to user Pavel Begunkov
2021-05-19 14:13 ` [PATCH 22/23] io_uring: don't wait on CQ exclusively Pavel Begunkov
2021-05-19 14:13 ` Pavel Begunkov [this message]
2021-05-21  0:35 ` [RFC v2 00/23] io_uring BPF requests Song Liu
2021-05-21  0:58   ` Pavel Begunkov
