From: Pavel Begunkov <asml.silence@gmail.com>
To: io-uring@vger.kernel.org, netdev@vger.kernel.org,
	bpf@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>, Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Andrii Nakryiko <andrii@kernel.org>,
	Martin KaFai Lau <kafai@fb.com>, Song Liu <songliubraving@fb.com>,
	Yonghong Song <yhs@fb.com>,
	John Fastabend <john.fastabend@gmail.com>,
	KP Singh <kpsingh@kernel.org>,
	Horst Schirmeier <horst.schirmeier@tu-dortmund.de>,
	"Franz-B . Tuneke" <franz-bernhard.tuneke@tu-dortmund.de>,
	Christian Dietrich <stettberger@dokucode.de>
Subject: [PATCH 07/23] io_uring: extract struct for CQ
Date: Wed, 19 May 2021 15:13:18 +0100
Message-ID: <9203fb800f78165633f295e17bfcacf3c3409404.1621424513.git.asml.silence@gmail.com>
In-Reply-To: <cover.1621424513.git.asml.silence@gmail.com>

Extract a structure, struct io_cqring, describing the internal completion
queue state, and embed it in the context as a single-element array for
now. This is in preparation for supporting multiple CQs per ring.
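
For context, a minimal sketch (illustrative only, not part of this patch)
of how a per-index accessor could look once ctx->cqs[] holds more than one
element; the helper name io_cqring_select() and its idx parameter are
hypothetical here and are not introduced by this patch:

	/*
	 * Hypothetical helper: pick a CQ by index. With this patch the
	 * array has a single element, so any caller would pass idx == 0;
	 * later patches in the series pass real CQ indexes internally.
	 */
	static inline struct io_cqring *io_cqring_select(struct io_ring_ctx *ctx,
							 unsigned int idx)
	{
		return &ctx->cqs[idx];
	}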

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 47 +++++++++++++++++++++++++----------------------
 1 file changed, 25 insertions(+), 22 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 49a1b6b81d7d..4fecd9da689e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -335,6 +335,12 @@ struct io_submit_state {
 	unsigned int		ios_left;
 };
 
+struct io_cqring {
+	unsigned		cached_tail;
+	unsigned		entries;
+	struct io_rings		*rings;
+};
+
 struct io_ring_ctx {
 	struct {
 		struct percpu_ref	refs;
@@ -402,17 +408,14 @@ struct io_ring_ctx {
 	struct xarray		personalities;
 	u32			pers_next;
 
-	struct {
-		unsigned		cached_cq_tail;
-		unsigned		cq_entries;
-		atomic_t		cq_timeouts;
-		unsigned		cq_last_tm_flush;
-		unsigned		cq_extra;
-		unsigned long		cq_check_overflow;
-		struct wait_queue_head	cq_wait;
-		struct fasync_struct	*cq_fasync;
-		struct eventfd_ctx	*cq_ev_fd;
-	} ____cacheline_aligned_in_smp;
+	struct fasync_struct	*cq_fasync;
+	struct eventfd_ctx	*cq_ev_fd;
+	atomic_t		cq_timeouts;
+	unsigned		cq_last_tm_flush;
+	unsigned long		cq_check_overflow;
+	unsigned		cq_extra;
+	struct wait_queue_head	cq_wait;
+	struct io_cqring	cqs[1];
 
 	struct {
 		spinlock_t		completion_lock;
@@ -1207,7 +1210,7 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
 	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
 		struct io_ring_ctx *ctx = req->ctx;
 
-		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
+		return seq + READ_ONCE(ctx->cq_extra) != ctx->cqs[0].cached_tail;
 	}
 
 	return false;
@@ -1312,7 +1315,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 	if (list_empty(&ctx->timeout_list))
 		return;
 
-	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+	seq = ctx->cqs[0].cached_tail - atomic_read(&ctx->cq_timeouts);
 
 	do {
 		u32 events_needed, events_got;
@@ -1346,7 +1349,7 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
 	io_flush_timeouts(ctx);
 
 	/* order cqe stores with ring update */
-	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
+	smp_store_release(&ctx->rings->cq.tail, ctx->cqs[0].cached_tail);
 
 	if (unlikely(!list_empty(&ctx->defer_list)))
 		__io_queue_deferred(ctx);
@@ -1361,23 +1364,23 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
 
 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
 {
-	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
+	return ctx->cqs[0].cached_tail - READ_ONCE(ctx->rings->cq.head);
 }
 
 static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
 {
 	struct io_rings *rings = ctx->rings;
-	unsigned tail, mask = ctx->cq_entries - 1;
+	unsigned tail, mask = ctx->cqs[0].entries - 1;
 
 	/*
 	 * writes to the cq entry need to come after reading head; the
 	 * control dependency is enough as we're using WRITE_ONCE to
 	 * fill the cq entry
 	 */
-	if (__io_cqring_events(ctx) == ctx->cq_entries)
+	if (__io_cqring_events(ctx) == ctx->cqs[0].entries)
 		return NULL;
 
-	tail = ctx->cached_cq_tail++;
+	tail = ctx->cqs[0].cached_tail++;
 	return &rings->cqes[tail & mask];
 }
 
@@ -1430,7 +1433,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 	unsigned long flags;
 	bool all_flushed, posted;
 
-	if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
+	if (!force && __io_cqring_events(ctx) == ctx->cqs[0].entries)
 		return false;
 
 	posted = false;
@@ -5670,7 +5673,7 @@ static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 		goto add;
 	}
 
-	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+	tail = ctx->cqs[0].cached_tail - atomic_read(&ctx->cq_timeouts);
 	req->timeout.target_seq = tail + off;
 
 	/* Update the last seq here in case io_flush_timeouts() hasn't.
@@ -9331,7 +9334,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		if (unlikely(ret))
 			goto out;
 
-		min_complete = min(min_complete, ctx->cq_entries);
+		min_complete = min(min_complete, ctx->cqs[0].entries);
 
 		/*
 		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
@@ -9481,7 +9484,7 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 
 	/* make sure these are sane, as we already accounted them */
 	ctx->sq_entries = p->sq_entries;
-	ctx->cq_entries = p->cq_entries;
+	ctx->cqs[0].entries = p->cq_entries;
 
 	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
 	if (size == SIZE_MAX)
-- 
2.31.1

