From: Pavel Begunkov <asml.silence@gmail.com>
To: io-uring@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>, asml.silence@gmail.com
Subject: [PATCH for-next 2/6] io_uring: don't inline __io_get_cqe()
Date: Fri, 17 Jun 2022 09:48:01 +0100
Message-ID: <c1ac829198a881b7af8710926f99a3559b9f24c0.1655455613.git.asml.silence@gmail.com>
In-Reply-To: <cover.1655455613.git.asml.silence@gmail.com>

__io_get_cqe() is not as hot as io_get_cqe(), so there is no need to
inline it; moving it out of line sheds ~500B from the binary.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c | 35 +++++++++++++++++++++++++++++++++++
 io_uring/io_uring.h | 36 +-----------------------------------
 2 files changed, 36 insertions(+), 35 deletions(-)
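
[Editorial note, not part of the patch: a minimal stand-alone sketch of
the fast-path/slow-path split this patch creates. The cheap cached-pointer
check stays inline at every call site, while the cold refill path is kept
out of line so its body exists only once. All names below (struct ring,
get_entry(), refill_entries()) are invented for the illustration and are
not io_uring APIs; __attribute__((noinline)) is a GCC/Clang extension.]

#include <stdio.h>

#define RING_ENTRIES 8

struct ring {
	int entries[RING_ENTRIES];
	int *cached;		/* next entry to hand out */
	int *sentinel;		/* end of the currently valid range */
	unsigned int tail;
};

/* Cold path: recompute the contiguous valid range; deliberately out of line. */
__attribute__((noinline))
static int *refill_entries(struct ring *r)
{
	unsigned int off = r->tail & (RING_ENTRIES - 1);

	r->cached = &r->entries[off];
	r->sentinel = r->cached + (RING_ENTRIES - off);
	r->tail++;
	return r->cached++;
}

/* Hot path: one compare and one increment, cheap enough to inline. */
static inline int *get_entry(struct ring *r)
{
	if (r->cached < r->sentinel) {
		r->tail++;
		return r->cached++;
	}
	/* slow path is a single out-of-line call, not duplicated code */
	return refill_entries(r);
}

int main(void)
{
	struct ring r = { .tail = 0 };	/* cached/sentinel start as NULL */

	for (int i = 0; i < 4; i++)
		printf("got entry %d\n", (int)(get_entry(&r) - r.entries));
	return 0;
}

The first call falls through to refill_entries(); the next three are served
entirely from the inline check, which is the case the patch keeps hot while
pushing the refill logic into io_uring.c.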

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 7ffb8422e7d0..a3b1339335c5 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -165,6 +165,11 @@ static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
 		__io_submit_flush_completions(ctx);
 }
 
+static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
+{
+	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
+}
+
 static bool io_match_linked(struct io_kiocb *head)
 {
 	struct io_kiocb *req;
@@ -673,6 +678,36 @@ bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 	return true;
 }
 
+/*
+ * writes to the cq entry need to come after reading head; the
+ * control dependency is enough as we're using WRITE_ONCE to
+ * fill the cq entry
+ */
+struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
+{
+	struct io_rings *rings = ctx->rings;
+	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
+	unsigned int shift = 0;
+	unsigned int free, queued, len;
+
+	if (ctx->flags & IORING_SETUP_CQE32)
+		shift = 1;
+
+	/* userspace may cheat modifying the tail, be safe and do min */
+	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
+	free = ctx->cq_entries - queued;
+	/* we need a contiguous range, limit based on the current array offset */
+	len = min(free, ctx->cq_entries - off);
+	if (!len)
+		return NULL;
+
+	ctx->cached_cq_tail++;
+	ctx->cqe_cached = &rings->cqes[off];
+	ctx->cqe_sentinel = ctx->cqe_cached + len;
+	ctx->cqe_cached++;
+	return &rings->cqes[off << shift];
+}
+
 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx,
 			    u64 user_data, s32 res, u32 cflags)
 {
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index ce6538c9aed3..51032a494aec 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -16,44 +16,10 @@ enum {
 	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
 };
 
+struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
 bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 			      u32 cflags, u64 extra1, u64 extra2);
 
-static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
-{
-	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
-}
-
-/*
- * writes to the cq entry need to come after reading head; the
- * control dependency is enough as we're using WRITE_ONCE to
- * fill the cq entry
- */
-static inline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
-{
-	struct io_rings *rings = ctx->rings;
-	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
-	unsigned int shift = 0;
-	unsigned int free, queued, len;
-
-	if (ctx->flags & IORING_SETUP_CQE32)
-		shift = 1;
-
-	/* userspace may cheat modifying the tail, be safe and do min */
-	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
-	free = ctx->cq_entries - queued;
-	/* we need a contiguous range, limit based on the current array offset */
-	len = min(free, ctx->cq_entries - off);
-	if (!len)
-		return NULL;
-
-	ctx->cached_cq_tail++;
-	ctx->cqe_cached = &rings->cqes[off];
-	ctx->cqe_sentinel = ctx->cqe_cached + len;
-	ctx->cqe_cached++;
-	return &rings->cqes[off << shift];
-}
-
 static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
 {
 	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
-- 
2.36.1


Thread overview: 8+ messages
2022-06-17  8:47 [PATCH for-next 0/6] clean up __io_fill_cqe_req() Pavel Begunkov
2022-06-17  8:48 ` [PATCH for-next 1/6] io_uring: don't expose io_fill_cqe_aux() Pavel Begunkov
2022-06-17  8:48 ` Pavel Begunkov [this message]
2022-06-17  8:48 ` [PATCH for-next 3/6] io_uring: introduce io_req_cqe_overflow() Pavel Begunkov
2022-06-17  8:48 ` [PATCH for-next 4/6] io_uring: deduplicate __io_fill_cqe_req tracing Pavel Begunkov
2022-06-17  8:48 ` [PATCH for-next 5/6] io_uring: deduplicate io_get_cqe() calls Pavel Begunkov
2022-06-17  8:48 ` [PATCH for-next 6/6] io_uring: change ->cqe_cached invariant for CQE32 Pavel Begunkov
2022-06-17 13:35 ` [PATCH for-next 0/6] clean up __io_fill_cqe_req() Jens Axboe
