From: Pavel Begunkov <asml.silence@gmail.com>
To: io-uring@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>, asml.silence@gmail.com
Subject: [PATCH for-next v2 16/25] io_uring: pass poll_find lock back
Date: Tue, 14 Jun 2022 15:37:06 +0100	[thread overview]
Message-ID: <c189e37df752d1acf74c50eaeb5aa563099c4ba3.1655213915.git.asml.silence@gmail.com> (raw)
In-Reply-To: <cover.1655213915.git.asml.silence@gmail.com>

Instead of relying on implicit knowledge of what is or isn't locked after
io_poll_find() and friends return, pass back a pointer to the locked
bucket, if any. If it is set, the caller must unlock the spinlock.
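[Editor's note: for illustration only, a minimal userspace sketch of the
calling convention this patch introduces. The names (struct bucket,
table_find, cancel) and the pthread-based locking are stand-ins of my own,
not the kernel code: the find helper takes the bucket lock while searching,
leaves it held on a hit, and reports which bucket it locked through an out
parameter, so the caller unlocks exactly what was locked.]

	/*
	 * Simplified userspace analogue (not kernel code): on a hit the
	 * find helper returns with the bucket's lock held and records the
	 * bucket in *out_bucket; on a miss it drops the lock itself and
	 * leaves *out_bucket NULL.
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct entry {
		unsigned long user_data;
		struct entry *next;
	};

	struct bucket {
		pthread_mutex_t lock;
		struct entry *head;
	};

	#define NR_BUCKETS 4

	static struct bucket table[NR_BUCKETS];

	static struct entry *table_find(unsigned long key,
					struct bucket **out_bucket)
	{
		struct bucket *b = &table[key % NR_BUCKETS];
		struct entry *e;

		*out_bucket = NULL;	/* nothing locked unless we return a hit */

		pthread_mutex_lock(&b->lock);
		for (e = b->head; e; e = e->next) {
			if (e->user_data == key) {
				*out_bucket = b;	/* returned with b->lock held */
				return e;
			}
		}
		pthread_mutex_unlock(&b->lock);	/* miss: drop the lock here */
		return NULL;
	}

	static int cancel(unsigned long key)
	{
		struct bucket *bucket;
		struct entry *e = table_find(key, &bucket);

		if (e)
			printf("cancelling %lu\n", e->user_data);
		if (bucket)			/* unlock only what find locked */
			pthread_mutex_unlock(&bucket->lock);
		return e ? 0 : -1;
	}

	int main(void)
	{
		struct entry e = { .user_data = 42, .next = NULL };
		int i;

		for (i = 0; i < NR_BUCKETS; i++)
			pthread_mutex_init(&table[i].lock, NULL);
		table[42 % NR_BUCKETS].head = &e;

		cancel(42);	/* hit: prints, then caller unlocks the bucket */
		cancel(7);	/* miss: find already dropped the lock */
		return 0;
	}
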

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/poll.c | 46 ++++++++++++++++++++++++++--------------------
 1 file changed, 26 insertions(+), 20 deletions(-)

diff --git a/io_uring/poll.c b/io_uring/poll.c
index 7f6b16f687b0..7fc4aafcca95 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -559,12 +559,15 @@ __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 }
 
 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
-				     struct io_cancel_data *cd)
+				     struct io_cancel_data *cd,
+				     struct io_hash_bucket **out_bucket)
 {
 	struct io_kiocb *req;
 	u32 index = hash_long(cd->data, ctx->cancel_hash_bits);
 	struct io_hash_bucket *hb = &ctx->cancel_hash[index];
 
+	*out_bucket = NULL;
+
 	spin_lock(&hb->lock);
 	hlist_for_each_entry(req, &hb->list, hash_node) {
 		if (cd->data != req->cqe.user_data)
@@ -576,6 +579,7 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 				continue;
 			req->work.cancel_seq = cd->seq;
 		}
+		*out_bucket = hb;
 		return req;
 	}
 	spin_unlock(&hb->lock);
@@ -583,11 +587,14 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 }
 
 static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
-					  struct io_cancel_data *cd)
+					  struct io_cancel_data *cd,
+					  struct io_hash_bucket **out_bucket)
 {
 	struct io_kiocb *req;
 	int i;
 
+	*out_bucket = NULL;
+
 	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
 		struct io_hash_bucket *hb = &ctx->cancel_hash[i];
 
@@ -599,6 +606,7 @@ static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
 			if (cd->seq == req->work.cancel_seq)
 				continue;
 			req->work.cancel_seq = cd->seq;
+			*out_bucket = hb;
 			return req;
 		}
 		spin_unlock(&hb->lock);
@@ -617,23 +625,19 @@ static bool io_poll_disarm(struct io_kiocb *req)
 
 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 {
+	struct io_hash_bucket *bucket;
 	struct io_kiocb *req;
-	u32 index;
-	spinlock_t *lock;
 
 	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
-		req = io_poll_file_find(ctx, cd);
+		req = io_poll_file_find(ctx, cd, &bucket);
 	else
-		req = io_poll_find(ctx, false, cd);
-	if (!req) {
-		return -ENOENT;
-	} else {
-		index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
-		lock = &ctx->cancel_hash[index].lock;
-	}
-	io_poll_cancel_req(req);
-	spin_unlock(lock);
-	return 0;
+		req = io_poll_find(ctx, false, cd, &bucket);
+
+	if (req)
+		io_poll_cancel_req(req);
+	if (bucket)
+		spin_unlock(&bucket->lock);
+	return req ? 0 : -ENOENT;
 }
 
 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
@@ -726,19 +730,21 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_poll_update *poll_update = io_kiocb_to_cmd(req);
 	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
 	struct io_ring_ctx *ctx = req->ctx;
-	u32 index = hash_long(cd.data, ctx->cancel_hash_bits);
-	spinlock_t *lock = &ctx->cancel_hash[index].lock;
+	struct io_hash_bucket *bucket;
 	struct io_kiocb *preq;
 	int ret2, ret = 0;
 	bool locked;
 
-	preq = io_poll_find(ctx, true, &cd);
+	preq = io_poll_find(ctx, true, &cd, &bucket);
+	if (preq)
+		ret2 = io_poll_disarm(preq);
+	if (bucket)
+		spin_unlock(&bucket->lock);
+
 	if (!preq) {
 		ret = -ENOENT;
 		goto out;
 	}
-	ret2 = io_poll_disarm(preq);
-	spin_unlock(lock);
 	if (!ret2) {
 		ret = -EALREADY;
 		goto out;
-- 
2.36.1


Thread overview: 44+ messages
2022-06-14 14:36 [PATCH for-next v2 00/25] 5.20 cleanups and poll optimisations Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 01/25] io_uring: make reg buf init consistent Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 02/25] io_uring: move defer_list to slow data Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 03/25] io_uring: better caching for ctx timeout fields Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 04/25] io_uring: refactor ctx slow data placement Pavel Begunkov
2022-06-15  7:58   ` Hao Xu
2022-06-15 10:11     ` Pavel Begunkov
2022-06-15 10:59       ` Hao Xu
2022-06-14 14:36 ` [PATCH for-next v2 05/25] io_uring: move small helpers to headers Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 06/25] io_uring: explain io_wq_work::cancel_seq placement Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 07/25] io_uring: inline ->registered_rings Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 08/25] io_uring: don't set REQ_F_COMPLETE_INLINE in tw Pavel Begunkov
2022-06-14 14:36 ` [PATCH for-next v2 09/25] io_uring: never defer-complete multi-apoll Pavel Begunkov
2022-06-15  8:05   ` Hao Xu
2022-06-14 14:37 ` [PATCH for-next v2 10/25] io_uring: kill REQ_F_COMPLETE_INLINE Pavel Begunkov
2022-06-15  8:20   ` Hao Xu
2022-06-15 10:18     ` Pavel Begunkov
2022-06-15 10:19       ` Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 11/25] io_uring: refactor io_req_task_complete() Pavel Begunkov
2022-06-14 17:45   ` Hao Xu
2022-06-14 17:52     ` Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 12/25] io_uring: don't inline io_put_kbuf Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 13/25] io_uring: remove check_cq checking from hot paths Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 14/25] io_uring: poll: remove unnecessary req->ref set Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 15/25] io_uring: switch cancel_hash to use per entry spinlock Pavel Begunkov
2022-06-14 14:37 ` Pavel Begunkov [this message]
2022-06-14 14:37 ` [PATCH for-next v2 17/25] io_uring: clean up io_try_cancel Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 18/25] io_uring: limit number hash buckets Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 19/25] io_uring: clean up io_ring_ctx_alloc Pavel Begunkov
2022-06-15  8:46   ` Hao Xu
2022-06-14 14:37 ` [PATCH for-next v2 20/25] io_uring: use state completion infra for poll reqs Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 21/25] io_uring: add IORING_SETUP_SINGLE_ISSUER Pavel Begunkov
2022-06-15  9:34   ` Hao Xu
2022-06-15 10:20     ` Pavel Begunkov
2022-06-15  9:41   ` Hao Xu
2022-06-15 10:26     ` Pavel Begunkov
2022-06-15 11:08       ` Hao Xu
2022-06-14 14:37 ` [PATCH for-next v2 22/25] io_uring: pass hash table into poll_find Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 23/25] io_uring: introduce a struct for hash table Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 24/25] io_uring: propagate locking state to poll cancel Pavel Begunkov
2022-06-14 14:37 ` [PATCH for-next v2 25/25] io_uring: mutex locked poll hashing Pavel Begunkov
2022-06-15 12:53   ` Hao Xu
2022-06-15 13:55     ` Pavel Begunkov
2022-06-15 15:19       ` Hao Xu
