From: Pavel Begunkov <asml.silence@gmail.com>
To: Jens Axboe <axboe@kernel.dk>, Hao Xu <haoxu@linux.alibaba.com>
Cc: io-uring@vger.kernel.org, Joseph Qi <joseph.qi@linux.alibaba.com>
Subject: Re: [PATCH for-5.15] io_uring: fix lacking of protection for compl_nr
Date: Fri, 20 Aug 2021 23:41:44 +0100
Message-ID: <34b4d60a-d3c5-bb7d-80c9-d90a98f8632d@gmail.com>
In-Reply-To: <3cae21c2-5db7-add1-1587-c87e22e726dc@kernel.dk>

On 8/20/21 11:30 PM, Jens Axboe wrote:
> On 8/20/21 4:28 PM, Pavel Begunkov wrote:
>> On 8/20/21 11:09 PM, Jens Axboe wrote:
>>> On 8/20/21 3:32 PM, Pavel Begunkov wrote:
>>>> On 8/20/21 9:39 PM, Hao Xu wrote:
>>>>> On 2021/8/21 2:59 AM, Pavel Begunkov wrote:
>>>>>> On 8/20/21 7:40 PM, Hao Xu wrote:
>>>>>>> compl_nr in ctx_flush_and_put() is not protected by uring_lock; this
>>>>>>> may cause problems when it is accessed in parallel.
>>>>>>
>>>>>> Did you hit any problem? It sounds like it should be fine as is:
>>>>>>
>>>>>> The trick is that it's only responsible for flushing requests added
>>>>>> during the execution of the current call to tctx_task_work(), and those
>>>>>> are naturally synchronised with the current task. All other potentially
>>>>>> enqueued requests are someone else's responsibility.
>>>>>>
>>>>>> So, if nobody flushed the requests, we're perfectly in sync. If we see
>>>>>> 0 there but actually enqueued a request, it means someone else
>>>>>> flushed it after the request had been added.
>>>>>>
>>>>>> This probably needs a more formal explanation in terms of happens-before
>>>>>> and so on.
>>>>> I should have put more detail in the commit message. The thing is,
>>>>> say compl_nr > 0:
>>>>>
>>>>>   ctx_flush_and_put                  other context
>>>>>    if (compl_nr)                      get mutex
>>>>>                                       compl_nr > 0
>>>>>                                       do flush
>>>>>                                           compl_nr = 0
>>>>>                                       release mutex
>>>>>         get mutex
>>>>>            do flush (*)
>>>>>         release mutex
>>>>>
>>>>> at the place marked (*), we do a bunch of unnecessary work; moreover, we
>>>>
>>>> I wouldn't worry about the overhead, that shouldn't be much
>>>>
>>>>> call io_cqring_ev_posted(), which I think we shouldn't.
>>>>
>>>> IMHO, users should expect spurious io_cqring_ev_posted() calls, though
>>>> there were some eventfd users complaining about that before, so for
>>>> them we can do
>>>
>>> It does sometimes cause issues, see:
>>
>> I'm used to locking ending up in spurious wakeups. It may be
>> different for eventfd, but considering that we batch
>> completions and so might be calling it only once for multiple
>> CQEs, it shouldn't be.
> 
> The wakeups are fine, it's the eventfd increment that's causing some issues.

If userspace doesn't expect the eventfd count to diverge from the
number of posted CQEs, we need something like the diff below. The odd
part is that it looks like nobody has complained about this one, even
though it should be happening pretty often.


diff --git a/fs/io_uring.c b/fs/io_uring.c
index 761f4d99a1a9..7a0fc024d857 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1463,34 +1463,39 @@ static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
 	return !ctx->eventfd_async || io_wq_current_is_worker();
 }
 
-static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+static void __io_cqring_ev_posted(struct io_ring_ctx *ctx, unsigned events)
 {
 	/* see waitqueue_active() comment */
 	smp_mb();
 
+	if (io_should_trigger_evfd(ctx))
+		eventfd_signal(ctx->cq_ev_fd, events);
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
 	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
 		wake_up(&ctx->sq_data->wait);
-	if (io_should_trigger_evfd(ctx))
-		eventfd_signal(ctx->cq_ev_fd, 1);
 	if (waitqueue_active(&ctx->cq_wait)) {
 		wake_up_interruptible(&ctx->cq_wait);
 		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
 	}
 }
 
-static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
+static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+{
+	__io_cqring_ev_posted(ctx, 1);
+}
+
+static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx, unsigned events)
 {
 	/* see waitqueue_active() comment */
 	smp_mb();
 
+	if (io_should_trigger_evfd(ctx))
+		eventfd_signal(ctx->cq_ev_fd, events);
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
 		if (waitqueue_active(&ctx->wait))
 			wake_up(&ctx->wait);
 	}
-	if (io_should_trigger_evfd(ctx))
-		eventfd_signal(ctx->cq_ev_fd, 1);
 	if (waitqueue_active(&ctx->cq_wait)) {
 		wake_up_interruptible(&ctx->cq_wait);
 		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
@@ -2223,7 +2228,7 @@ static void io_submit_flush_completions(struct io_comp_state *cs,
 	io_commit_cqring(ctx);
 	spin_unlock_irq(&ctx->completion_lock);
 
-	io_cqring_ev_posted(ctx);
+	__io_cqring_ev_posted(ctx, nr);
 	for (i = 0; i < nr; i++) {
 		req = cs->reqs[i];
 
@@ -2336,6 +2341,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 {
 	struct req_batch rb;
 	struct io_kiocb *req;
+	unsigned int events = 0;
 
 	/* order with ->result store in io_complete_rw_iopoll() */
 	smp_rmb();
@@ -2360,15 +2366,16 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 
 		__io_cqring_fill_event(ctx, req->user_data, req->result, cflags,
 					req->cq_idx);
-		(*nr_events)++;
+		events++;
 
 		if (req_ref_put_and_test(req))
 			io_req_free_batch(&rb, req, &ctx->submit_state);
 	}
 
 	io_commit_cqring(ctx);
-	io_cqring_ev_posted_iopoll(ctx);
+	io_cqring_ev_posted_iopoll(ctx, events);
 	io_req_free_batch_finish(ctx, &rb);
+	*nr_events += events;
 }
 
 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
@@ -5404,7 +5411,7 @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 	spin_unlock_irq(&ctx->completion_lock);
 
 	if (posted)
-		io_cqring_ev_posted(ctx);
+		__io_cqring_ev_posted(ctx, posted);
 
 	return posted != 0;
 }
@@ -9010,7 +9017,7 @@ static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 		io_commit_cqring(ctx);
 	spin_unlock_irq(&ctx->completion_lock);
 	if (canceled != 0)
-		io_cqring_ev_posted(ctx);
+		__io_cqring_ev_posted(ctx, canceled);
 	return canceled != 0;
 }
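
For context, this is the kind of userspace pattern that breaks when the
eventfd count diverges from the number of posted CQEs. A rough sketch
(hypothetical liburing-based example, assuming the eventfd was registered
with io_uring_register_eventfd(); not taken from any real application):

#include <stdint.h>
#include <unistd.h>
#include <liburing.h>

/*
 * Hypothetical example: reap exactly as many CQEs as the eventfd
 * counter reports. This only works if the kernel bumps the eventfd
 * once per posted CQE; signalling once per batched flush leaves
 * completions behind.
 */
static void reap_by_eventfd(struct io_uring *ring, int event_fd)
{
	uint64_t count;
	struct io_uring_cqe *cqe;

	/* read() on an eventfd returns the accumulated counter */
	if (read(event_fd, &count, sizeof(count)) != sizeof(count))
		return;

	while (count--) {
		if (io_uring_wait_cqe(ring, &cqe))
			break;
		/* handle cqe->user_data / cqe->res here */
		io_uring_cqe_seen(ring, cqe);
	}
}

With a single eventfd_signal(ctx->cq_ev_fd, 1) per batched flush, the
counter lags behind the number of CQEs and a loop like the above leaves
completions unreaped; signalling the actual number of events, as in the
diff, keeps the two in step.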





Thread overview: 13+ messages
2021-08-20 18:40 [PATCH for-5.15] io_uring: fix lacking of protection for compl_nr Hao Xu
2021-08-20 18:59 ` Pavel Begunkov
2021-08-20 20:39   ` Hao Xu
2021-08-20 21:32     ` Pavel Begunkov
2021-08-20 22:07       ` Hao Xu
2021-08-20 22:09       ` Jens Axboe
2021-08-20 22:21         ` Hao Xu
2021-08-20 22:28         ` Pavel Begunkov
2021-08-20 22:30           ` Jens Axboe
2021-08-20 22:41             ` Pavel Begunkov [this message]
2021-08-20 22:46               ` Jens Axboe
2021-08-20 22:59                 ` Pavel Begunkov
2021-08-21  3:10                   ` Jens Axboe
