* [PATCH 5.14 0/3] fallback fix and task_work cleanup
@ 2021-06-30 20:54 Pavel Begunkov
  2021-06-30 20:54 ` [PATCH 1/3] io_uring: fix stuck fallback reqs Pavel Begunkov
                   ` (3 more replies)
  0 siblings, 4 replies; 18+ messages in thread
From: Pavel Begunkov @ 2021-06-30 20:54 UTC (permalink / raw)
  To: Jens Axboe, io-uring

Haven't seen the bug in 1/3 in the wild, but it should be possible to hit,
so I'd like it for 5.14. Should it go to stable? Perhaps. The others may
go into 5.14 as well.

Pavel Begunkov (3):
  io_uring: fix stuck fallback reqs
  io_uring: simplify task_work func
  io_uring: tweak io_req_task_work_add

 fs/io_uring.c | 131 +++++++++++++++++---------------------------------
 1 file changed, 45 insertions(+), 86 deletions(-)

-- 
2.32.0


* [PATCH 1/3] io_uring: fix stuck fallback reqs
  2021-06-30 20:54 [PATCH 5.14 0/3] fallback fix and task_work cleanup Pavel Begunkov
@ 2021-06-30 20:54 ` Pavel Begunkov
  2021-06-30 20:54 ` [PATCH 2/3] io_uring: simplify task_work func Pavel Begunkov
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 18+ messages in thread
From: Pavel Begunkov @ 2021-06-30 20:54 UTC (permalink / raw)
  To: Jens Axboe, io-uring

When task_work_add() fails, we use ->exit_task_work to queue the work.
That will be run only in the cancellation path, which happens either
when the ctx is dying or when one of the tasks with inflight requests is
exiting or exec'ing. There is a good chance that such a request would
just get stuck in the list, potentially holding a file, blocking io_uring
rsrc recycling, or pinning some other resources. Nothing terrible, it
will go away at some point, but we don't want to keep those resources
locked up for longer than needed.

Replace that hand-rolled ->exit_task_work with a delayed_work + llist
scheme inspired by fput_many().
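
For illustration, the pieces of the diff below fit together roughly as in
the sketch that follows (simplified, with illustrative struct and function
names): the first llist_add() into an empty list returns true, so only that
caller arms the delayed work, and the work function later drains the whole
batch at once.

	/* sketch of the llist + delayed_work fallback pattern (illustrative names) */
	#include <linux/llist.h>
	#include <linux/workqueue.h>
	#include <linux/task_work.h>

	struct ctx {
		struct llist_head	fallback_llist;
		struct delayed_work	fallback_work;	/* INIT_DELAYED_WORK(..., fallback_work_fn) */
	};

	struct req {
		struct callback_head	task_work;
		struct llist_node	fallback_node;
	};

	static void fallback_work_fn(struct work_struct *work)
	{
		struct ctx *ctx = container_of(work, struct ctx, fallback_work.work);
		struct llist_node *node = llist_del_all(&ctx->fallback_llist);
		struct req *req, *tmp;

		/* run every callback queued since the work was scheduled */
		llist_for_each_entry_safe(req, tmp, node, fallback_node)
			req->task_work.func(&req->task_work);
	}

	static void queue_fallback(struct ctx *ctx, struct req *req)
	{
		/* llist_add() returns true only if the list was empty before */
		if (llist_add(&req->fallback_node, &ctx->fallback_llist))
			schedule_delayed_work(&ctx->fallback_work, 1);
	}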

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 61 +++++++++++++++++----------------------------------
 1 file changed, 20 insertions(+), 41 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 92f85d4385a2..08ca835629ce 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -465,7 +465,8 @@ struct io_ring_ctx {
 		struct mm_struct		*mm_account;
 
 		/* ctx exit and cancelation */
-		struct callback_head		*exit_task_work;
+		struct llist_head		fallback_llist;
+		struct delayed_work		fallback_work;
 		struct work_struct		exit_work;
 		struct list_head		tctx_list;
 		struct completion		ref_comp;
@@ -885,6 +886,8 @@ struct io_kiocb {
 	struct io_wq_work		work;
 	const struct cred		*creds;
 
+	struct llist_node		fallback_node;
+
 	/* store used ubuf, so we can prevent reloading */
 	struct io_mapped_ubuf		*imu;
 };
@@ -1100,6 +1103,8 @@ static void io_submit_flush_completions(struct io_ring_ctx *ctx);
 static bool io_poll_remove_waitqs(struct io_kiocb *req);
 static int io_req_prep_async(struct io_kiocb *req);
 
+static void io_fallback_req_func(struct work_struct *unused);
+
 static struct kmem_cache *req_cachep;
 
 static const struct file_operations io_uring_fops;
@@ -1231,6 +1236,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->tctx_list);
 	INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
 	INIT_LIST_HEAD(&ctx->locked_free_list);
+	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
 	return ctx;
 err:
 	kfree(ctx->dummy_ubuf);
@@ -2028,44 +2034,12 @@ static int io_req_task_work_add(struct io_kiocb *req)
 	return ret;
 }
 
-static bool io_run_task_work_head(struct callback_head **work_head)
-{
-	struct callback_head *work, *next;
-	bool executed = false;
-
-	do {
-		work = xchg(work_head, NULL);
-		if (!work)
-			break;
-
-		do {
-			next = work->next;
-			work->func(work);
-			work = next;
-			cond_resched();
-		} while (work);
-		executed = true;
-	} while (1);
-
-	return executed;
-}
-
-static void io_task_work_add_head(struct callback_head **work_head,
-				  struct callback_head *task_work)
-{
-	struct callback_head *head;
-
-	do {
-		head = READ_ONCE(*work_head);
-		task_work->next = head;
-	} while (cmpxchg(work_head, head, task_work) != head);
-}
-
 static void io_req_task_work_add_fallback(struct io_kiocb *req,
 					  task_work_func_t cb)
 {
 	init_task_work(&req->task_work, cb);
-	io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
+	if (llist_add(&req->fallback_node, &req->ctx->fallback_llist))
+		schedule_delayed_work(&req->ctx->fallback_work, 1);
 }
 
 static void io_req_task_cancel(struct callback_head *cb)
@@ -2514,6 +2488,17 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
 }
 #endif
 
+static void io_fallback_req_func(struct work_struct *work)
+{
+	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
+						fallback_work.work);
+	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
+	struct io_kiocb *req, *tmp;
+
+	llist_for_each_entry_safe(req, tmp, node, fallback_node)
+		req->task_work.func(&req->task_work);
+}
+
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 			     unsigned int issue_flags)
 {
@@ -8956,11 +8941,6 @@ static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
 	return -EINVAL;
 }
 
-static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
-{
-	return io_run_task_work_head(&ctx->exit_task_work);
-}
-
 struct io_tctx_exit {
 	struct callback_head		task_work;
 	struct completion		completion;
@@ -9225,7 +9205,6 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 		ret |= io_kill_timeouts(ctx, task, cancel_all);
 		if (task)
 			ret |= io_run_task_work();
-		ret |= io_run_ctx_fallback(ctx);
 		if (!ret)
 			break;
 		cond_resched();
-- 
2.32.0


* [PATCH 2/3] io_uring: simplify task_work func
  2021-06-30 20:54 [PATCH 5.14 0/3] fallback fix and task_work cleanup Pavel Begunkov
  2021-06-30 20:54 ` [PATCH 1/3] io_uring: fix stuck fallback reqs Pavel Begunkov
@ 2021-06-30 20:54 ` Pavel Begunkov
  2021-06-30 20:54 ` [PATCH 3/3] io_uring: tweak io_req_task_work_add Pavel Begunkov
  2021-06-30 21:18 ` [PATCH 5.14 0/3] fallback fix and task_work cleanup Jens Axboe
  3 siblings, 0 replies; 18+ messages in thread
From: Pavel Begunkov @ 2021-06-30 20:54 UTC (permalink / raw)
  To: Jens Axboe, io-uring

Since we don't really use req->task_work anymore, get rid of it together
with the nasty ->func aliasing between ->io_task_work and ->task_work,
and hide ->fallback_node inside of io_task_work.

Also, as the generic task_work is gone now, change the callback type from
task_work_func_t to a function taking an io_kiocb directly, which avoids
the casting and simplifies the code.
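
In effect the callback convention changes from the generic task_work
prototype plus a container_of() back-cast to a callback that takes the
request directly; roughly (condensed from the diff below):

	/* before: generic task_work callback, cast back to the request */
	static void io_req_task_submit(struct callback_head *cb)
	{
		struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);

		__io_req_task_submit(req);
	}

	/* after: the request is passed directly, no wrapper or cast needed */
	typedef void (*io_req_tw_func_t)(struct io_kiocb *req);

	static void io_req_task_submit(struct io_kiocb *req)
	{
		/* ...former __io_req_task_submit() body, run under ctx->uring_lock... */
	}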

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 72 ++++++++++++++++++++-------------------------------
 1 file changed, 28 insertions(+), 44 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 08ca835629ce..5fab427305e3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -808,9 +808,14 @@ struct async_poll {
 	struct io_poll_iocb	*double_poll;
 };
 
+typedef void (*io_req_tw_func_t)(struct io_kiocb *req);
+
 struct io_task_work {
-	struct io_wq_work_node	node;
-	task_work_func_t	func;
+	union {
+		struct io_wq_work_node	node;
+		struct llist_node	fallback_node;
+	};
+	io_req_tw_func_t		func;
 };
 
 enum {
@@ -876,18 +881,13 @@ struct io_kiocb {
 
 	/* used with ctx->iopoll_list with reads/writes */
 	struct list_head		inflight_entry;
-	union {
-		struct io_task_work	io_task_work;
-		struct callback_head	task_work;
-	};
+	struct io_task_work		io_task_work;
 	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
 	struct hlist_node		hash_node;
 	struct async_poll		*apoll;
 	struct io_wq_work		work;
 	const struct cred		*creds;
 
-	struct llist_node		fallback_node;
-
 	/* store used ubuf, so we can prevent reloading */
 	struct io_mapped_ubuf		*imu;
 };
@@ -1964,7 +1964,7 @@ static void tctx_task_work(struct callback_head *cb)
 				ctx = req->ctx;
 				percpu_ref_get(&ctx->refs);
 			}
-			req->task_work.func(&req->task_work);
+			req->io_task_work.func(req);
 			node = next;
 		}
 		if (wq_list_empty(&tctx->task_list)) {
@@ -2035,16 +2035,16 @@ static int io_req_task_work_add(struct io_kiocb *req)
 }
 
 static void io_req_task_work_add_fallback(struct io_kiocb *req,
-					  task_work_func_t cb)
+					  io_req_tw_func_t cb)
 {
-	init_task_work(&req->task_work, cb);
-	if (llist_add(&req->fallback_node, &req->ctx->fallback_llist))
+	req->io_task_work.func = cb;
+	if (llist_add(&req->io_task_work.fallback_node,
+		      &req->ctx->fallback_llist))
 		schedule_delayed_work(&req->ctx->fallback_work, 1);
 }
 
-static void io_req_task_cancel(struct callback_head *cb)
+static void io_req_task_cancel(struct io_kiocb *req)
 {
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
 	struct io_ring_ctx *ctx = req->ctx;
 
 	/* ctx is guaranteed to stay alive while we hold uring_lock */
@@ -2053,7 +2053,7 @@ static void io_req_task_cancel(struct callback_head *cb)
 	mutex_unlock(&ctx->uring_lock);
 }
 
-static void __io_req_task_submit(struct io_kiocb *req)
+static void io_req_task_submit(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -2066,17 +2066,10 @@ static void __io_req_task_submit(struct io_kiocb *req)
 	mutex_unlock(&ctx->uring_lock);
 }
 
-static void io_req_task_submit(struct callback_head *cb)
-{
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-
-	__io_req_task_submit(req);
-}
-
 static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
 {
 	req->result = ret;
-	req->task_work.func = io_req_task_cancel;
+	req->io_task_work.func = io_req_task_cancel;
 
 	if (unlikely(io_req_task_work_add(req)))
 		io_req_task_work_add_fallback(req, io_req_task_cancel);
@@ -2084,7 +2077,7 @@ static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
 
 static void io_req_task_queue(struct io_kiocb *req)
 {
-	req->task_work.func = io_req_task_submit;
+	req->io_task_work.func = io_req_task_submit;
 
 	if (unlikely(io_req_task_work_add(req)))
 		io_req_task_queue_fail(req, -ECANCELED);
@@ -2198,18 +2191,11 @@ static inline void io_put_req(struct io_kiocb *req)
 		io_free_req(req);
 }
 
-static void io_put_req_deferred_cb(struct callback_head *cb)
-{
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-
-	io_free_req(req);
-}
-
 static void io_free_req_deferred(struct io_kiocb *req)
 {
-	req->task_work.func = io_put_req_deferred_cb;
+	req->io_task_work.func = io_free_req;
 	if (unlikely(io_req_task_work_add(req)))
-		io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
+		io_req_task_work_add_fallback(req, io_free_req);
 }
 
 static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
@@ -2495,8 +2481,8 @@ static void io_fallback_req_func(struct work_struct *work)
 	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
 	struct io_kiocb *req, *tmp;
 
-	llist_for_each_entry_safe(req, tmp, node, fallback_node)
-		req->task_work.func(&req->task_work);
+	llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
+		req->io_task_work.func(req);
 }
 
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
@@ -4998,7 +4984,7 @@ struct io_poll_table {
 };
 
 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
-			   __poll_t mask, task_work_func_t func)
+			   __poll_t mask, io_req_tw_func_t func)
 {
 	int ret;
 
@@ -5011,7 +4997,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	list_del_init(&poll->wait.entry);
 
 	req->result = mask;
-	req->task_work.func = func;
+	req->io_task_work.func = func;
 
 	/*
 	 * If this fails, then the task is exiting. When a task exits, the
@@ -5108,9 +5094,8 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
 	return !(flags & IORING_CQE_F_MORE);
 }
 
-static void io_poll_task_func(struct callback_head *cb)
+static void io_poll_task_func(struct io_kiocb *req)
 {
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *nxt;
 
@@ -5132,7 +5117,7 @@ static void io_poll_task_func(struct callback_head *cb)
 		if (done) {
 			nxt = io_put_req_find_next(req);
 			if (nxt)
-				__io_req_task_submit(nxt);
+				io_req_task_submit(nxt);
 		}
 	}
 }
@@ -5241,9 +5226,8 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
 	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
 }
 
-static void io_async_task_func(struct callback_head *cb)
+static void io_async_task_func(struct io_kiocb *req)
 {
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
 	struct async_poll *apoll = req->apoll;
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -5259,7 +5243,7 @@ static void io_async_task_func(struct callback_head *cb)
 	spin_unlock_irq(&ctx->completion_lock);
 
 	if (!READ_ONCE(apoll->poll.canceled))
-		__io_req_task_submit(req);
+		io_req_task_submit(req);
 	else
 		io_req_complete_failed(req, -ECANCELED);
 }
@@ -9006,7 +8990,7 @@ static void io_ring_exit_work(struct work_struct *work)
 	/*
 	 * Some may use context even when all refs and requests have been put,
 	 * and they are free to do so while still holding uring_lock or
-	 * completion_lock, see __io_req_task_submit(). Apart from other work,
+	 * completion_lock, see io_req_task_submit(). Apart from other work,
 	 * this lock/unlock section also waits them to finish.
 	 */
 	mutex_lock(&ctx->uring_lock);
-- 
2.32.0


* [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 20:54 [PATCH 5.14 0/3] fallback fix and task_work cleanup Pavel Begunkov
  2021-06-30 20:54 ` [PATCH 1/3] io_uring: fix stuck fallback reqs Pavel Begunkov
  2021-06-30 20:54 ` [PATCH 2/3] io_uring: simplify task_work func Pavel Begunkov
@ 2021-06-30 20:54 ` Pavel Begunkov
  2021-06-30 21:17   ` Jens Axboe
  2021-06-30 21:18 ` [PATCH 5.14 0/3] fallback fix and task_work cleanup Jens Axboe
  3 siblings, 1 reply; 18+ messages in thread
From: Pavel Begunkov @ 2021-06-30 20:54 UTC (permalink / raw)
  To: Jens Axboe, io-uring

Whenever possible we don't want to fall back a request to the slow path.
task_work_add() will be fine even if the task is exiting, so don't check
for PF_EXITING; in any case there is only a relatively small window
between setting the flag and running the final task_work_run().

Also add likely() annotations for the hot path.
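
The window in question comes from the exit path ordering, roughly (a
simplified sketch of do_exit(), not a complete call chain):

	/*
	 * do_exit()
	 *   ...
	 *   exit_signals()      // PF_EXITING gets set here
	 *   ...
	 *   exit_task_work()
	 *     task_work_run()   // installs the work_exited sentinel; only after
	 *                       // this does task_work_add() start failing (-ESRCH)
	 */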

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5fab427305e3..1893a4229dbb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1990,9 +1990,6 @@ static int io_req_task_work_add(struct io_kiocb *req)
 	unsigned long flags;
 	int ret = 0;
 
-	if (unlikely(tsk->flags & PF_EXITING))
-		return -ESRCH;
-
 	WARN_ON_ONCE(!tctx);
 
 	spin_lock_irqsave(&tctx->task_lock, flags);
@@ -2000,7 +1997,7 @@ static int io_req_task_work_add(struct io_kiocb *req)
 	spin_unlock_irqrestore(&tctx->task_lock, flags);
 
 	/* task_work already pending, we're done */
-	if (test_bit(0, &tctx->task_state) ||
+	if (likely(test_bit(0, &tctx->task_state)) ||
 	    test_and_set_bit(0, &tctx->task_state))
 		return 0;
 
@@ -2011,8 +2008,7 @@ static int io_req_task_work_add(struct io_kiocb *req)
 	 * will do the job.
 	 */
 	notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
-
-	if (!task_work_add(tsk, &tctx->task_work, notify)) {
+	if (likely(!task_work_add(tsk, &tctx->task_work, notify))) {
 		wake_up_process(tsk);
 		return 0;
 	}
-- 
2.32.0


* Re: [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 20:54 ` [PATCH 3/3] io_uring: tweak io_req_task_work_add Pavel Begunkov
@ 2021-06-30 21:17   ` Jens Axboe
  2021-06-30 21:19     ` Pavel Begunkov
  0 siblings, 1 reply; 18+ messages in thread
From: Jens Axboe @ 2021-06-30 21:17 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 6/30/21 2:54 PM, Pavel Begunkov wrote:
> Whenever possible we don't want to fallback a request. task_work_add()
> will be fine if the task is exiting, so don't check for PF_EXITING,
> there is anyway only a relatively small gap between setting the flag
> and doing the final task_work_run().
> 
> Also add likely for the hot path.

I'm not a huge fan of likely/unlikely, and in particular constructs like:

> -	if (test_bit(0, &tctx->task_state) ||
> +	if (likely(test_bit(0, &tctx->task_state)) ||
>  	    test_and_set_bit(0, &tctx->task_state))
>  		return 0;

where the state is combined. In any case, it should be a separate
change. If there's an "Also" paragraph in a patch, then that's also
usually a good clue that that particular change should've been
separate :-)
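
(For reference, the combined condition short-circuits, so it is equivalent
to the expanded form below; the likely() only annotates the plain test_bit()
read, while the test_and_set_bit() is what actually claims the state.)

	/* equivalent expansion of the combined condition, for illustration only */
	if (test_bit(0, &tctx->task_state))		/* fast path: work already queued */
		return 0;
	if (test_and_set_bit(0, &tctx->task_state))	/* lost the race to another queuer */
		return 0;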

-- 
Jens Axboe


* Re: [PATCH 5.14 0/3] fallback fix and task_work cleanup
  2021-06-30 20:54 [PATCH 5.14 0/3] fallback fix and task_work cleanup Pavel Begunkov
                   ` (2 preceding siblings ...)
  2021-06-30 20:54 ` [PATCH 3/3] io_uring: tweak io_req_task_work_add Pavel Begunkov
@ 2021-06-30 21:18 ` Jens Axboe
  2021-06-30 21:20   ` Pavel Begunkov
  3 siblings, 1 reply; 18+ messages in thread
From: Jens Axboe @ 2021-06-30 21:18 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 6/30/21 2:54 PM, Pavel Begunkov wrote:
> Haven't see the bug in 1/3 in the wild, but should be possible, and so
> I'd like it for 5.14. Should it be stable? Perhaps, others may go 5.14
> as well.
> 
> Pavel Begunkov (3):
>   io_uring: fix stuck fallback reqs
>   io_uring: simplify task_work func
>   io_uring: tweak io_req_task_work_add
> 
>  fs/io_uring.c | 131 +++++++++++++++++---------------------------------
>  1 file changed, 45 insertions(+), 86 deletions(-)

Applied 1-2, thanks.

-- 
Jens Axboe


* Re: [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 21:17   ` Jens Axboe
@ 2021-06-30 21:19     ` Pavel Begunkov
  2021-06-30 21:22       ` Jens Axboe
  0 siblings, 1 reply; 18+ messages in thread
From: Pavel Begunkov @ 2021-06-30 21:19 UTC (permalink / raw)
  To: Jens Axboe, io-uring

On 6/30/21 10:17 PM, Jens Axboe wrote:
> On 6/30/21 2:54 PM, Pavel Begunkov wrote:
>> Whenever possible we don't want to fallback a request. task_work_add()
>> will be fine if the task is exiting, so don't check for PF_EXITING,
>> there is anyway only a relatively small gap between setting the flag
>> and doing the final task_work_run().
>>
>> Also add likely for the hot path.
> 
> I'm not a huge fan of likely/unlikely, and in particular constructs like:
> 
>> -	if (test_bit(0, &tctx->task_state) ||
>> +	if (likely(test_bit(0, &tctx->task_state)) ||
>>  	    test_and_set_bit(0, &tctx->task_state))
>>  		return 0;
> 
> where the state is combined. In any case, it should be a separate
> change. If there's an "Also" paragraph in a patch, then that's also
> usually a good clue that that particular change should've been
> separate :-)

Not sure what's wrong with the likely() above, but how about dropping
this one then?

-- 
Pavel Begunkov

* Re: [PATCH 5.14 0/3] fallback fix and task_work cleanup
  2021-06-30 21:18 ` [PATCH 5.14 0/3] fallback fix and task_work cleanup Jens Axboe
@ 2021-06-30 21:20   ` Pavel Begunkov
  0 siblings, 0 replies; 18+ messages in thread
From: Pavel Begunkov @ 2021-06-30 21:20 UTC (permalink / raw)
  To: Jens Axboe, io-uring

On 6/30/21 10:18 PM, Jens Axboe wrote:
> On 6/30/21 2:54 PM, Pavel Begunkov wrote:
>> Haven't see the bug in 1/3 in the wild, but should be possible, and so
>> I'd like it for 5.14. Should it be stable? Perhaps, others may go 5.14
>> as well.
>>
>> Pavel Begunkov (3):
>>   io_uring: fix stuck fallback reqs
>>   io_uring: simplify task_work func
>>   io_uring: tweak io_req_task_work_add
>>
>>  fs/io_uring.c | 131 +++++++++++++++++---------------------------------
>>  1 file changed, 45 insertions(+), 86 deletions(-)
> 
> Applied 1-2, thanks.

Ah, you already dropped it, thanks

-- 
Pavel Begunkov

* Re: [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 21:19     ` Pavel Begunkov
@ 2021-06-30 21:22       ` Jens Axboe
  2021-06-30 21:38         ` Pavel Begunkov
  0 siblings, 1 reply; 18+ messages in thread
From: Jens Axboe @ 2021-06-30 21:22 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 6/30/21 3:19 PM, Pavel Begunkov wrote:
> On 6/30/21 10:17 PM, Jens Axboe wrote:
>> On 6/30/21 2:54 PM, Pavel Begunkov wrote:
>>> Whenever possible we don't want to fallback a request. task_work_add()
>>> will be fine if the task is exiting, so don't check for PF_EXITING,
>>> there is anyway only a relatively small gap between setting the flag
>>> and doing the final task_work_run().
>>>
>>> Also add likely for the hot path.
>>
>> I'm not a huge fan of likely/unlikely, and in particular constructs like:
>>
>>> -	if (test_bit(0, &tctx->task_state) ||
>>> +	if (likely(test_bit(0, &tctx->task_state)) ||
>>>  	    test_and_set_bit(0, &tctx->task_state))
>>>  		return 0;
>>
>> where the state is combined. In any case, it should be a separate
>> change. If there's an "Also" paragraph in a patch, then that's also
>> usually a good clue that that particular change should've been
>> separate :-)
> 
> Not sure what's wrong with likely above, but how about drop
> this one then?

Yep I did - we can do the exiting change separately, the commit message
just needs to be clarified a bit on why it's ok to do now. And that
last sentence dropped, of course.

-- 
Jens Axboe


* Re: [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 21:22       ` Jens Axboe
@ 2021-06-30 21:38         ` Pavel Begunkov
  2021-06-30 21:45           ` Jens Axboe
  0 siblings, 1 reply; 18+ messages in thread
From: Pavel Begunkov @ 2021-06-30 21:38 UTC (permalink / raw)
  To: Jens Axboe, io-uring

On 6/30/21 10:22 PM, Jens Axboe wrote:
> On 6/30/21 3:19 PM, Pavel Begunkov wrote:
>> On 6/30/21 10:17 PM, Jens Axboe wrote:
>>> On 6/30/21 2:54 PM, Pavel Begunkov wrote:
>>>> Whenever possible we don't want to fallback a request. task_work_add()
>>>> will be fine if the task is exiting, so don't check for PF_EXITING,
>>>> there is anyway only a relatively small gap between setting the flag
>>>> and doing the final task_work_run().
>>>>
>>>> Also add likely for the hot path.
>>>
>>> I'm not a huge fan of likely/unlikely, and in particular constructs like:
>>>
>>>> -	if (test_bit(0, &tctx->task_state) ||
>>>> +	if (likely(test_bit(0, &tctx->task_state)) ||
>>>>  	    test_and_set_bit(0, &tctx->task_state))
>>>>  		return 0;
>>>
>>> where the state is combined. In any case, it should be a separate
>>> change. If there's an "Also" paragraph in a patch, then that's also
>>> usually a good clue that that particular change should've been
>>> separate :-)
>>
>> Not sure what's wrong with likely above, but how about drop
>> this one then?
> 
> Yep I did - we can do the exiting change separately, the commit message

I think 1-2 is good enough for 5.14, I'll just send it for-next

> just needs to be clarified a bit on why it's ok to do now. And that

It should have been ok to do even before those 2 patches, but I haven't
tracked down where the check stopped being needed.

-- 
Pavel Begunkov

* Re: [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 21:38         ` Pavel Begunkov
@ 2021-06-30 21:45           ` Jens Axboe
  2021-06-30 21:56             ` Jens Axboe
  2021-06-30 21:57             ` Pavel Begunkov
  0 siblings, 2 replies; 18+ messages in thread
From: Jens Axboe @ 2021-06-30 21:45 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 6/30/21 3:38 PM, Pavel Begunkov wrote:
> On 6/30/21 10:22 PM, Jens Axboe wrote:
>> On 6/30/21 3:19 PM, Pavel Begunkov wrote:
>>> On 6/30/21 10:17 PM, Jens Axboe wrote:
>>>> On 6/30/21 2:54 PM, Pavel Begunkov wrote:
>>>>> Whenever possible we don't want to fallback a request. task_work_add()
>>>>> will be fine if the task is exiting, so don't check for PF_EXITING,
>>>>> there is anyway only a relatively small gap between setting the flag
>>>>> and doing the final task_work_run().
>>>>>
>>>>> Also add likely for the hot path.
>>>>
>>>> I'm not a huge fan of likely/unlikely, and in particular constructs like:
>>>>
>>>>> -	if (test_bit(0, &tctx->task_state) ||
>>>>> +	if (likely(test_bit(0, &tctx->task_state)) ||
>>>>>  	    test_and_set_bit(0, &tctx->task_state))
>>>>>  		return 0;
>>>>
>>>> where the state is combined. In any case, it should be a separate
>>>> change. If there's an "Also" paragraph in a patch, then that's also
>>>> usually a good clue that that particular change should've been
>>>> separate :-)
>>>
>>> Not sure what's wrong with likely above, but how about drop
>>> this one then?
>>
>> Yep I did - we can do the exiting change separately, the commit message
> 
> I think 1-2 is good enough for 5.14, I'll just send it for-next
> 
>> just needs to be clarified a bit on why it's ok to do now. And that
> 
> It should have been ok to do before those 2 patches, but
> haven't tracked where it lost actuality.

Right, I was thinking it was related to the swapping of the signal
exit and task work run ordering. But didn't look that far yet...

-- 
Jens Axboe


* Re: [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 21:45           ` Jens Axboe
@ 2021-06-30 21:56             ` Jens Axboe
  2021-06-30 22:04               ` Pavel Begunkov
  2021-06-30 22:10               ` Pavel Begunkov
  2021-06-30 21:57             ` Pavel Begunkov
  1 sibling, 2 replies; 18+ messages in thread
From: Jens Axboe @ 2021-06-30 21:56 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 6/30/21 3:45 PM, Jens Axboe wrote:
> On 6/30/21 3:38 PM, Pavel Begunkov wrote:
>> On 6/30/21 10:22 PM, Jens Axboe wrote:
>>> On 6/30/21 3:19 PM, Pavel Begunkov wrote:
>>>> On 6/30/21 10:17 PM, Jens Axboe wrote:
>>>>> On 6/30/21 2:54 PM, Pavel Begunkov wrote:
>>>>>> Whenever possible we don't want to fallback a request. task_work_add()
>>>>>> will be fine if the task is exiting, so don't check for PF_EXITING,
>>>>>> there is anyway only a relatively small gap between setting the flag
>>>>>> and doing the final task_work_run().
>>>>>>
>>>>>> Also add likely for the hot path.
>>>>>
>>>>> I'm not a huge fan of likely/unlikely, and in particular constructs like:
>>>>>
>>>>>> -	if (test_bit(0, &tctx->task_state) ||
>>>>>> +	if (likely(test_bit(0, &tctx->task_state)) ||
>>>>>>  	    test_and_set_bit(0, &tctx->task_state))
>>>>>>  		return 0;
>>>>>
>>>>> where the state is combined. In any case, it should be a separate
>>>>> change. If there's an "Also" paragraph in a patch, then that's also
>>>>> usually a good clue that that particular change should've been
>>>>> separate :-)
>>>>
>>>> Not sure what's wrong with likely above, but how about drop
>>>> this one then?
>>>
>>> Yep I did - we can do the exiting change separately, the commit message
>>
>> I think 1-2 is good enough for 5.14, I'll just send it for-next
>>
>>> just needs to be clarified a bit on why it's ok to do now. And that
>>
>> It should have been ok to do before those 2 patches, but
>> haven't tracked where it lost actuality.
> 
> Right, I was thinking it was related to the swapping of the signal
> exit and task work run ordering. But didn't look that far yet...

BTW, in usual testing, even just the one hunk removing the exit check
seems to result in quite a lot of memory leaks running
test/poll-mshot-update. So something is funky with the patch.

-- 
Jens Axboe


* Re: [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 21:45           ` Jens Axboe
  2021-06-30 21:56             ` Jens Axboe
@ 2021-06-30 21:57             ` Pavel Begunkov
  2021-06-30 21:57               ` Jens Axboe
  1 sibling, 1 reply; 18+ messages in thread
From: Pavel Begunkov @ 2021-06-30 21:57 UTC (permalink / raw)
  To: Jens Axboe, io-uring

On 6/30/21 10:45 PM, Jens Axboe wrote:
> On 6/30/21 3:38 PM, Pavel Begunkov wrote:
>> On 6/30/21 10:22 PM, Jens Axboe wrote:
>>> On 6/30/21 3:19 PM, Pavel Begunkov wrote:
>>>> On 6/30/21 10:17 PM, Jens Axboe wrote:
>>>>> On 6/30/21 2:54 PM, Pavel Begunkov wrote:
>>>>>> Whenever possible we don't want to fallback a request. task_work_add()
>>>>>> will be fine if the task is exiting, so don't check for PF_EXITING,
>>>>>> there is anyway only a relatively small gap between setting the flag
>>>>>> and doing the final task_work_run().
>>>>>>
>>>>>> Also add likely for the hot path.
>>>>>
>>>>> I'm not a huge fan of likely/unlikely, and in particular constructs like:
>>>>>
>>>>>> -	if (test_bit(0, &tctx->task_state) ||
>>>>>> +	if (likely(test_bit(0, &tctx->task_state)) ||
>>>>>>  	    test_and_set_bit(0, &tctx->task_state))
>>>>>>  		return 0;
>>>>>
>>>>> where the state is combined. In any case, it should be a separate
>>>>> change. If there's an "Also" paragraph in a patch, then that's also
>>>>> usually a good clue that that particular change should've been
>>>>> separate :-)
>>>>
>>>> Not sure what's wrong with likely above, but how about drop
>>>> this one then?
>>>
>>> Yep I did - we can do the exiting change separately, the commit message
>>
>> I think 1-2 is good enough for 5.14, I'll just send it for-next
>>
>>> just needs to be clarified a bit on why it's ok to do now. And that
>>
>> It should have been ok to do before those 2 patches, but
>> haven't tracked where it lost actuality.
> 
> Right, I was thinking it was related to the swapping of the signal
> exit and task work run ordering. But didn't look that far yet...

commit 6200b0ae4ea28a4bfd8eb434e33e6201b7a6a282
Author: Jens Axboe <axboe@kernel.dk>
Date:   Sun Sep 13 14:38:30 2020 -0600

    io_uring: don't run task work on an exiting task

It came from there, from the times when there still was io_uring_flush()
and no cancellations from do_exit().

-- 
Pavel Begunkov

* Re: [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 21:57             ` Pavel Begunkov
@ 2021-06-30 21:57               ` Jens Axboe
  0 siblings, 0 replies; 18+ messages in thread
From: Jens Axboe @ 2021-06-30 21:57 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 6/30/21 3:57 PM, Pavel Begunkov wrote:
> On 6/30/21 10:45 PM, Jens Axboe wrote:
>> On 6/30/21 3:38 PM, Pavel Begunkov wrote:
>>> On 6/30/21 10:22 PM, Jens Axboe wrote:
>>>> On 6/30/21 3:19 PM, Pavel Begunkov wrote:
>>>>> On 6/30/21 10:17 PM, Jens Axboe wrote:
>>>>>> On 6/30/21 2:54 PM, Pavel Begunkov wrote:
>>>>>>> Whenever possible we don't want to fallback a request. task_work_add()
>>>>>>> will be fine if the task is exiting, so don't check for PF_EXITING,
>>>>>>> there is anyway only a relatively small gap between setting the flag
>>>>>>> and doing the final task_work_run().
>>>>>>>
>>>>>>> Also add likely for the hot path.
>>>>>>
>>>>>> I'm not a huge fan of likely/unlikely, and in particular constructs like:
>>>>>>
>>>>>>> -	if (test_bit(0, &tctx->task_state) ||
>>>>>>> +	if (likely(test_bit(0, &tctx->task_state)) ||
>>>>>>>  	    test_and_set_bit(0, &tctx->task_state))
>>>>>>>  		return 0;
>>>>>>
>>>>>> where the state is combined. In any case, it should be a separate
>>>>>> change. If there's an "Also" paragraph in a patch, then that's also
>>>>>> usually a good clue that that particular change should've been
>>>>>> separate :-)
>>>>>
>>>>> Not sure what's wrong with likely above, but how about drop
>>>>> this one then?
>>>>
>>>> Yep I did - we can do the exiting change separately, the commit message
>>>
>>> I think 1-2 is good enough for 5.14, I'll just send it for-next
>>>
>>>> just needs to be clarified a bit on why it's ok to do now. And that
>>>
>>> It should have been ok to do before those 2 patches, but
>>> haven't tracked where it lost actuality.
>>
>> Right, I was thinking it was related to the swapping of the signal
>> exit and task work run ordering. But didn't look that far yet...
> 
> commit 6200b0ae4ea28a4bfd8eb434e33e6201b7a6a282
> Author: Jens Axboe <axboe@kernel.dk>
> Date:   Sun Sep 13 14:38:30 2020 -0600
> 
>     io_uring: don't run task work on an exiting task
> 
> Came from there, times where there still was io_uring_flush()
> and no from do_exit() cancellations

Ah yes, that makes sense.

-- 
Jens Axboe


* Re: [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 21:56             ` Jens Axboe
@ 2021-06-30 22:04               ` Pavel Begunkov
  2021-06-30 22:10               ` Pavel Begunkov
  1 sibling, 0 replies; 18+ messages in thread
From: Pavel Begunkov @ 2021-06-30 22:04 UTC (permalink / raw)
  To: Jens Axboe, io-uring

On 6/30/21 10:56 PM, Jens Axboe wrote:
> On 6/30/21 3:45 PM, Jens Axboe wrote:
>> On 6/30/21 3:38 PM, Pavel Begunkov wrote:
>>> On 6/30/21 10:22 PM, Jens Axboe wrote:
>>>> On 6/30/21 3:19 PM, Pavel Begunkov wrote:
>>>>> On 6/30/21 10:17 PM, Jens Axboe wrote:
>>>>>> On 6/30/21 2:54 PM, Pavel Begunkov wrote:
>>>>>>> Whenever possible we don't want to fallback a request. task_work_add()
>>>>>>> will be fine if the task is exiting, so don't check for PF_EXITING,
>>>>>>> there is anyway only a relatively small gap between setting the flag
>>>>>>> and doing the final task_work_run().
>>>>>>>
>>>>>>> Also add likely for the hot path.
>>>>>>
>>>>>> I'm not a huge fan of likely/unlikely, and in particular constructs like:
>>>>>>
>>>>>>> -	if (test_bit(0, &tctx->task_state) ||
>>>>>>> +	if (likely(test_bit(0, &tctx->task_state)) ||
>>>>>>>  	    test_and_set_bit(0, &tctx->task_state))
>>>>>>>  		return 0;
>>>>>>
>>>>>> where the state is combined. In any case, it should be a separate
>>>>>> change. If there's an "Also" paragraph in a patch, then that's also
>>>>>> usually a good clue that that particular change should've been
>>>>>> separate :-)
>>>>>
>>>>> Not sure what's wrong with likely above, but how about drop
>>>>> this one then?
>>>>
>>>> Yep I did - we can do the exiting change separately, the commit message
>>>
>>> I think 1-2 is good enough for 5.14, I'll just send it for-next
>>>
>>>> just needs to be clarified a bit on why it's ok to do now. And that
>>>
>>> It should have been ok to do before those 2 patches, but
>>> haven't tracked where it lost actuality.
>>
>> Right, I was thinking it was related to the swapping of the signal
>> exit and task work run ordering. But didn't look that far yet...
> 
> BTW, in usual testing, even just the one hunk removing the exit check
> seems to result in quite a lot of memory leaks running
> test/poll-mshot-update. So something is funky with the patch.

Oh, I have no idea why yet. Apparently I commented out that test because
it was always failing, and forgot to bring it back... Will make a note.

-- 
Pavel Begunkov

* Re: [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 21:56             ` Jens Axboe
  2021-06-30 22:04               ` Pavel Begunkov
@ 2021-06-30 22:10               ` Pavel Begunkov
  2021-06-30 22:11                 ` Jens Axboe
  1 sibling, 1 reply; 18+ messages in thread
From: Pavel Begunkov @ 2021-06-30 22:10 UTC (permalink / raw)
  To: Jens Axboe, io-uring

On 6/30/21 10:56 PM, Jens Axboe wrote:
> On 6/30/21 3:45 PM, Jens Axboe wrote:
>> On 6/30/21 3:38 PM, Pavel Begunkov wrote:
>>> On 6/30/21 10:22 PM, Jens Axboe wrote:
>>>> On 6/30/21 3:19 PM, Pavel Begunkov wrote:
>>>>> On 6/30/21 10:17 PM, Jens Axboe wrote:
>>>>>> On 6/30/21 2:54 PM, Pavel Begunkov wrote:
>>>>>>> Whenever possible we don't want to fallback a request. task_work_add()
>>>>>>> will be fine if the task is exiting, so don't check for PF_EXITING,
>>>>>>> there is anyway only a relatively small gap between setting the flag
>>>>>>> and doing the final task_work_run().
>>>>>>>
>>>>>>> Also add likely for the hot path.
>>>>>>
>>>>>> I'm not a huge fan of likely/unlikely, and in particular constructs like:
>>>>>>
>>>>>>> -	if (test_bit(0, &tctx->task_state) ||
>>>>>>> +	if (likely(test_bit(0, &tctx->task_state)) ||
>>>>>>>  	    test_and_set_bit(0, &tctx->task_state))
>>>>>>>  		return 0;
>>>>>>
>>>>>> where the state is combined. In any case, it should be a separate
>>>>>> change. If there's an "Also" paragraph in a patch, then that's also
>>>>>> usually a good clue that that particular change should've been
>>>>>> separate :-)
>>>>>
>>>>> Not sure what's wrong with likely above, but how about drop
>>>>> this one then?
>>>>
>>>> Yep I did - we can do the exiting change separately, the commit message
>>>
>>> I think 1-2 is good enough for 5.14, I'll just send it for-next
>>>
>>>> just needs to be clarified a bit on why it's ok to do now. And that
>>>
>>> It should have been ok to do before those 2 patches, but
>>> haven't tracked where it lost actuality.
>>
>> Right, I was thinking it was related to the swapping of the signal
>> exit and task work run ordering. But didn't look that far yet...
> 
> BTW, in usual testing, even just the one hunk removing the exit check
> seems to result in quite a lot of memory leaks running
> test/poll-mshot-update. So something is funky with the patch.

I guess you're positive that patches 1-2 have nothing to do
with that. Right?

-- 
Pavel Begunkov

* Re: [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 22:10               ` Pavel Begunkov
@ 2021-06-30 22:11                 ` Jens Axboe
  2021-06-30 22:53                   ` Pavel Begunkov
  0 siblings, 1 reply; 18+ messages in thread
From: Jens Axboe @ 2021-06-30 22:11 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 6/30/21 4:10 PM, Pavel Begunkov wrote:
> On 6/30/21 10:56 PM, Jens Axboe wrote:
>> On 6/30/21 3:45 PM, Jens Axboe wrote:
>>> On 6/30/21 3:38 PM, Pavel Begunkov wrote:
>>>> On 6/30/21 10:22 PM, Jens Axboe wrote:
>>>>> On 6/30/21 3:19 PM, Pavel Begunkov wrote:
>>>>>> On 6/30/21 10:17 PM, Jens Axboe wrote:
>>>>>>> On 6/30/21 2:54 PM, Pavel Begunkov wrote:
>>>>>>>> Whenever possible we don't want to fallback a request. task_work_add()
>>>>>>>> will be fine if the task is exiting, so don't check for PF_EXITING,
>>>>>>>> there is anyway only a relatively small gap between setting the flag
>>>>>>>> and doing the final task_work_run().
>>>>>>>>
>>>>>>>> Also add likely for the hot path.
>>>>>>>
>>>>>>> I'm not a huge fan of likely/unlikely, and in particular constructs like:
>>>>>>>
>>>>>>>> -	if (test_bit(0, &tctx->task_state) ||
>>>>>>>> +	if (likely(test_bit(0, &tctx->task_state)) ||
>>>>>>>>  	    test_and_set_bit(0, &tctx->task_state))
>>>>>>>>  		return 0;
>>>>>>>
>>>>>>> where the state is combined. In any case, it should be a separate
>>>>>>> change. If there's an "Also" paragraph in a patch, then that's also
>>>>>>> usually a good clue that that particular change should've been
>>>>>>> separate :-)
>>>>>>
>>>>>> Not sure what's wrong with likely above, but how about drop
>>>>>> this one then?
>>>>>
>>>>> Yep I did - we can do the exiting change separately, the commit message
>>>>
>>>> I think 1-2 is good enough for 5.14, I'll just send it for-next
>>>>
>>>>> just needs to be clarified a bit on why it's ok to do now. And that
>>>>
>>>> It should have been ok to do before those 2 patches, but
>>>> haven't tracked where it lost actuality.
>>>
>>> Right, I was thinking it was related to the swapping of the signal
>>> exit and task work run ordering. But didn't look that far yet...
>>
>> BTW, in usual testing, even just the one hunk removing the exit check
>> seems to result in quite a lot of memory leaks running
>> test/poll-mshot-update. So something is funky with the patch.
> 
> I guess you're positive that patches 1-2 have nothing to do
> with that. Right?

I double checked, and seems fine with those two alone. Ran the test
twice, saw massive amounts of leaks with patches 1-3, and none with
patches 1-2 only.

-- 
Jens Axboe


* Re: [PATCH 3/3] io_uring: tweak io_req_task_work_add
  2021-06-30 22:11                 ` Jens Axboe
@ 2021-06-30 22:53                   ` Pavel Begunkov
  0 siblings, 0 replies; 18+ messages in thread
From: Pavel Begunkov @ 2021-06-30 22:53 UTC (permalink / raw)
  To: Jens Axboe, io-uring

On 6/30/21 11:11 PM, Jens Axboe wrote:
> On 6/30/21 4:10 PM, Pavel Begunkov wrote:
>> On 6/30/21 10:56 PM, Jens Axboe wrote:
>>> On 6/30/21 3:45 PM, Jens Axboe wrote:
>>>> On 6/30/21 3:38 PM, Pavel Begunkov wrote:
>>>>> On 6/30/21 10:22 PM, Jens Axboe wrote:
>>>>>> On 6/30/21 3:19 PM, Pavel Begunkov wrote:
>>>>>>> On 6/30/21 10:17 PM, Jens Axboe wrote:
>>>>>>>> On 6/30/21 2:54 PM, Pavel Begunkov wrote:
>>>>>>>>> Whenever possible we don't want to fallback a request. task_work_add()
>>>>>>>>> will be fine if the task is exiting, so don't check for PF_EXITING,
>>>>>>>>> there is anyway only a relatively small gap between setting the flag
>>>>>>>>> and doing the final task_work_run().
>>>>>>>>>
>>>>>>>>> Also add likely for the hot path.
>>>>>>>>
>>>>>>>> I'm not a huge fan of likely/unlikely, and in particular constructs like:
>>>>>>>>
>>>>>>>>> -	if (test_bit(0, &tctx->task_state) ||
>>>>>>>>> +	if (likely(test_bit(0, &tctx->task_state)) ||
>>>>>>>>>  	    test_and_set_bit(0, &tctx->task_state))
>>>>>>>>>  		return 0;
>>>>>>>>
>>>>>>>> where the state is combined. In any case, it should be a separate
>>>>>>>> change. If there's an "Also" paragraph in a patch, then that's also
>>>>>>>> usually a good clue that that particular change should've been
>>>>>>>> separate :-)
>>>>>>>
>>>>>>> Not sure what's wrong with likely above, but how about drop
>>>>>>> this one then?
>>>>>>
>>>>>> Yep I did - we can do the exiting change separately, the commit message
>>>>>
>>>>> I think 1-2 is good enough for 5.14, I'll just send it for-next
>>>>>
>>>>>> just needs to be clarified a bit on why it's ok to do now. And that
>>>>>
>>>>> It should have been ok to do before those 2 patches, but
>>>>> haven't tracked where it lost actuality.
>>>>
>>>> Right, I was thinking it was related to the swapping of the signal
>>>> exit and task work run ordering. But didn't look that far yet...
>>>
>>> BTW, in usual testing, even just the one hunk removing the exit check
>>> seems to result in quite a lot of memory leaks running
>>> test/poll-mshot-update. So something is funky with the patch.
>>
>> I guess you're positive that patches 1-2 have nothing to do
>> with that. Right?
> 
> I double checked, and seems fine with those two alone. Ran the test
> twice, saw massive amounts of leaks with patches 1-3, and none with
> patches 1-2 only.

I think there is a problem with the failure path of
io_req_task_work_add(), the part that removes the work back off the list.
I will send a patch tomorrow, but I'm not able to test it.

-- 
Pavel Begunkov
