* [PATCH 5.12 0/2] kill sqo_dead
From: Pavel Begunkov @ 2021-03-01 13:02 UTC
  To: Jens Axboe, io-uring

Remove the no-longer-needed sqo_dead flag and some other leftovers.

Pavel Begunkov (2):
  io_uring: kill sqo_dead and sqo submission halting
  io_uring: remove sqo_task

 fs/io_uring.c | 50 +++-----------------------------------------------
 1 file changed, 3 insertions(+), 47 deletions(-)

-- 
2.24.0



* [PATCH 1/2] io_uring: kill sqo_dead and sqo submission halting
From: Pavel Begunkov @ 2021-03-01 13:02 UTC
  To: Jens Axboe, io-uring

As the SQPOLL task doesn't poke into ->sqo_task anymore, there is no
need to kill sqo submission when the master task exits. Previously that
was necessary to avoid races between accessing sqo_task->files and
removing them.
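
For background, "sqo" here is the SQPOLL kernel thread that submits SQ
entries on the application's behalf. A minimal userspace sketch of that
mode using liburing (illustrative only, not part of this patch; SQPOLL
may require elevated privileges on older kernels):

#include <liburing.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct io_uring_params p = { };
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	/* ask for an SQPOLL ring: a kernel thread picks up SQ entries */
	p.flags = IORING_SETUP_SQPOLL;
	p.sq_thread_idle = 1000;	/* ms before the SQ thread idles */

	ret = io_uring_queue_init_params(8, &ring, &p);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	/* with SQPOLL this only enters the kernel if the thread idled */
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		printf("nop completed, res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}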

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 40 +++-------------------------------------
 1 file changed, 3 insertions(+), 37 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 54e6c4db9620..6529761b77eb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -338,7 +338,6 @@ struct io_ring_ctx {
 		unsigned int		drain_next: 1;
 		unsigned int		eventfd_async: 1;
 		unsigned int		restricted: 1;
-		unsigned int		sqo_dead: 1;
 		unsigned int		sqo_exec: 1;
 
 		/*
@@ -1972,7 +1971,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
 
 	/* ctx stays valid until unlock, even if we drop all ours ctx->refs */
 	mutex_lock(&ctx->uring_lock);
-	if (!ctx->sqo_dead && !(current->flags & PF_EXITING) && !current->in_execve)
+	if (!(current->flags & PF_EXITING) && !current->in_execve)
 		__io_queue_sqe(req);
 	else
 		__io_req_task_cancel(req, -EFAULT);
@@ -6583,8 +6582,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
 		if (!list_empty(&ctx->iopoll_list))
 			io_do_iopoll(ctx, &nr_events, 0);
 
-		if (to_submit && !ctx->sqo_dead &&
-		    likely(!percpu_ref_is_dying(&ctx->refs)))
+		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)))
 			ret = io_submit_sqes(ctx, to_submit);
 		mutex_unlock(&ctx->uring_lock);
 	}
@@ -7823,7 +7821,7 @@ static int io_sq_thread_fork(struct io_sq_data *sqd, struct io_ring_ctx *ctx)
 
 	clear_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
 	reinit_completion(&sqd->completion);
-	ctx->sqo_dead = ctx->sqo_exec = 0;
+	ctx->sqo_exec = 0;
 	sqd->task_pid = current->pid;
 	current->flags |= PF_IO_WORKER;
 	ret = io_wq_fork_thread(io_sq_thread, sqd);
@@ -8534,10 +8532,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
 	mutex_lock(&ctx->uring_lock);
 	percpu_ref_kill(&ctx->refs);
-
-	if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
-		ctx->sqo_dead = 1;
-
 	/* if force is set, the ring is going away. always drop after that */
 	ctx->cq_overflow_flushed = 1;
 	if (ctx->rings)
@@ -8697,19 +8691,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 	}
 }
 
-static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
-{
-	mutex_lock(&ctx->uring_lock);
-	ctx->sqo_dead = 1;
-	if (ctx->flags & IORING_SETUP_R_DISABLED)
-		io_sq_offload_start(ctx);
-	mutex_unlock(&ctx->uring_lock);
-
-	/* make sure callers enter the ring to get error */
-	if (ctx->rings)
-		io_ring_set_wakeup_flag(ctx);
-}
-
 /*
  * We need to iteratively cancel requests, in case a request has dependent
  * hard links. These persist even for failure of cancelations, hence keep
@@ -8722,7 +8703,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	bool did_park = false;
 
 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
-		io_disable_sqo_submit(ctx);
 		did_park = io_sq_thread_park(ctx->sq_data);
 		if (did_park) {
 			task = ctx->sq_data->thread;
@@ -8843,7 +8823,6 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
 
 	if (!sqd)
 		return;
-	io_disable_sqo_submit(ctx);
 	if (!io_sq_thread_park(sqd))
 		return;
 	tctx = ctx->sq_data->thread->io_uring;
@@ -8888,7 +8867,6 @@ void __io_uring_task_cancel(void)
 	/* make sure overflow events are dropped */
 	atomic_inc(&tctx->in_idle);
 
-	/* trigger io_disable_sqo_submit() */
 	if (tctx->sqpoll) {
 		struct file *file;
 		unsigned long index;
@@ -9019,22 +8997,14 @@ static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 	do {
 		if (!io_sqring_full(ctx))
 			break;
-
 		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
 
-		if (unlikely(ctx->sqo_dead)) {
-			ret = -EOWNERDEAD;
-			goto out;
-		}
-
 		if (!io_sqring_full(ctx))
 			break;
-
 		schedule();
 	} while (!signal_pending(current));
 
 	finish_wait(&ctx->sqo_sq_wait, &wait);
-out:
 	return ret;
 }
 
@@ -9120,8 +9090,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 				goto out;
 		}
 		ret = -EOWNERDEAD;
-		if (unlikely(ctx->sqo_dead))
-			goto out;
 		if (flags & IORING_ENTER_SQ_WAKEUP)
 			wake_up(&ctx->sq_data->wait);
 		if (flags & IORING_ENTER_SQ_WAIT) {
@@ -9493,7 +9461,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	 */
 	ret = io_uring_install_fd(ctx, file);
 	if (ret < 0) {
-		io_disable_sqo_submit(ctx);
 		/* fput will clean it up */
 		fput(file);
 		return ret;
@@ -9502,7 +9469,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
 	return ret;
 err:
-	io_disable_sqo_submit(ctx);
 	io_ring_ctx_wait_and_kill(ctx);
 	return ret;
 }
-- 
2.24.0



* [PATCH 2/2] io_uring: remove sqo_task
From: Pavel Begunkov @ 2021-03-01 13:02 UTC
  To: Jens Axboe, io-uring

Now that sqo_dead is gone, sqo_task is only used for a warning that is
no longer interesting; remove all of that, including ctx->sqo_task.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6529761b77eb..7fb284ce5319 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -379,11 +379,6 @@ struct io_ring_ctx {
 
 	struct io_rings	*rings;
 
-	/*
-	 * For SQPOLL usage
-	 */
-	struct task_struct	*sqo_task;
-
 	/* Only used for accounting purposes */
 	struct mm_struct	*mm_account;
 
@@ -8747,10 +8742,6 @@ static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
 				fput(file);
 				return ret;
 			}
-
-			/* one and only SQPOLL file note, held by sqo_task */
-			WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) &&
-				     current != ctx->sqo_task);
 		}
 		tctx->last = file;
 	}
@@ -9398,7 +9389,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	ctx->compat = in_compat_syscall();
 	if (!capable(CAP_IPC_LOCK))
 		ctx->user = get_uid(current_user());
-	ctx->sqo_task = current;
 
 	/*
 	 * This is just grabbed for accounting purposes. When a process exits,
-- 
2.24.0



* Re: [PATCH 1/2] io_uring: kill sqo_dead and sqo submission halting
From: Jens Axboe @ 2021-03-01 14:42 UTC
  To: Pavel Begunkov, io-uring

On 3/1/21 6:02 AM, Pavel Begunkov wrote:
> @@ -8697,19 +8691,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
>  	}
>  }
>  
> -static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
> -{
> -	mutex_lock(&ctx->uring_lock);
> -	ctx->sqo_dead = 1;
> -	if (ctx->flags & IORING_SETUP_R_DISABLED)
> -		io_sq_offload_start(ctx);
> -	mutex_unlock(&ctx->uring_lock);
> -
> -	/* make sure callers enter the ring to get error */
> -	if (ctx->rings)
> -		io_ring_set_wakeup_flag(ctx);
> -}

We need to retain the offload start here for IORING_SETUP_R_DISABLED, or
we'll potentially hang when:

> @@ -8722,7 +8703,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
>  	bool did_park = false;
>  
>  	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
> -		io_disable_sqo_submit(ctx);
>  		did_park = io_sq_thread_park(ctx->sq_data);
>  		if (did_park) {
>  			task = ctx->sq_data->thread;

we try and park right here. Maybe we can just do that in
cancel_sqpoll(), will double check.
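
A userspace sketch of that scenario, for reference (illustrative only;
io_uring_enable_rings() is liburing's helper for the enable step):

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring_params p = { };
	struct io_uring ring;
	int ret;

	/* SQPOLL ring created disabled: the SQ thread hasn't started */
	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_R_DISABLED;
	ret = io_uring_queue_init_params(8, &ring, &p);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	/*
	 * Tear down without ever calling io_uring_enable_rings(&ring).
	 * If nothing starts the offload first, cancellation may park an
	 * SQ thread that never ran -- the potential hang noted above.
	 */
	io_uring_queue_exit(&ring);
	return 0;
}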

-- 
Jens Axboe


