* [PATCH] io_uring: extend async work merging
From: Jens Axboe @ 2019-09-11 16:15 UTC
  To: linux-block

We currently merge async work items if we see a strict sequential hit.
This helps avoid unnecessary workqueue switches. We can extend this
merging to cover cases where it's not a strict
sequential hit, but the IO still fits within the same page. If an
application is doing multiple requests within the same page, we don't
want separate workers waiting on the same page to complete IO. It's much
faster to let the first worker bring in the page, then operate on that
page from the same worker to complete the next request(s).

Signed-off-by: Jens Axboe <axboe@kernel.dk>

---

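For reference, a minimal userspace sketch of the window check the new
io_should_merge() helper performs, assuming 4K pages. The would_merge()
helper and the PSZ/PMASK constants are illustrative stand-ins for the
kernel's PAGE_SIZE/PAGE_MASK, not kernel code:

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

/* Stand-ins for the kernel's page constants (4K pages assumed). */
#define PSZ	4096UL
#define PMASK	(~(PSZ - 1))

/*
 * Mirrors the io_should_merge() check below: allow a merge if the new
 * request's offset lands anywhere in the page-aligned span covered by
 * the previous request.
 */
static bool would_merge(off_t prev_start, size_t prev_len, off_t new_pos)
{
	off_t start = prev_start & PMASK;
	off_t end = (prev_start + prev_len + PSZ - 1) & PMASK;

	return new_pos >= start && new_pos <= end;
}

int main(void)
{
	printf("%d\n", would_merge(0, 512, 1024));	/* 1: same page */
	printf("%d\n", would_merge(0, 512, 512));	/* 1: sequential hit */
	printf("%d\n", would_merge(0, 512, 8192));	/* 0: two pages away */
	return 0;
}
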
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 03fcd974fd1d..4bc3ee4ea81f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -167,7 +167,7 @@ struct async_list {
 	struct list_head	list;
 
 	struct file		*file;
-	off_t			io_end;
+	off_t			io_start;
 	size_t			io_len;
 };
 
@@ -1189,6 +1189,28 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
 	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
 }
 
+static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb)
+{
+	if (al->file == kiocb->ki_filp) {
+		off_t start, end;
+
+		/*
+		 * Allow merging if we're anywhere in the range of the same
+		 * page. Generally this happens for sub-page reads or writes,
+		 * and it's beneficial to allow the first worker to bring the
+		 * page in and the piggybacked work can then work on the
+		 * cached page.
+		 */
+		start = al->io_start & PAGE_MASK;
+		end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK;
+		if (kiocb->ki_pos >= start && kiocb->ki_pos <= end)
+			return true;
+	}
+
+	al->file = NULL;
+	return false;
+}
+
 /*
  * Make a note of the last file/offset/direction we punted to async
  * context. We'll use this information to see if we can piggy back a
@@ -1200,9 +1222,8 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
 	struct async_list *async_list = &req->ctx->pending_async[rw];
 	struct kiocb *kiocb = &req->rw;
 	struct file *filp = kiocb->ki_filp;
-	off_t io_end = kiocb->ki_pos + len;
 
-	if (filp == async_list->file && kiocb->ki_pos == async_list->io_end) {
+	if (io_should_merge(async_list, kiocb)) {
 		unsigned long max_bytes;
 
 		/* Use 8x RA size as a decent limiter for both reads/writes */
@@ -1215,17 +1236,16 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
 			req->flags |= REQ_F_SEQ_PREV;
 			async_list->io_len += len;
 		} else {
-			io_end = 0;
-			async_list->io_len = 0;
+			async_list->file = NULL;
 		}
 	}
 
 	/* New file? Reset state. */
 	if (async_list->file != filp) {
-		async_list->io_len = 0;
+		async_list->io_start = kiocb->ki_pos;
+		async_list->io_len = len;
 		async_list->file = filp;
 	}
-	async_list->io_end = io_end;
 }
 
 static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
@@ -1994,7 +2014,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
  */
 static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
 {
-	bool ret = false;
+	bool ret;
 
 	if (!list)
 		return false;

-- 
Jens Axboe



* Re: [PATCH] io_uring: extend async work merging
From: Jeff Moyer @ 2019-09-12 20:13 UTC
  To: Jens Axboe; +Cc: linux-block

Jens Axboe <axboe@kernel.dk> writes:

> We currently merge async work items if we see a strict sequential hit.
> This helps avoid unnecessary workqueue switches. We can extend this
> merging to cover cases where it's not a strict
> sequential hit, but the IO still fits within the same page. If an
> application is doing multiple requests within the same page, we don't
> want separate workers waiting on the same page to complete IO. It's much
> faster to let the first worker bring in the page, then operate on that
> page from the same worker to complete the next request(s).
>
> Signed-off-by: Jens Axboe <axboe@kernel.dk>

Reviewed-by: Jeff Moyer <jmoyer@redhat.com>

Minor nit below.

> @@ -1994,7 +2014,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
>   */
>  static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
>  {
> -	bool ret = false;
> +	bool ret;
>  
>  	if (!list)
>  		return false;

This hunk looks unrelated.  Also, I think you could actually change that
to be initialized to true, and get rid of the assignment later:

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 03fcd974fd1d..a94c8584c480 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1994,7 +1994,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
  */
 static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
 {
-	bool ret = false;
+	bool ret = true;
 
 	if (!list)
 		return false;
@@ -2003,7 +2003,6 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
 	if (!atomic_read(&list->cnt))
 		return false;
 
-	ret = true;
 	spin_lock(&list->lock);
 	list_add_tail(&req->list, &list->list);
 	/*

Cheers,
Jeff


* Re: [PATCH] io_uring: extend async work merging
From: Jens Axboe @ 2019-09-12 20:18 UTC
  To: Jeff Moyer; +Cc: linux-block

On 9/12/19 2:13 PM, Jeff Moyer wrote:
> Jens Axboe <axboe@kernel.dk> writes:
> 
>> We currently merge async work items if we see a strict sequential hit.
>> This helps avoid unnecessary workqueue switches. We can extend this
>> merging to cover cases where it's not a strict
>> sequential hit, but the IO still fits within the same page. If an
>> application is doing multiple requests within the same page, we don't
>> want separate workers waiting on the same page to complete IO. It's much
>> faster to let the first worker bring in the page, then operate on that
>> page from the same worker to complete the next request(s).
>>
>> Signed-off-by: Jens Axboe <axboe@kernel.dk>
> 
> Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
> 
> Minor nit below.
> 
>> @@ -1994,7 +2014,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
>>    */
>>   static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
>>   {
>> -	bool ret = false;
>> +	bool ret;
>>   
>>   	if (!list)
>>   		return false;
> 
> This hunk looks unrelated.  Also, I think you could actually change that
> to be initialized to true, and get rid of the assignment later:

Yeah I could, but that would have added more unrelated changes... I'm
fine with it later, even though the compiler probably takes care of it.

-- 
Jens Axboe



* Re: [PATCH] io_uring: extend async work merging
From: Nikolay Borisov @ 2019-09-13  7:08 UTC
  To: Jens Axboe, linux-block



On 11.09.19 19:15, Jens Axboe wrote:
> We currently merge async work items if we see a strict sequential hit.
> This helps avoid unnecessary workqueue switches. We can extend this
> merging to cover cases where it's not a strict
> sequential hit, but the IO still fits within the same page. If an
> application is doing multiple requests within the same page, we don't
> want separate workers waiting on the same page to complete IO. It's much
> faster to let the first worker bring in the page, then operate on that
> page from the same worker to complete the next request(s).
> 
> Signed-off-by: Jens Axboe <axboe@kernel.dk>
> 
> ---
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 03fcd974fd1d..4bc3ee4ea81f 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -167,7 +167,7 @@ struct async_list {
>  	struct list_head	list;
>  
>  	struct file		*file;
> -	off_t			io_end;
> +	off_t			io_start;
>  	size_t			io_len;
>  };
>  
> @@ -1189,6 +1189,28 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
>  	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
>  }
>  
> +static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb)
> +{
> +	if (al->file == kiocb->ki_filp) {
> +		off_t start, end;
> +
> +		/*
> +		 * Allow merging if we're anywhere in the range of the same
> +		 * page. Generally this happens for sub-page reads or writes,
> +		 * and it's beneficial to allow the first worker to bring the
> +		 * page in and the piggybacked work can then work on the
> +		 * cached page.
> +		 */
> +		start = al->io_start & PAGE_MASK;

nit: round_down(al->io_start, PAGE_SIZE);

> +		end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK;
nit: round_up(al->io_start + al->io_len, PAGE_SIZE);
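
Both forms expand to the same mask arithmetic for a power-of-two
PAGE_SIZE, so this is purely a readability nit. A minimal userspace
check, assuming 4K pages, with rd()/ru() as illustrative stand-ins for
the kernel's round_down()/round_up() macros:

#include <assert.h>
#include <stdint.h>

#define PSZ 4096UL

/* Stand-ins for round_down()/round_up() with a power-of-two size. */
static uint64_t rd(uint64_t x) { return x & ~(PSZ - 1); }
static uint64_t ru(uint64_t x) { return (x + PSZ - 1) & ~(PSZ - 1); }

int main(void)
{
	assert(rd(5000) == 4096 && ru(5000) == 8192);	/* mid-page */
	assert(rd(4096) == 4096 && ru(4096) == 4096);	/* already aligned */
	return 0;
}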

> +		if (kiocb->ki_pos >= start && kiocb->ki_pos <= end)
> +			return true;
> +	}
> +
> +	al->file = NULL;
> +	return false;
> +}
> +
>  /*
>   * Make a note of the last file/offset/direction we punted to async
>   * context. We'll use this information to see if we can piggy back a
> @@ -1200,9 +1222,8 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
>  	struct async_list *async_list = &req->ctx->pending_async[rw];
>  	struct kiocb *kiocb = &req->rw;
>  	struct file *filp = kiocb->ki_filp;
> -	off_t io_end = kiocb->ki_pos + len;
>  
> -	if (filp == async_list->file && kiocb->ki_pos == async_list->io_end) {
> +	if (io_should_merge(async_list, kiocb)) {
>  		unsigned long max_bytes;
>  
>  		/* Use 8x RA size as a decent limiter for both reads/writes */
> @@ -1215,17 +1236,16 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
>  			req->flags |= REQ_F_SEQ_PREV;
>  			async_list->io_len += len;
>  		} else {
> -			io_end = 0;
> -			async_list->io_len = 0;
> +			async_list->file = NULL;
>  		}
>  	}
>  
>  	/* New file? Reset state. */
>  	if (async_list->file != filp) {
> -		async_list->io_len = 0;
> +		async_list->io_start = kiocb->ki_pos;
> +		async_list->io_len = len;
>  		async_list->file = filp;
>  	}
> -	async_list->io_end = io_end;
>  }
>  
>  static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
> @@ -1994,7 +2014,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
>   */
>  static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
>  {
> -	bool ret = false;
> +	bool ret;
>  
>  	if (!list)
>  		return false;
> 

