* [PATCH 1/2] fs:io_uring: clean up io_uring_cancel_files
@ 2019-11-13 10:06 Bob Liu
  2019-11-13 10:06 ` [PATCH 2/2] fs: io_uring: introduce req_need_defer() Bob Liu
  2019-11-13 19:37 ` [PATCH 1/2] fs:io_uring: clean up io_uring_cancel_files Jens Axboe
  0 siblings, 2 replies; 3+ messages in thread
From: Bob Liu @ 2019-11-13 10:06 UTC (permalink / raw)
  To: axboe; +Cc: io-uring, Bob Liu

The return value of io_wq_cancel_work() is not used, so drop it.
Also drop the unnecessary if (cancel_req) check by bailing out of the
loop first when no in-flight request was found.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 fs/io_uring.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 247e5e1..5781bfe 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4286,7 +4286,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 	DEFINE_WAIT(wait);
 
 	while (!list_empty_careful(&ctx->inflight_list)) {
-		enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
 		struct io_kiocb *cancel_req = NULL;
 
 		spin_lock_irq(&ctx->inflight_lock);
@@ -4304,14 +4303,12 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 						TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(&ctx->inflight_lock);
 
-		if (cancel_req) {
-			ret = io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
-			io_put_req(cancel_req);
-		}
-
 		/* We need to keep going until we don't find a matching req */
 		if (!cancel_req)
 			break;
+
+		io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
+		io_put_req(cancel_req);
 		schedule();
 	}
 	finish_wait(&ctx->inflight_wait, &wait);
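
For reference, the cancellation loop after this cleanup reads roughly as
below. This is a sketch assembled from the hunks above, not the full
function; the list walk and prepare_to_wait() done under inflight_lock are
reduced to a comment:

	while (!list_empty_careful(&ctx->inflight_list)) {
		struct io_kiocb *cancel_req = NULL;

		/* Walk ctx->inflight_list under inflight_lock, take a
		 * reference on the first matching request (if any) and
		 * prepare_to_wait() on inflight_wait. */

		/* We need to keep going until we don't find a matching req */
		if (!cancel_req)
			break;

		io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
		io_put_req(cancel_req);
		schedule();
	}
	finish_wait(&ctx->inflight_wait, &wait);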
-- 
2.9.5


* [PATCH 2/2] fs: io_uring: introduce req_need_defer()
  2019-11-13 10:06 [PATCH 1/2] fs:io_uring: clean up io_uring_cancel_files Bob Liu
@ 2019-11-13 10:06 ` Bob Liu
  2019-11-13 19:37 ` [PATCH 1/2] fs:io_uring: clean up io_uring_cancel_files Jens Axboe
  1 sibling, 0 replies; 3+ messages in thread
From: Bob Liu @ 2019-11-13 10:06 UTC (permalink / raw)
  To: axboe; +Cc: io-uring, Bob Liu

Rename __io_sequence_defer()/io_sequence_defer() to
__req_need_defer()/req_need_defer() and restructure the flag check so the
deferral case reads first. This makes the code easier to read; no
functional change.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
 fs/io_uring.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5781bfe..742f6a7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -448,7 +448,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	return NULL;
 }
 
-static inline bool __io_sequence_defer(struct io_kiocb *req)
+static inline bool __req_need_defer(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -456,12 +456,12 @@ static inline bool __io_sequence_defer(struct io_kiocb *req)
 					+ atomic_read(&ctx->cached_cq_overflow);
 }
 
-static inline bool io_sequence_defer(struct io_kiocb *req)
+static inline bool req_need_defer(struct io_kiocb *req)
 {
-	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
-		return false;
+	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
+		return __req_need_defer(req);
 
-	return __io_sequence_defer(req);
+	return false;
 }
 
 static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
@@ -469,7 +469,7 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
 	struct io_kiocb *req;
 
 	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
-	if (req && !io_sequence_defer(req)) {
+	if (req && !req_need_defer(req)) {
 		list_del_init(&req->list);
 		return req;
 	}
@@ -482,7 +482,7 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
 	struct io_kiocb *req;
 
 	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
-	if (req && !__io_sequence_defer(req)) {
+	if (req && !__req_need_defer(req)) {
 		list_del_init(&req->list);
 		return req;
 	}
@@ -2436,7 +2436,8 @@ static int io_req_defer(struct io_kiocb *req)
 	struct io_uring_sqe *sqe_copy;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	if (!io_sequence_defer(req) && list_empty(&ctx->defer_list))
+	/* Still need to defer if there are pending reqs in the defer list. */
+	if (!req_need_defer(req) && list_empty(&ctx->defer_list))
 		return 0;
 
 	sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
@@ -2444,7 +2445,7 @@ static int io_req_defer(struct io_kiocb *req)
 		return -EAGAIN;
 
 	spin_lock_irq(&ctx->completion_lock);
-	if (!io_sequence_defer(req) && list_empty(&ctx->defer_list)) {
+	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
 		spin_unlock_irq(&ctx->completion_lock);
 		kfree(sqe_copy);
 		return 0;
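
The two req_need_defer() checks in io_req_defer() above follow the common
optimistic double-check pattern: test without the lock on the fast path,
then allocate the sqe copy and re-check under completion_lock before
actually queueing. Below is a condensed sketch of that flow, with names
taken from the hunks above; the tail that copies the sqe and appends the
request to ctx->defer_list is recalled from the 5.4-era code and reduced
to a comment, so treat it as illustrative rather than exact:

static int io_req_defer(struct io_kiocb *req)
{
	struct io_uring_sqe *sqe_copy;
	struct io_ring_ctx *ctx = req->ctx;

	/* Fast path: nothing pending and this req does not need draining. */
	if (!req_need_defer(req) && list_empty(&ctx->defer_list))
		return 0;

	sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
	if (!sqe_copy)
		return -EAGAIN;

	spin_lock_irq(&ctx->completion_lock);
	/* Re-check under the lock; the situation may have changed. */
	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
		spin_unlock_irq(&ctx->completion_lock);
		kfree(sqe_copy);
		return 0;
	}

	/* ... copy the sqe into sqe_copy and append req to
	 * ctx->defer_list ... */
	spin_unlock_irq(&ctx->completion_lock);
	return -EIOCBQUEUED;
}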
-- 
2.9.5


* Re: [PATCH 1/2] fs:io_uring: clean up io_uring_cancel_files
  2019-11-13 10:06 [PATCH 1/2] fs:io_uring: clean up io_uring_cancel_files Bob Liu
  2019-11-13 10:06 ` [PATCH 2/2] fs: io_uring: introduce req_need_defer() Bob Liu
@ 2019-11-13 19:37 ` Jens Axboe
  1 sibling, 0 replies; 3+ messages in thread
From: Jens Axboe @ 2019-11-13 19:37 UTC (permalink / raw)
  To: Bob Liu; +Cc: io-uring

On 11/13/19 3:06 AM, Bob Liu wrote:
> The return value of io_wq_cancel_work() is not used, so drop it.
> Also drop the unnecessary if (cancel_req) check by bailing out of the
> loop first when no in-flight request was found.

Applied both of these, reworded the commit messages a bit. Thanks for
sending them in. A few notes for next time:

- Please use a cover letter if you send more than 1 patch
- Patch subject should be "io_uring: bla bla", no fs prefix, and use the same
  style for both (you have fs:io_uring and fs: io_uring)

-- 
Jens Axboe

