* [PATCH 0/4] small 5.17 updates
@ 2021-12-04 20:49 Pavel Begunkov
  2021-12-04 20:49 ` [PATCH 1/4] io_uring: move up io_put_kbuf() and io_put_rw_kbuf() Pavel Begunkov
                   ` (3 more replies)
  0 siblings, 4 replies; 9+ messages in thread
From: Pavel Begunkov @ 2021-12-04 20:49 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence

3/4 changes the return value of IOPOLL for CQE_SKIP while we still can;
the others are just small cleanups.

Hao Xu (1):
  io_uring: move up io_put_kbuf() and io_put_rw_kbuf()

Pavel Begunkov (3):
  io_uring: simplify selected buf handling
  io_uring: tweak iopoll return for REQ_F_CQE_SKIP
  io_uring: reuse io_req_task_complete for timeouts

 fs/io_uring.c | 91 +++++++++++++++++++++------------------------------
 1 file changed, 38 insertions(+), 53 deletions(-)

-- 
2.34.0



* [PATCH 1/4] io_uring: move up io_put_kbuf() and io_put_rw_kbuf()
  2021-12-04 20:49 [PATCH 0/4] small 5.17 updates Pavel Begunkov
@ 2021-12-04 20:49 ` Pavel Begunkov
  2021-12-04 20:49 ` [PATCH 2/4] io_uring: simplify selected buf handling Pavel Begunkov
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 9+ messages in thread
From: Pavel Begunkov @ 2021-12-04 20:49 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence, Hao Xu

From: Hao Xu <haoxu@linux.alibaba.com>

Move them up so that later patches can use them without adding forward
declarations.
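
Otherwise, earlier users would need forward declarations along these
lines (illustrative only, not part of this patch; signatures as in the
diff below):

	static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf);
	static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req);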

Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Hao Xu <haoxu@linux.alibaba.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8b6bfed16f65..ffbe1b76f3a0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1273,6 +1273,24 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req,
 	}
 }
 
+static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
+{
+	unsigned int cflags;
+
+	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
+	cflags |= IORING_CQE_F_BUFFER;
+	req->flags &= ~REQ_F_BUFFER_SELECTED;
+	kfree(kbuf);
+	return cflags;
+}
+
+static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
+{
+	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
+		return 0;
+	return io_put_kbuf(req, req->kbuf);
+}
+
 static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
 {
 	bool got = percpu_ref_tryget(ref);
@@ -2456,24 +2474,6 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
 	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
 }
 
-static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
-{
-	unsigned int cflags;
-
-	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
-	cflags |= IORING_CQE_F_BUFFER;
-	req->flags &= ~REQ_F_BUFFER_SELECTED;
-	kfree(kbuf);
-	return cflags;
-}
-
-static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
-{
-	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
-		return 0;
-	return io_put_kbuf(req, req->kbuf);
-}
-
 static inline bool io_run_task_work(void)
 {
 	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
-- 
2.34.0



* [PATCH 2/4] io_uring: simplify selected buf handling
  2021-12-04 20:49 [PATCH 0/4] small 5.17 updates Pavel Begunkov
  2021-12-04 20:49 ` [PATCH 1/4] io_uring: move up io_put_kbuf() and io_put_rw_kbuf() Pavel Begunkov
@ 2021-12-04 20:49 ` Pavel Begunkov
  2021-12-04 20:49 ` [PATCH 3/4] io_uring: tweak iopoll return for REQ_F_CQE_SKIP Pavel Begunkov
  2021-12-04 20:49 ` [PATCH 4/4] io_uring: reuse io_req_task_complete for timeouts Pavel Begunkov
  3 siblings, 0 replies; 9+ messages in thread
From: Pavel Begunkov @ 2021-12-04 20:49 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence

As selected buffers are now stored in a separate field in the request,
get rid of the rw/recv-specific helpers and simplify the code.
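
For context, the selected buffer now lives directly in the request;
roughly (sketch, other members omitted, field name as used in the diff
below):

	struct io_kiocb {
		...
		/* stores selected buffer, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer	*kbuf;
		...
	};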

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 44 +++++++++++++++++---------------------------
 1 file changed, 17 insertions(+), 27 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index ffbe1b76f3a0..64add8260abb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1273,22 +1273,24 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req,
 	}
 }
 
-static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
+static unsigned int __io_put_kbuf(struct io_kiocb *req)
 {
+	struct io_buffer *kbuf = req->kbuf;
 	unsigned int cflags;
 
 	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
 	cflags |= IORING_CQE_F_BUFFER;
 	req->flags &= ~REQ_F_BUFFER_SELECTED;
 	kfree(kbuf);
+	req->kbuf = NULL;
 	return cflags;
 }
 
-static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
+static inline unsigned int io_put_kbuf(struct io_kiocb *req)
 {
 	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
 		return 0;
-	return io_put_kbuf(req, req->kbuf);
+	return __io_put_kbuf(req);
 }
 
 static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
@@ -2532,14 +2534,14 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 	prev = start;
 	wq_list_for_each_resume(pos, prev) {
 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
-		u32 cflags;
 
 		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
 		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
-		cflags = io_put_rw_kbuf(req);
+
 		if (!(req->flags & REQ_F_CQE_SKIP))
-			__io_fill_cqe(ctx, req->user_data, req->result, cflags);
+			__io_fill_cqe(ctx, req->user_data, req->result,
+				      io_put_kbuf(req));
 		nr_events++;
 	}
 
@@ -2715,7 +2717,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 
 static void io_req_task_complete(struct io_kiocb *req, bool *locked)
 {
-	unsigned int cflags = io_put_rw_kbuf(req);
+	unsigned int cflags = io_put_kbuf(req);
 	int res = req->result;
 
 	if (*locked) {
@@ -2731,7 +2733,7 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 {
 	if (__io_complete_rw_common(req, res))
 		return;
-	__io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
+	__io_req_complete(req, issue_flags, req->result, io_put_kbuf(req));
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res)
@@ -4979,11 +4981,6 @@ static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
 	return io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
 }
 
-static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
-{
-	return io_put_kbuf(req, req->kbuf);
-}
-
 static int io_recvmsg_prep_async(struct io_kiocb *req)
 {
 	int ret;
@@ -5021,8 +5018,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	struct socket *sock;
 	struct io_buffer *kbuf;
 	unsigned flags;
-	int min_ret = 0;
-	int ret, cflags = 0;
+	int ret, min_ret = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
 	sock = sock_from_file(req->file);
@@ -5066,13 +5062,11 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 		req_set_fail(req);
 	}
 
-	if (req->flags & REQ_F_BUFFER_SELECTED)
-		cflags = io_put_recv_kbuf(req);
 	/* fast path, check for non-NULL to avoid function call */
 	if (kmsg->free_iov)
 		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	__io_req_complete(req, issue_flags, ret, cflags);
+	__io_req_complete(req, issue_flags, ret, io_put_kbuf(req));
 	return 0;
 }
 
@@ -5085,8 +5079,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	struct socket *sock;
 	struct iovec iov;
 	unsigned flags;
-	int min_ret = 0;
-	int ret, cflags = 0;
+	int ret, min_ret = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
 	sock = sock_from_file(req->file);
@@ -5128,9 +5121,8 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 		req_set_fail(req);
 	}
-	if (req->flags & REQ_F_BUFFER_SELECTED)
-		cflags = io_put_recv_kbuf(req);
-	__io_req_complete(req, issue_flags, ret, cflags);
+
+	__io_req_complete(req, issue_flags, ret, io_put_kbuf(req));
 	return 0;
 }
 
@@ -6578,10 +6570,8 @@ static __cold void io_drain_req(struct io_kiocb *req)
 
 static void io_clean_op(struct io_kiocb *req)
 {
-	if (req->flags & REQ_F_BUFFER_SELECTED) {
-		kfree(req->kbuf);
-		req->kbuf = NULL;
-	}
+	if (req->flags & REQ_F_BUFFER_SELECTED)
+		io_put_kbuf(req);
 
 	if (req->flags & REQ_F_NEED_CLEANUP) {
 		switch (req->opcode) {
-- 
2.34.0



* [PATCH 3/4] io_uring: tweak iopoll return for REQ_F_CQE_SKIP
  2021-12-04 20:49 [PATCH 0/4] small 5.17 updates Pavel Begunkov
  2021-12-04 20:49 ` [PATCH 1/4] io_uring: move up io_put_kbuf() and io_put_rw_kbuf() Pavel Begunkov
  2021-12-04 20:49 ` [PATCH 2/4] io_uring: simplify selected buf handling Pavel Begunkov
@ 2021-12-04 20:49 ` Pavel Begunkov
  2021-12-04 22:21   ` Jens Axboe
  2021-12-04 20:49 ` [PATCH 4/4] io_uring: reuse io_req_task_complete for timeouts Pavel Begunkov
  3 siblings, 1 reply; 9+ messages in thread
From: Pavel Begunkov @ 2021-12-04 20:49 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence

Currently, IOPOLL returns the number of completed requests, but with
REQ_F_CQE_SKIP those are no longer the same thing as posted CQEs. That
may be confusing, as non-iopoll waiting cares only about CQEs, so make
io_do_iopoll() return the number of posted CQEs instead.
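
For reference, the caller accumulates this return value against the
userspace-requested minimum, roughly like below (heavily simplified
sketch, not the literal kernel code):

	static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
	{
		unsigned int nr_events = 0;
		int ret;

		do {
			ret = io_do_iopoll(ctx, !min);
			if (ret < 0)
				break;
			nr_events += ret;	/* now counts posted CQEs */
		} while (nr_events < min && !need_resched());

		return ret < 0 ? ret : 0;
	}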

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 64add8260abb..ea7a0daa0b3b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2538,10 +2538,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
 		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
+		if (unlikely(req->flags & REQ_F_CQE_SKIP))
+			continue;
 
-		if (!(req->flags & REQ_F_CQE_SKIP))
-			__io_fill_cqe(ctx, req->user_data, req->result,
-				      io_put_kbuf(req));
+		__io_fill_cqe(ctx, req->user_data, req->result, io_put_kbuf(req));
 		nr_events++;
 	}
 
-- 
2.34.0



* [PATCH 4/4] io_uring: reuse io_req_task_complete for timeouts
  2021-12-04 20:49 [PATCH 0/4] small 5.17 updates Pavel Begunkov
                   ` (2 preceding siblings ...)
  2021-12-04 20:49 ` [PATCH 3/4] io_uring: tweak iopoll return for REQ_F_CQE_SKIP Pavel Begunkov
@ 2021-12-04 20:49 ` Pavel Begunkov
  3 siblings, 0 replies; 9+ messages in thread
From: Pavel Begunkov @ 2021-12-04 20:49 UTC (permalink / raw)
  To: io-uring; +Cc: asml.silence

With the kbuf handling unified, io_req_task_complete() is now a generic
completion function; use it for timeouts' task-work completions,
stashing the -ETIME result in req->result beforehand.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index ea7a0daa0b3b..1265dc1942eb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5953,15 +5953,6 @@ static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
 	return 0;
 }
 
-static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
-{
-	struct io_timeout_data *data = req->async_data;
-
-	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
-		req_set_fail(req);
-	io_req_complete_post(req, -ETIME, 0);
-}
-
 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 {
 	struct io_timeout_data *data = container_of(timer,
@@ -5976,7 +5967,11 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 		atomic_read(&req->ctx->cq_timeouts) + 1);
 	spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
-	req->io_task_work.func = io_req_task_timeout;
+	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
+		req_set_fail(req);
+
+	req->result = -ETIME;
+	req->io_task_work.func = io_req_task_complete;
 	io_req_task_work_add(req);
 	return HRTIMER_NORESTART;
 }
-- 
2.34.0



* Re: [PATCH 3/4] io_uring: tweak iopoll return for REQ_F_CQE_SKIP
  2021-12-04 20:49 ` [PATCH 3/4] io_uring: tweak iopoll return for REQ_F_CQE_SKIP Pavel Begunkov
@ 2021-12-04 22:21   ` Jens Axboe
  2021-12-04 22:48     ` Pavel Begunkov
  0 siblings, 1 reply; 9+ messages in thread
From: Jens Axboe @ 2021-12-04 22:21 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 12/4/21 1:49 PM, Pavel Begunkov wrote:
> Currently, IOPOLL returns the number of completed requests, but with
> REQ_F_CQE_SKIP those are no longer the same thing as posted CQEs. That
> may be confusing, as non-iopoll waiting cares only about CQEs, so make
> io_do_iopoll() return the number of posted CQEs instead.
> 
> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
> ---
>  fs/io_uring.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 64add8260abb..ea7a0daa0b3b 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -2538,10 +2538,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
>  		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
>  		if (!smp_load_acquire(&req->iopoll_completed))
>  			break;
> +		if (unlikely(req->flags & REQ_F_CQE_SKIP))
> +			continue;
>  
> -		if (!(req->flags & REQ_F_CQE_SKIP))
> -			__io_fill_cqe(ctx, req->user_data, req->result,
> -				      io_put_kbuf(req));
> +		__io_fill_cqe(ctx, req->user_data, req->result, io_put_kbuf(req));
>  		nr_events++;
>  	}
>  

Not sure I follow the logic behind this change. Places like
io_iopoll_try_reap_events() just need a "did we find anything" return,
which is independent of whether or not we actually posted CQEs.
Other callers either don't care about the return value at all or only
check whether it's < 0 (which this change won't affect).

I feel like I'm missing something here, or the commit message needs to
better explain why this change is done.
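
For reference, that reap loop is roughly the following (simplified
sketch, not verbatim) and only checks whether anything at all was
reaped:

	while (!wq_list_empty(&ctx->iopoll_list)) {
		/* only cares that something was found, not that CQEs were posted */
		if (io_do_iopoll(ctx, true) == 0)
			break;
		...
	}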

-- 
Jens Axboe



* Re: [PATCH 3/4] io_uring: tweak iopoll return for REQ_F_CQE_SKIP
  2021-12-04 22:21   ` Jens Axboe
@ 2021-12-04 22:48     ` Pavel Begunkov
  2021-12-04 23:20       ` Jens Axboe
  0 siblings, 1 reply; 9+ messages in thread
From: Pavel Begunkov @ 2021-12-04 22:48 UTC (permalink / raw)
  To: Jens Axboe, io-uring

On 12/4/21 22:21, Jens Axboe wrote:
> On 12/4/21 1:49 PM, Pavel Begunkov wrote:
>> Currently, IOPOLL returns the number of completed requests, but with
>> REQ_F_CQE_SKIP those are no longer the same thing as posted CQEs. That
>> may be confusing, as non-iopoll waiting cares only about CQEs, so make
>> io_do_iopoll() return the number of posted CQEs instead.
>>
>> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
>> ---
>>   fs/io_uring.c | 6 +++---
>>   1 file changed, 3 insertions(+), 3 deletions(-)
>>
>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>> index 64add8260abb..ea7a0daa0b3b 100644
>> --- a/fs/io_uring.c
>> +++ b/fs/io_uring.c
>> @@ -2538,10 +2538,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
>>   		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
>>   		if (!smp_load_acquire(&req->iopoll_completed))
>>   			break;
>> +		if (unlikely(req->flags & REQ_F_CQE_SKIP))
>> +			continue;
>>   
>> -		if (!(req->flags & REQ_F_CQE_SKIP))
>> -			__io_fill_cqe(ctx, req->user_data, req->result,
>> -				      io_put_kbuf(req));
>> +		__io_fill_cqe(ctx, req->user_data, req->result, io_put_kbuf(req));
>>   		nr_events++;
>>   	}
>>   
> 
> Not sure I follow the logic behind this change. Places like
> io_iopoll_try_reap_events() just need a "did we find anything" return,
> which is independent of whether or not we actually posted CQEs.
> Other callers either don't care about the return value at all or only
> check whether it's < 0 (which this change won't affect).
> 
> I feel like I'm missing something here, or the commit message needs to
> better explain why this change is done.

I was wrong in how I described it, but that just means the problem is
in a different place.

int io_do_iopoll() {
	return nr_events;
}

int io_iopoll_check() {
	do {
		nr_events += io_do_iopoll();
	} while (nr_events < min && ...);
}

And "events" there better to be CQEs, otherwise the semantics
of @min + CQE_SKIP is not very clear and mismatches non-IOPOLL.

-- 
Pavel Begunkov


* Re: [PATCH 3/4] io_uring: tweak iopoll return for REQ_F_CQE_SKIP
  2021-12-04 22:48     ` Pavel Begunkov
@ 2021-12-04 23:20       ` Jens Axboe
  2021-12-05  0:23         ` Pavel Begunkov
  0 siblings, 1 reply; 9+ messages in thread
From: Jens Axboe @ 2021-12-04 23:20 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 12/4/21 3:48 PM, Pavel Begunkov wrote:
> On 12/4/21 22:21, Jens Axboe wrote:
>> On 12/4/21 1:49 PM, Pavel Begunkov wrote:
>>> Currently, IOPOLL returns the number of completed requests, but with
>>> REQ_F_CQE_SKIP those are no longer the same thing as posted CQEs. That
>>> may be confusing, as non-iopoll waiting cares only about CQEs, so make
>>> io_do_iopoll() return the number of posted CQEs instead.
>>>
>>> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
>>> ---
>>>   fs/io_uring.c | 6 +++---
>>>   1 file changed, 3 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>>> index 64add8260abb..ea7a0daa0b3b 100644
>>> --- a/fs/io_uring.c
>>> +++ b/fs/io_uring.c
>>> @@ -2538,10 +2538,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
>>>   		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
>>>   		if (!smp_load_acquire(&req->iopoll_completed))
>>>   			break;
>>> +		if (unlikely(req->flags & REQ_F_CQE_SKIP))
>>> +			continue;
>>>   
>>> -		if (!(req->flags & REQ_F_CQE_SKIP))
>>> -			__io_fill_cqe(ctx, req->user_data, req->result,
>>> -				      io_put_kbuf(req));
>>> +		__io_fill_cqe(ctx, req->user_data, req->result, io_put_kbuf(req));
>>>   		nr_events++;
>>>   	}
>>>   
>>
>> Not sure I follow the logic behind this change. Places like
>> io_iopoll_try_reap_events() just need a "did we find anything" return,
>> which is independent of whether or not we actually posted CQEs.
>> Other callers either don't care about the return value at all or only
>> check whether it's < 0 (which this change won't affect).
>>
>> I feel like I'm missing something here, or the commit message needs to
>> better explain why this change is done.
> 
> I was wrong in how I described it, but that just means the problem is
> in a different place.
> 
> int io_do_iopoll() {
> 	return nr_events;
> }
> 
> int io_iopoll_check() {
> 	do {
> 		nr_events += io_do_iopoll();
> 	} while (nr_events < min && ...);
> }
> 
> And "events" there had better be CQEs, otherwise the semantics of
> @min + CQE_SKIP are unclear and mismatch the non-IOPOLL case.

Can you do a v2 of this patch? Rest of them look good to me.

-- 
Jens Axboe



* Re: [PATCH 3/4] io_uring: tweak iopoll return for REQ_F_CQE_SKIP
  2021-12-04 23:20       ` Jens Axboe
@ 2021-12-05  0:23         ` Pavel Begunkov
  0 siblings, 0 replies; 9+ messages in thread
From: Pavel Begunkov @ 2021-12-05  0:23 UTC (permalink / raw)
  To: Jens Axboe, io-uring

On 12/4/21 23:20, Jens Axboe wrote:
> On 12/4/21 3:48 PM, Pavel Begunkov wrote:
>> On 12/4/21 22:21, Jens Axboe wrote:
>>> On 12/4/21 1:49 PM, Pavel Begunkov wrote:
>>>> Currently, IOPOLL returns the number of completed requests, but with
>>>> REQ_F_CQE_SKIP those are no longer the same thing as posted CQEs. That
>>>> may be confusing, as non-iopoll waiting cares only about CQEs, so make
>>>> io_do_iopoll() return the number of posted CQEs instead.
>>>>
>>>> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
>>>> ---
>>>>    fs/io_uring.c | 6 +++---
>>>>    1 file changed, 3 insertions(+), 3 deletions(-)
>>>>
>>>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>>>> index 64add8260abb..ea7a0daa0b3b 100644
>>>> --- a/fs/io_uring.c
>>>> +++ b/fs/io_uring.c
>>>> @@ -2538,10 +2538,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
>>>>    		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
>>>>    		if (!smp_load_acquire(&req->iopoll_completed))
>>>>    			break;
>>>> +		if (unlikely(req->flags & REQ_F_CQE_SKIP))
>>>> +			continue;
>>>>    
>>>> -		if (!(req->flags & REQ_F_CQE_SKIP))
>>>> -			__io_fill_cqe(ctx, req->user_data, req->result,
>>>> -				      io_put_kbuf(req));
>>>> +		__io_fill_cqe(ctx, req->user_data, req->result, io_put_kbuf(req));
>>>>    		nr_events++;
>>>>    	}
>>>>    
>>>
>>> Not sure I follow the logic behind this change. Places like
>>> io_iopoll_try_reap_events() just need a "did we find anything" return,
>>> which is independent of whether or not we actually posted CQEs.
>>> Other callers either don't care about the return value at all or only
>>> check whether it's < 0 (which this change won't affect).
>>>
>>> I feel like I'm missing something here, or the commit message needs to
>>> better explain why this change is done.
>>
>> I was wrong in how I described it, but that just means the problem is
>> in a different place.
>>
>> int io_do_iopoll() {
>> 	return nr_events;
>> }
>>
>> int io_iopoll_check() {
>> 	do {
>> 		nr_events += io_do_iopoll();
>> 	} while (nr_events < min && ...);
>> }
>>
>> And "events" there had better be CQEs, otherwise the semantics of
>> @min + CQE_SKIP are unclear and mismatch the non-IOPOLL case.
> 
> Can you do a v2 of this patch? Rest of them look good to me.

I'll send it tomorrow with clarifications added

-- 
Pavel Begunkov

