[2/2] fs/epoll: deal with wait_queue only once
diff mbox series

Message ID 20181114182532.27981-2-dave@stgolabs.net
State In Next
Commit d8b770d554d053e367934a5315ec234ecfcb9caf
Series
  • [1/2] fs/epoll: rename check_events label to send_events

Commit Message

Davidlohr Bueso Nov. 14, 2018, 6:25 p.m. UTC
There is no reason to rearm the waitqueue upon every
fetch_events retry (for when events are found yet send_events()
fails). If nothing else, this saves four lock operations per
retry, and further reduces the scope of the lock.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
---
 fs/eventpoll.c | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)
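
For illustration only, the following is a small, self-contained sketch
(not kernel code; mock_lock()/mock_unlock() and the two loop functions are
hypothetical stand-ins for spin_lock_irq()/spin_unlock_irq() and the
fetch_events retry path) of why arming the wait queue once per ep_poll()
call, guarded by a waiter flag, avoids two lock/unlock pairs, i.e. four
lock operations, on every retry:

/*
 * Illustrative sketch only. All names here are made up for the example;
 * the real change is the ep_poll() diff below.
 */
#include <stdbool.h>
#include <stdio.h>

static int lock_ops;

static void mock_lock(void)   { lock_ops++; }  /* stands in for spin_lock_irq()   */
static void mock_unlock(void) { lock_ops++; }  /* stands in for spin_unlock_irq() */

/* Old scheme: add and remove the wait queue entry on every retry. */
static int wait_rearm_every_retry(int retries)
{
	for (int i = 0; i < retries; i++) {
		mock_lock();            /* __add_wait_queue_exclusive() */
		mock_unlock();
		/* ... sleep, wake up, find events, fail to send them ... */
		mock_lock();            /* __remove_wait_queue() */
		mock_unlock();
	}
	return lock_ops;
}

/* New scheme: arm once, remember it with a flag, tear down once at the end. */
static int wait_arm_once(int retries)
{
	bool waiter = false;

	for (int i = 0; i < retries; i++) {
		if (!waiter) {
			waiter = true;
			mock_lock();    /* __add_wait_queue_exclusive() */
			mock_unlock();
		}
		/* ... sleep, wake up, find events, fail to send them ... */
	}
	if (waiter) {
		mock_lock();            /* __remove_wait_queue() */
		mock_unlock();
	}
	return lock_ops;
}

int main(void)
{
	lock_ops = 0;
	printf("rearm every retry, 3 retries: %d lock ops\n",
	       wait_rearm_every_retry(3));   /* 12 */
	lock_ops = 0;
	printf("arm once,          3 retries: %d lock ops\n",
	       wait_arm_once(3));            /* 4  */
	return 0;
}

Built with any C99 compiler, the rearm-every-retry variant reports 12 lock
operations for three retries, while the arm-once variant reports 4 in
total, matching the per-retry saving claimed above.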

Comments

Andrew Morton Nov. 14, 2018, 10:51 p.m. UTC | #1
On Wed, 14 Nov 2018 10:25:32 -0800 Davidlohr Bueso <dave@stgolabs.net> wrote:

> There is no reason to rearm the waitqueue upon every
> fetch_events retry (for when events are found yet send_events()
> fails). If nothing else, this saves four lock operations per
> retry, and further reduces the scope of the lock.
> 
> ..
>
> --- a/fs/eventpoll.c
> +++ b/fs/eventpoll.c
> @@ -1749,6 +1749,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
>  {
>  	int res = 0, eavail, timed_out = 0;
>  	u64 slack = 0;
> +	bool waiter = false;
>  	wait_queue_entry_t wait;
>  	ktime_t expires, *to = NULL;
>  
> @@ -1786,6 +1787,15 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
>  	if (eavail)
>  		goto send_events;
>  
> +	if (!waiter) {
> +		waiter = true;
> +		init_waitqueue_entry(&wait, current);
> +
> +		spin_lock_irq(&ep->wq.lock);
> +		__add_wait_queue_exclusive(&ep->wq, &wait);
> +		spin_unlock_irq(&ep->wq.lock);
> +	}
> +
>  	/*
>  	 * Busy poll timed out.  Drop NAPI ID for now, we can add
>  	 * it back in when we have moved a socket with a valid NAPI
> @@ -1798,10 +1808,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
>  	 * We need to sleep here, and we will be wake up by
>  	 * ep_poll_callback() when events will become available.
>  	 */
> -	init_waitqueue_entry(&wait, current);
> -	spin_lock_irq(&ep->wq.lock);
> -	__add_wait_queue_exclusive(&ep->wq, &wait);
> -	spin_unlock_irq(&ep->wq.lock);

Why was this moved to before the ep_reset_busy_poll_napi_id() call? 
That movement placed the code ahead of the block comment which serves
to explain its function.


This?  Which also fixes that comment and reflows it to use 80 cols.

--- a/fs/eventpoll.c~fs-epoll-deal-with-wait_queue-only-once-fix
+++ a/fs/eventpoll.c
@@ -1787,15 +1787,6 @@ fetch_events:
 	if (eavail)
 		goto send_events;
 
-	if (!waiter) {
-		waiter = true;
-		init_waitqueue_entry(&wait, current);
-
-		spin_lock_irq(&ep->wq.lock);
-		__add_wait_queue_exclusive(&ep->wq, &wait);
-		spin_unlock_irq(&ep->wq.lock);
-	}
-
 	/*
 	 * Busy poll timed out.  Drop NAPI ID for now, we can add
 	 * it back in when we have moved a socket with a valid NAPI
@@ -1804,10 +1795,18 @@ fetch_events:
 	ep_reset_busy_poll_napi_id(ep);
 
 	/*
-	 * We don't have any available event to return to the caller.
-	 * We need to sleep here, and we will be wake up by
-	 * ep_poll_callback() when events will become available.
+	 * We don't have any available event to return to the caller.  We need
+	 * to sleep here, and we will be woken by ep_poll_callback() when events
+	 * become available.
 	 */
+	if (!waiter) {
+		waiter = true;
+		init_waitqueue_entry(&wait, current);
+
+		spin_lock_irq(&ep->wq.lock);
+		__add_wait_queue_exclusive(&ep->wq, &wait);
+		spin_unlock_irq(&ep->wq.lock);
+	}
 
 	for (;;) {
 		/*
Davidlohr Bueso Nov. 15, 2018, 1:04 a.m. UTC | #2
On Wed, 14 Nov 2018, Andrew Morton wrote:

>Why was this moved to before the ep_reset_busy_poll_napi_id() call?
>That movement placed the code ahead of the block comment which serves
>to explain its function.

Yikes, that was a brain fart.

>
>
>This?  Which also fixes that comment and reflows it to use 80 cols.

Looks good, thanks.

>
>--- a/fs/eventpoll.c~fs-epoll-deal-with-wait_queue-only-once-fix
>+++ a/fs/eventpoll.c
>@@ -1787,15 +1787,6 @@ fetch_events:
> 	if (eavail)
> 		goto send_events;
>
>-	if (!waiter) {
>-		waiter = true;
>-		init_waitqueue_entry(&wait, current);
>-
>-		spin_lock_irq(&ep->wq.lock);
>-		__add_wait_queue_exclusive(&ep->wq, &wait);
>-		spin_unlock_irq(&ep->wq.lock);
>-	}
>-
> 	/*
> 	 * Busy poll timed out.  Drop NAPI ID for now, we can add
> 	 * it back in when we have moved a socket with a valid NAPI
>@@ -1804,10 +1795,18 @@ fetch_events:
> 	ep_reset_busy_poll_napi_id(ep);
>
> 	/*
>-	 * We don't have any available event to return to the caller.
>-	 * We need to sleep here, and we will be wake up by
>-	 * ep_poll_callback() when events will become available.
>+	 * We don't have any available event to return to the caller.  We need
>+	 * to sleep here, and we will be woken by ep_poll_callback() when events
>+	 * become available.
> 	 */
>+	if (!waiter) {
>+		waiter = true;
>+		init_waitqueue_entry(&wait, current);
>+
>+		spin_lock_irq(&ep->wq.lock);
>+		__add_wait_queue_exclusive(&ep->wq, &wait);
>+		spin_unlock_irq(&ep->wq.lock);
>+	}
>
> 	for (;;) {
> 		/*
>_
>

Patch
diff mbox series

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 7e2b5f3d6b3e..25b0c94cc091 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1749,6 +1749,7 @@  static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 {
 	int res = 0, eavail, timed_out = 0;
 	u64 slack = 0;
+	bool waiter = false;
 	wait_queue_entry_t wait;
 	ktime_t expires, *to = NULL;
 
@@ -1786,6 +1787,15 @@  static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	if (eavail)
 		goto send_events;
 
+	if (!waiter) {
+		waiter = true;
+		init_waitqueue_entry(&wait, current);
+
+		spin_lock_irq(&ep->wq.lock);
+		__add_wait_queue_exclusive(&ep->wq, &wait);
+		spin_unlock_irq(&ep->wq.lock);
+	}
+
 	/*
 	 * Busy poll timed out.  Drop NAPI ID for now, we can add
 	 * it back in when we have moved a socket with a valid NAPI
@@ -1798,10 +1808,6 @@  static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	 * We need to sleep here, and we will be wake up by
 	 * ep_poll_callback() when events will become available.
 	 */
-	init_waitqueue_entry(&wait, current);
-	spin_lock_irq(&ep->wq.lock);
-	__add_wait_queue_exclusive(&ep->wq, &wait);
-	spin_unlock_irq(&ep->wq.lock);
 
 	for (;;) {
 		/*
@@ -1836,10 +1842,6 @@  static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 
 	__set_current_state(TASK_RUNNING);
 
-	spin_lock_irq(&ep->wq.lock);
-	__remove_wait_queue(&ep->wq, &wait);
-	spin_unlock_irq(&ep->wq.lock);
-
 send_events:
 	/*
 	 * Try to transfer events to user space. In case we get 0 events and
@@ -1850,6 +1852,12 @@  static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	    !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
 		goto fetch_events;
 
+	if (waiter) {
+		spin_lock_irq(&ep->wq.lock);
+		__remove_wait_queue(&ep->wq, &wait);
+		spin_unlock_irq(&ep->wq.lock);
+	}
+
 	return res;
 }