* [PATCH RFC -rt] mm: perform lru_add_drain_all() remotely
@ 2016-04-28 20:31 Luiz Capitulino
  2016-04-28 22:47 ` Rik van Riel
  0 siblings, 1 reply; 3+ messages in thread
From: Luiz Capitulino @ 2016-04-28 20:31 UTC (permalink / raw)
  To: linux-rt-users; +Cc: riel, bigeasy, tglx, srostedt, williams

lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
on all CPUs that have non-empty LRU pagevecs and then waiting for
the scheduled work to complete. However, workqueue threads may never
have the chance to run on a CPU that's running a SCHED_FIFO task.
This causes lru_add_drain_all() to block forever.
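
For illustration, the pre-patch flow boils down to the condensed
sketch below (simplified, with made-up names drain_work,
drain_per_cpu and drain_all_sketch; not the verbatim kernel source):

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/swap.h>
	#include <linux/workqueue.h>

	/* Queue drain work on each CPU with pending pagevecs, then wait. */
	static DEFINE_PER_CPU(struct work_struct, drain_work);

	static void drain_per_cpu(struct work_struct *dummy)
	{
		lru_add_drain();	/* drains this CPU's pagevecs */
	}

	static void drain_all_sketch(void)
	{
		struct cpumask has_work;
		int cpu;

		cpumask_clear(&has_work);
		for_each_online_cpu(cpu) {
			struct work_struct *work = &per_cpu(drain_work, cpu);

			INIT_WORK(work, drain_per_cpu);
			/* Runs only once a kworker gets CPU time on 'cpu'. */
			schedule_work_on(cpu, work);
			cpumask_set_cpu(cpu, &has_work);
		}
		/* Never returns if a SCHED_FIFO task starves a kworker. */
		for_each_cpu(cpu, &has_work)
			flush_work(&per_cpu(drain_work, cpu));
	}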

This commit fixes the problem by changing lru_add_drain_all()
to drain the LRU pagevecs of remote CPUs directly. This is done
by grabbing the remote CPU's swapvec_lock and calling
lru_add_drain_cpu().
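
On -rt, swapvec_lock is a local_irq_lock backed by a sleeping
spinlock rather than by disabled preemption, so one CPU may legally
acquire another CPU's instance. The core of the change condenses to
the sketch below (mirroring remote_lru_add_drain() from the patch;
the wrapper name here is made up):

	/* Drain a remote CPU's pagevecs in place, under its own lock. */
	static void remote_drain_sketch(int cpu)
	{
		local_lock_other_cpu(swapvec_lock, cpu);
		lru_add_drain_cpu(cpu);	/* drain cpu's pagevecs */
		local_unlock_other_cpu(swapvec_lock, cpu);
	}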

PS: This is based on an idea and initial implementation by
    Rik van Riel.

Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
---
 include/linux/locallock.h | 27 +++++++++++++++++++++++++++
 mm/swap.c                 | 35 +++++++++++++++++++++++++----------
 2 files changed, 52 insertions(+), 10 deletions(-)

diff --git a/include/linux/locallock.h b/include/linux/locallock.h
index 6fe5928..2de478b 100644
--- a/include/linux/locallock.h
+++ b/include/linux/locallock.h
@@ -104,6 +104,17 @@ static inline void __local_unlock(struct local_irq_lock *lv)
 		put_local_var(lvar);				\
 	} while (0)
 
+#define local_lock_other_cpu(lvar, cpu)                         \
+	do {                                                    \
+		__local_lock(&per_cpu(lvar, cpu));              \
+	} while (0)
+
+#define local_unlock_other_cpu(lvar, cpu)                       \
+	do {                                                    \
+		__local_unlock(&per_cpu(lvar, cpu));            \
+	} while (0)
+
+
 static inline void __local_lock_irq(struct local_irq_lock *lv)
 {
 	spin_lock_irqsave(&lv->lock, lv->flags);
@@ -163,6 +174,22 @@ static inline int __local_lock_irqsave(struct local_irq_lock *lv)
 		_flags = per_cpu(lvar, cpu).flags;			\
 	} while (0)
 
+#define local_lock_irqsave_other_cpu(lvar, _flags, cpu)			\
+	do {								\
+		if (cpu == smp_processor_id())				\
+			local_lock_irqsave(lvar, _flags);		\
+		else							\
+			local_lock_other_cpu(lvar, cpu);		\
+	} while (0)
+
+#define local_unlock_irqrestore_other_cpu(lvar, _flags, cpu)	        \
+	do {								\
+		if (cpu == smp_processor_id())				\
+			local_unlock_irqrestore(lvar, _flags);		\
+		else							\
+			local_unlock_other_cpu(lvar, cpu);		\
+	} while (0)
+
 static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
 					    unsigned long flags)
 {
diff --git a/mm/swap.c b/mm/swap.c
index ca194ae..9dc6956 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -821,9 +821,9 @@ void lru_add_drain_cpu(int cpu)
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
-		local_lock_irqsave(rotate_lock, flags);
+		local_lock_irqsave_other_cpu(rotate_lock, flags, cpu);
 		pagevec_move_tail(pvec);
-		local_unlock_irqrestore(rotate_lock, flags);
+		local_unlock_irqrestore_other_cpu(rotate_lock, flags, cpu);
 	}
 
 	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
@@ -866,12 +866,32 @@ void lru_add_drain(void)
 	local_unlock_cpu(swapvec_lock);
 }
 
+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	local_lock_other_cpu(swapvec_lock, cpu);
+	lru_add_drain_cpu(cpu);
+	local_unlock_other_cpu(swapvec_lock, cpu);
+}
+#else
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
 
-static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+	INIT_WORK(work, lru_add_drain_per_cpu);
+	schedule_work_on(cpu, work);
+	cpumask_set_cpu(cpu, has_work);
+
+}
+#endif
+
 
 void lru_add_drain_all(void)
 {
@@ -884,16 +904,11 @@ void lru_add_drain_all(void)
 	cpumask_clear(&has_work);
 
 	for_each_online_cpu(cpu) {
-		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
-
 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
-		    need_activate_page_drain(cpu)) {
-			INIT_WORK(work, lru_add_drain_per_cpu);
-			schedule_work_on(cpu, work);
-			cpumask_set_cpu(cpu, &has_work);
-		}
+		    need_activate_page_drain(cpu))
+			remote_lru_add_drain(cpu, &has_work);
 	}
 
 	for_each_cpu(cpu, &has_work)

* Re: [PATCH RFC -rt] mm: perform lru_add_drain_all() remotely
  2016-04-28 20:31 [PATCH RFC -rt] mm: perform lru_add_drain_all() remotely Luiz Capitulino
@ 2016-04-28 22:47 ` Rik van Riel
  2016-04-29 12:19   ` Luiz Capitulino
  0 siblings, 1 reply; 3+ messages in thread
From: Rik van Riel @ 2016-04-28 22:47 UTC (permalink / raw)
  To: Luiz Capitulino, linux-rt-users; +Cc: bigeasy, tglx, srostedt, williams

On Thu, 2016-04-28 at 16:31 -0400, Luiz Capitulino wrote:
> lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
> on all CPUs that have non-empty LRU pagevecs and then waiting for
> the scheduled work to complete. However, workqueue threads may never
> have the chance to run on a CPU that's running a SCHED_FIFO task.
> This causes lru_add_drain_all() to block forever.
> 
> This commit fixes the problem by changing lru_add_drain_all()
> to drain the LRU pagevecs of remote CPUs directly. This is done
> by grabbing the remote CPU's swapvec_lock and calling
> lru_add_drain_cpu().
> 
> PS: This is based on an idea and initial implementation by
>     Rik van Riel.

I wrote maybe half the code in this patch. It should
probably have my signed-off-by line too :)

Anyway, the patch looks fine to me and seems to work.

  Signed-off-by: Rik van Riel <riel@redhat.com>
> Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
> ---
-- 
All Rights Reversed.

* Re: [PATCH RFC -rt] mm: perform lru_add_drain_all() remotely
  2016-04-28 22:47 ` Rik van Riel
@ 2016-04-29 12:19   ` Luiz Capitulino
  0 siblings, 0 replies; 3+ messages in thread
From: Luiz Capitulino @ 2016-04-29 12:19 UTC (permalink / raw)
  To: Rik van Riel; +Cc: linux-rt-users, bigeasy, tglx, srostedt, williams

On Thu, 28 Apr 2016 18:47:16 -0400
Rik van Riel <riel@redhat.com> wrote:

> On Thu, 2016-04-28 at 16:31 -0400, Luiz Capitulino wrote:
> > lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
> > on all CPUs that have non-empty LRU pagevecs and then waiting for
> > the scheduled work to complete. However, workqueue threads may never
> > have the chance to run on a CPU that's running a SCHED_FIFO task.
> > This causes lru_add_drain_all() to block forever.
> > 
> > This commit fixes the problem by changing lru_add_drain_all()
> > to drain the LRU pagevecs of remote CPUs directly. This is done
> > by grabbing the remote CPU's swapvec_lock and calling
> > lru_add_drain_cpu().
> > 
> > PS: This is based on an idea and initial implementation by
> >     Rik van Riel.  
> 
> I wrote maybe half the code in this patch.

And it was the hard part, sorry for not adding your signed-off-by.

> It should
> probably have my signed-off-by line too :)
> 
> Anyway, the patch looks fine to me and seems to work.
> 
>   Signed-off-by: Rik van Riel <riel@redhat.com>
> > Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
> > ---

