From: Luiz Capitulino <lcapitulino@redhat.com>
To: Rik van Riel <riel@redhat.com>
Cc: linux-rt-users@vger.kernel.org, bigeasy@linutronix.de,
	tglx@linutronix.de, srostedt@redhat.com, williams@redhat.com
Subject: Re: [PATCH RFC -rt] mm: perform lru_add_drain_all() remotely
Date: Fri, 29 Apr 2016 08:19:50 -0400
Message-ID: <20160429081950.0d8c5afd@redhat.com>
In-Reply-To: <1461883636.13397.58.camel@redhat.com>

On Thu, 28 Apr 2016 18:47:16 -0400
Rik van Riel <riel@redhat.com> wrote:

> On Thu, 2016-04-28 at 16:31 -0400, Luiz Capitulino wrote:
> > lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
> > on all CPUs that have non-empty LRU pagevecs and then waiting for
> > the scheduled work to complete. However, workqueue threads may never
> > have the chance to run on a CPU that's running a SCHED_FIFO task.
> > This causes lru_add_drain_all() to block forever.
> > 
> > This commit solves the problem by changing lru_add_drain_all()
> > to drain the LRU pagevecs of remote CPUs directly: it grabs the
> > remote CPU's swapvec_lock and calls lru_add_drain_cpu() on that
> > CPU's behalf.
> > 
> > PS: This is based on an idea and initial implementation by
> >     Rik van Riel.  
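For readers without the tree handy, the flow described above is roughly
the sketch below. This is simplified from the pre-patch code path (the
serializing mutex and hotplug locking are elided), not verbatim kernel
source:

	static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

	static void lru_add_drain_per_cpu(struct work_struct *dummy)
	{
		lru_add_drain();	/* drains the pagevecs of the CPU it runs on */
	}

	void lru_add_drain_all(void)
	{
		static struct cpumask has_work;
		int cpu;

		cpumask_clear(&has_work);
		for_each_online_cpu(cpu) {
			struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

			if (/* cpu has non-empty pagevecs */ 1) {
				INIT_WORK(work, lru_add_drain_per_cpu);
				schedule_work_on(cpu, work);	/* queued on cpu's kworker */
				cpumask_set_cpu(cpu, &has_work);
			}
		}

		/* If a SCHED_FIFO task monopolizes a CPU, that CPU's kworker
		 * never runs and the flush below waits forever. */
		for_each_cpu(cpu, &has_work)
			flush_work(&per_cpu(lru_add_drain_work, cpu));
	}
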
> 
> I wrote maybe half the code in this patch.

And it was the hard part; sorry for not adding your Signed-off-by.

> It should
> probably have my signed-off-by line too :)
> 
> Anyway, the patch looks fine to me and seems to work.
> 
>   Signed-off-by: Rik van Riel <riel@redhat.com>
> > Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
> > ---
> >  include/linux/locallock.h | 27 +++++++++++++++++++++++++++
> >  mm/swap.c                 | 35 +++++++++++++++++++++++++----------
> >  2 files changed, 52 insertions(+), 10 deletions(-)
> > 
> > diff --git a/include/linux/locallock.h b/include/linux/locallock.h
> > index 6fe5928..2de478b 100644
> > --- a/include/linux/locallock.h
> > +++ b/include/linux/locallock.h
> > @@ -104,6 +104,17 @@ static inline void __local_unlock(struct local_irq_lock *lv)
> >  		put_local_var(lvar);				\
> >  	} while (0)
> >  
> > +#define local_lock_other_cpu(lvar, cpu)				\
> > +	do {								\
> > +		__local_lock(&per_cpu(lvar, cpu));			\
> > +	} while (0)
> > +
> > +#define local_unlock_other_cpu(lvar, cpu)			\
> > +	do {								\
> > +		__local_unlock(&per_cpu(lvar, cpu));			\
> > +	} while (0)
> > +
> > +
> >  static inline void __local_lock_irq(struct local_irq_lock *lv)
> >  {
> >  	spin_lock_irqsave(&lv->lock, lv->flags);
> > @@ -163,6 +174,22 @@ static inline int __local_lock_irqsave(struct local_irq_lock *lv)
> >  		_flags = per_cpu(lvar, cpu).flags;			\
> >  	} while (0)
> >  
> > +#define local_lock_irqsave_other_cpu(lvar, _flags, cpu)		\
> > +	do {								\
> > +		if (cpu == smp_processor_id())				\
> > +			local_lock_irqsave(lvar, _flags);		\
> > +		else							\
> > +			local_lock_other_cpu(lvar, cpu);		\
> > +	} while (0)
> > +
> > +#define local_unlock_irqrestore_other_cpu(lvar, _flags, cpu)	\
> > +	do {								\
> > +		if (cpu == smp_processor_id())				\
> > +			local_unlock_irqrestore(lvar, _flags);		\
> > +		else							\
> > +			local_unlock_other_cpu(lvar, cpu);		\
> > +	} while (0)
> > +
> >  static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
> >  					    unsigned long flags)
> >  {
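As an aside, before the mm/swap.c half applies them: the new helpers
would be used like the hypothetical caller below, where my_lock, cpu,
and flags are illustrative names, not part of the patch. On the local
CPU they behave exactly like local_lock_irqsave()/local_unlock_irqrestore();
for any other CPU they fall back to taking that CPU's underlying lock:

	DEFINE_LOCAL_IRQ_LOCK(my_lock);

	void touch_remote_state(int cpu)
	{
		unsigned long flags;

		local_lock_irqsave_other_cpu(my_lock, flags, cpu);
		/* ... safely modify cpu's my_lock-protected per-CPU state ... */
		local_unlock_irqrestore_other_cpu(my_lock, flags, cpu);
	}
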
> > diff --git a/mm/swap.c b/mm/swap.c
> > index ca194ae..9dc6956 100644
> > --- a/mm/swap.c
> > +++ b/mm/swap.c
> > @@ -821,9 +821,9 @@ void lru_add_drain_cpu(int cpu)
> >  		unsigned long flags;
> >  
> >  		/* No harm done if a racing interrupt already did this */
> > -		local_lock_irqsave(rotate_lock, flags);
> > +		local_lock_irqsave_other_cpu(rotate_lock, flags, cpu);
> >  		pagevec_move_tail(pvec);
> > -		local_unlock_irqrestore(rotate_lock, flags);
> > +		local_unlock_irqrestore_other_cpu(rotate_lock, flags, cpu);
> >  	}
> >  
> >  	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
> > @@ -866,12 +866,32 @@ void lru_add_drain(void)
> >  	local_unlock_cpu(swapvec_lock);
> >  }
> >  
> > +static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
> > +
> > +#ifdef CONFIG_PREEMPT_RT_BASE
> > +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
> > +{
> > +	local_lock_other_cpu(swapvec_lock, cpu);
> > +	lru_add_drain_cpu(cpu);
> > +	local_unlock_other_cpu(swapvec_lock, cpu);
> > +}
> > +#else
> >  static void lru_add_drain_per_cpu(struct work_struct *dummy)
> >  {
> >  	lru_add_drain();
> >  }
> >  
> > -static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
> > +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
> > +{
> > +	struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
> > +
> > +	INIT_WORK(work, lru_add_drain_per_cpu);
> > +	schedule_work_on(cpu, work);
> > +	cpumask_set_cpu(cpu, has_work);
> > +
> > +}
> > +#endif
> > +
> >  
> >  void lru_add_drain_all(void)
> >  {
> > @@ -884,16 +904,11 @@ void lru_add_drain_all(void)
> >  	cpumask_clear(&has_work);
> >  
> >  	for_each_online_cpu(cpu) {
> > -		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
> > -
> >  		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
> >  		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
> >  		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
> > -		    need_activate_page_drain(cpu)) {
> > -			INIT_WORK(work, lru_add_drain_per_cpu);
> > -			schedule_work_on(cpu, work);
> > -			cpumask_set_cpu(cpu, &has_work);
> > -		}
> > +		    need_activate_page_drain(cpu))
> > +			remote_lru_add_drain(cpu, &has_work);
> >  	}
> >  
> >  	for_each_cpu(cpu, &has_work)  
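
The key point, and the reason the remote path is guarded by
CONFIG_PREEMPT_RT_BASE, is that on -rt a local lock wraps a real
spinlock rather than mere preempt/IRQ disabling, so another CPU can
legitimately take it and drain on the owner's behalf. Schematically
(simplified; the real struct also carries flags and owner/nesting
bookkeeping):

	struct local_irq_lock {
		spinlock_t lock;	/* rtmutex-backed "sleeping" spinlock on -rt */
		/* ... flags, owner, nesting depth ... */
	};

	/* What remote_lru_add_drain() boils down to on -rt: serialize with
	 * cpu's own pagevec users by taking cpu's instance of the lock. */
	spin_lock(&per_cpu(swapvec_lock, cpu).lock);
	lru_add_drain_cpu(cpu);
	spin_unlock(&per_cpu(swapvec_lock, cpu).lock);

On !RT this trick is impossible, since the "lock" there is just
preemption disabling, which only the owning CPU can do; that is why the
workqueue path is kept for the non-RT case.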
