* [PATCH] sched,fair: use list_for_each_entry() in print_cfs_stats()
From: Hui Su @ 2020-09-25 19:10 UTC
  To: mingo, peterz, juri.lelli, vincent.guittot, dietmar.eggemann,
	rostedt, bsegall, mgorman, linux-kernel

The for_each_leaf_cfs_rq_safe() macro uses list_for_each_entry_safe(),
which protects against removal of the current list entry while
iterating, but print_cfs_stats() only prints the cfs_rq data and never
removes entries from the list.

Thus, add a for_each_leaf_cfs_rq() macro based on
list_for_each_entry() and use it in print_cfs_stats().

Signed-off-by: Hui Su <sh_def@163.com>
---
 kernel/sched/fair.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1a68a0536add..d40dfb4349b0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -391,11 +391,16 @@ static inline void assert_list_leaf_cfs_rq(struct rq *rq)
 	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
 }
 
-/* Iterate thr' all leaf cfs_rq's on a runqueue */
+/* Iterate thr' all leaf cfs_rq's on a runqueue safely */
 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
 	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
 				 leaf_cfs_rq_list)
 
+/* Iterate thr' all leaf cfs_rq's on a runqueue */
+#define for_each_leaf_cfs_rq(rq, cfs_rq)			\
+	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list,	\
+				 leaf_cfs_rq_list)
+
 /* Do the two (enqueued) entities belong to the same group ? */
 static inline struct cfs_rq *
 is_same_group(struct sched_entity *se, struct sched_entity *pse)
@@ -11185,10 +11190,10 @@ const struct sched_class fair_sched_class
 #ifdef CONFIG_SCHED_DEBUG
 void print_cfs_stats(struct seq_file *m, int cpu)
 {
-	struct cfs_rq *cfs_rq, *pos;
+	struct cfs_rq *cfs_rq;
 
 	rcu_read_lock();
-	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
+	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
 		print_cfs_rq(m, cpu, cfs_rq);
 	rcu_read_unlock();
 }
-- 
2.25.1




* Re: [PATCH] sched,fair: use list_for_each_entry() in print_cfs_stats()
From: Dietmar Eggemann @ 2020-09-29  9:56 UTC
  To: Hui Su, mingo, peterz, juri.lelli, vincent.guittot, rostedt,
	bsegall, mgorman, linux-kernel

On 25/09/2020 21:10, Hui Su wrote:
> The for_each_leaf_cfs_rq_safe() macro uses list_for_each_entry_safe(),
> which protects against removal of the current list entry while
> iterating, but print_cfs_stats() only prints the cfs_rq data and never
> removes entries from the list.
> 
> Thus, add a for_each_leaf_cfs_rq() macro based on
> list_for_each_entry() and use it in print_cfs_stats().
> 
> Signed-off-by: Hui Su <sh_def@163.com>
> ---
>  kernel/sched/fair.c | 11 ++++++++---
>  1 file changed, 8 insertions(+), 3 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 1a68a0536add..d40dfb4349b0 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -391,11 +391,16 @@ static inline void assert_list_leaf_cfs_rq(struct rq *rq)
>  	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
>  }
>  
> -/* Iterate thr' all leaf cfs_rq's on a runqueue */
> +/* Iterate thr' all leaf cfs_rq's on a runqueue safely */
>  #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
>  	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
>  				 leaf_cfs_rq_list)
>  
> +/* Iterate thr' all leaf cfs_rq's on a runqueue */
> +#define for_each_leaf_cfs_rq(rq, cfs_rq)			\
> +	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list,	\
> +				 leaf_cfs_rq_list)
> +
>  /* Do the two (enqueued) entities belong to the same group ? */
>  static inline struct cfs_rq *
>  is_same_group(struct sched_entity *se, struct sched_entity *pse)
> @@ -11185,10 +11190,10 @@ const struct sched_class fair_sched_class
>  #ifdef CONFIG_SCHED_DEBUG
>  void print_cfs_stats(struct seq_file *m, int cpu)
>  {
> -	struct cfs_rq *cfs_rq, *pos;
> +	struct cfs_rq *cfs_rq;
>  
>  	rcu_read_lock();
> -	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
> +	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
>  		print_cfs_rq(m, cpu, cfs_rq);
>  	rcu_read_unlock();
>  }

IMHO, for_each_leaf_cfs_rq_safe() was introduced in commit a9e7f6544b9c
("sched/fair: Fix O(nr_cgroups) in load balance path") and reintroduced
by commit 039ae8bcf7a5 ("sched/fair: Fix O(nr_cgroups) in the load
balancing path") to prevent races between tasks running
print_cfs_stats() and today's __update_blocked_fair() ->
list_del_leaf_cfs_rq(cfs_rq).
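
For reference, the two iterators look roughly like this (simplified
from include/linux/list.h; the exact definitions vary a bit between
kernel versions):

/* plain walk: re-reads the next pointer of the current entry on
 * every iteration */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_first_entry(head, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_next_entry(pos, member))

/* _safe walk: keeps a second cursor (n) one element ahead, so the
 * walk survives the current entry being unlinked by the loop body */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_first_entry(head, typeof(*pos), member),	\
		n = list_next_entry(pos, member);			\
	     &pos->member != (head);					\
	     pos = n, n = list_next_entry(n, member))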

Your patch doesn't compile w/ !CONFIG_FAIR_GROUP_SCHED.
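
(If I'm reading fair.c right, that is because the patch only adds
for_each_leaf_cfs_rq() in the CONFIG_FAIR_GROUP_SCHED block; the
!CONFIG_FAIR_GROUP_SCHED side only provides the _safe walker, roughly:

/* !CONFIG_FAIR_GROUP_SCHED: there is a single cfs_rq per rq and
 * pos is unused */
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)

so print_cfs_stats(), which is built for both configurations under
CONFIG_SCHED_DEBUG, has no for_each_leaf_cfs_rq() to expand there.)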


* Re: [PATCH] sched,fair: use list_for_each_entry() in print_cfs_stats()
From: Hui Su @ 2020-10-08 12:37 UTC
  To: Dietmar Eggemann; +Cc: mingo, linux-kernel, peterz, juri.lelli

On Tue, Sep 29, 2020 at 11:56:42AM +0200, Dietmar Eggemann wrote:
> On 25/09/2020 21:10, Hui Su wrote:
> > The for_each_leaf_cfs_rq_safe() macro uses list_for_each_entry_safe(),
> > which protects against removal of the current list entry while
> > iterating, but print_cfs_stats() only prints the cfs_rq data and never
> > removes entries from the list.
> > 
> > Thus, add a for_each_leaf_cfs_rq() macro based on
> > list_for_each_entry() and use it in print_cfs_stats().
> > 
> > Signed-off-by: Hui Su <sh_def@163.com>
> > ---
> >  kernel/sched/fair.c | 11 ++++++++---
> >  1 file changed, 8 insertions(+), 3 deletions(-)
> > 
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 1a68a0536add..d40dfb4349b0 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -391,11 +391,16 @@ static inline void assert_list_leaf_cfs_rq(struct rq *rq)
> >  	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
> >  }
> >  
> > -/* Iterate thr' all leaf cfs_rq's on a runqueue */
> > +/* Iterate thr' all leaf cfs_rq's on a runqueue safely */
> >  #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
> >  	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
> >  				 leaf_cfs_rq_list)
> >  
> > +/* Iterate thr' all leaf cfs_rq's on a runqueue */
> > +#define for_each_leaf_cfs_rq(rq, cfs_rq)			\
> > +	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list,	\
> > +				 leaf_cfs_rq_list)
> > +
> >  /* Do the two (enqueued) entities belong to the same group ? */
> >  static inline struct cfs_rq *
> >  is_same_group(struct sched_entity *se, struct sched_entity *pse)
> > @@ -11185,10 +11190,10 @@ const struct sched_class fair_sched_class
> >  #ifdef CONFIG_SCHED_DEBUG
> >  void print_cfs_stats(struct seq_file *m, int cpu)
> >  {
> > -	struct cfs_rq *cfs_rq, *pos;
> > +	struct cfs_rq *cfs_rq;
> >  
> >  	rcu_read_lock();
> > -	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
> > +	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
> >  		print_cfs_rq(m, cpu, cfs_rq);
> >  	rcu_read_unlock();
> >  }
> 
> IMHO, for_each_leaf_cfs_rq_safe() was introduced in commit a9e7f6544b9c
> ("sched/fair: Fix O(nr_cgroups) in load balance path") and reintroduced
> by commit 039ae8bcf7a5 ("sched/fair: Fix O(nr_cgroups) in the load
> balancing path") to prevent races between tasks running
> print_cfs_stats() and today's __update_blocked_fair() ->
> list_del_leaf_cfs_rq(cfs_rq).
> 
> Your patch doesn't compile w/ !CONFIG_FAIR_GROUP_SCHED.

Thanks for your explanation. Please ignore this change.


