* [patch based tip/sched/core] sched/debug: remove CONFIG_FAIR_GROUP_SCHED mask
@ 2013-06-28 11:10 Alex Shi
  2013-06-28 16:06 ` Kamalesh Babulal
  2013-06-28 16:09 ` [tip:sched/core] sched/debug: Remove " tip-bot for Alex Shi
  0 siblings, 2 replies; 3+ messages in thread
From: Alex Shi @ 2013-06-28 11:10 UTC (permalink / raw)
  To: mingo, peterz; +Cc: linux-kernel, pjt, kamalesh

We are now using the runnable load average in scheduler load balancing,
so there is no need to keep these debug fields under
CONFIG_FAIR_GROUP_SCHED. Also align the code style to #ifdef instead of
#if defined() and reorder the tg output info.

Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 kernel/sched/debug.c |   10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
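
For reference (not part of the patch itself): a standalone sketch of the
#ifdef layout that print_cfs_rq() ends up with after this change.
SEQ_printf() is replaced by plain printf() and the values are made up,
so the nesting can be compiled and read on its own.

#include <stdio.h>

#define CONFIG_SMP			/* assume an SMP build */
#define CONFIG_FAIR_GROUP_SCHED		/* with group scheduling enabled */

int main(void)
{
	/* always printed, regardless of config */
	printf("  .%-30s: %d\n", "nr_running", 2);
	printf("  .%-30s: %ld\n", "load", 2048L);

#ifdef CONFIG_SMP
	/* per-cfs_rq load tracking: no longer gated on FAIR_GROUP_SCHED */
	printf("  .%-30s: %ld\n", "runnable_load_avg", 1024L);
	printf("  .%-30s: %ld\n", "blocked_load_avg", 0L);
#ifdef CONFIG_FAIR_GROUP_SCHED
	/* task-group stats still need both SMP and FAIR_GROUP_SCHED */
	printf("  .%-30s: %ld\n", "tg_load_contrib", 1024L);
	printf("  .%-30s: %d\n", "tg_runnable_contrib", 50);
	printf("  .%-30s: %ld\n", "tg_load_avg", 1024L);
	printf("  .%-30s: %d\n", "tg->runnable_avg", 50);
#endif
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* group stats do not depend on SMP, hence the separate block:
	 * print_cfs_group_stats(m, cpu, cfs_rq->tg);
	 */
	;
#endif
	return 0;
}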

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 62632098..3d1fe86 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -209,22 +209,24 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->nr_spread_over);
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
-#ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
 	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
 			cfs_rq->runnable_load_avg);
 	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
 			cfs_rq->blocked_load_avg);
-	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
-			atomic_long_read(&cfs_rq->tg->load_avg));
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
 			cfs_rq->tg_load_contrib);
 	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
 			cfs_rq->tg_runnable_contrib);
+	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
+			atomic_long_read(&cfs_rq->tg->load_avg));
 	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
 			atomic_read(&cfs_rq->tg->runnable_avg));
 #endif
+#endif
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
 #endif
 }
@@ -566,7 +568,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 		   "nr_involuntary_switches", (long long)p->nivcsw);
 
 	P(se.load.weight);
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+#ifdef CONFIG_SMP
 	P(se.avg.runnable_avg_sum);
 	P(se.avg.runnable_avg_period);
 	P(se.avg.load_avg_contrib);
-- 
1.7.5.4

