linux-kernel.vger.kernel.org archive mirror
* [PATCH] sched/pelt: fix warning and clean up irq pelt config
From: Vincent Guittot @ 2018-09-25  9:17 UTC
  To: peterz, mingo, linux-kernel
  Cc: miguel.ojeda.sandonis, bp, dou_liyang, Vincent Guittot

Create a config option for enabling IRQ load tracking in the scheduler.
IRQ load tracking is useful only when IRQ or paravirtual time is
accounted, but for now that is only possible with SMP.
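
Because the new symbol is a def_bool without a prompt, it is never
user-selectable: Kconfig enables it automatically whenever both
dependency lines are satisfied. The net effect, distilled from the
sched.h hunk below, is that each compound preprocessor test collapses
into a single generated symbol:

  /* Before: every user repeats the compound test */
  #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
  	struct sched_avg	avg_irq;
  #endif

  /* After: Kconfig computes the condition once; users test one symbol */
  #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
  	struct sched_avg	avg_irq;
  #endif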

Also use __maybe_unused to remove the compilation warning in
update_rq_clock_task() that was introduced by:
  commit 2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()")
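
For reference, __maybe_unused is the kernel's wrapper around the
compiler's "unused" attribute. A minimal standalone sketch of the
warning it suppresses (not part of the patch; read_steal_time() is a
hypothetical helper standing in for the real accounting code):

  /* __maybe_unused expands to the compiler's "unused" attribute, so
   * -Wunused-variable stays quiet when every conditional user of the
   * variable is compiled out.
   */
  #define __maybe_unused __attribute__((__unused__))

  extern long long read_steal_time(void);	/* hypothetical helper */

  static long long read_clock_delta(void)
  {
  	long long __maybe_unused steal = 0;	/* no warning when unused */
  	long long delta = 0;

  #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
  	steal = read_steal_time();
  	delta -= steal;
  #endif
  	return delta;
  }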

Reported-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
Reported-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Suggested-by: Ingo Molnar <mingo@redhat.com>
Fixes: 2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()")
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 init/Kconfig         | 5 +++++
 kernel/sched/core.c  | 7 +++----
 kernel/sched/fair.c  | 2 +-
 kernel/sched/pelt.c  | 2 +-
 kernel/sched/pelt.h  | 2 +-
 kernel/sched/sched.h | 5 ++---
 6 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/init/Kconfig b/init/Kconfig
index 1e234e2..317d5cc 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -415,6 +415,11 @@ config IRQ_TIME_ACCOUNTING
 
 	  If in doubt, say N here.
 
+config HAVE_SCHED_AVG_IRQ
+	def_bool y
+	depends on IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING
+	depends on SMP
+
 config BSD_PROCESS_ACCT
 	bool "BSD Process Accounting"
 	depends on MULTIUSER
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 625bc98..bf7b745 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -135,9 +135,8 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
  * In theory, the compile should just see 0 here, and optimize out the call
  * to sched_rt_avg_update. But I don't trust it...
  */
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-	s64 steal = 0, irq_delta = 0;
-#endif
+	s64 __maybe_unused steal = 0, irq_delta = 0;
+
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
 
@@ -177,7 +176,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 
 	rq->clock_task += delta;
 
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
 		update_irq_load_avg(rq, irq_delta + steal);
 #endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6bd142d..2c05aac 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7249,7 +7249,7 @@ static inline bool others_have_blocked(struct rq *rq)
 	if (READ_ONCE(rq->avg_dl.util_avg))
 		return true;
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 	if (READ_ONCE(rq->avg_irq.util_avg))
 		return true;
 #endif
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 35475c0..48a1264 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -358,7 +358,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
 	return 0;
 }
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 /*
  * irq:
  *
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index d2894db..7e56b48 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -6,7 +6,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 int update_irq_load_avg(struct rq *rq, u64 running);
 #else
 static inline int
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3a4ef8f..f3477e0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -861,8 +861,7 @@ struct rq {
 
 	struct sched_avg	avg_rt;
 	struct sched_avg	avg_dl;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-#define HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 	struct sched_avg	avg_irq;
 #endif
 	u64			idle_stamp;
@@ -2222,7 +2221,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
 }
 #endif
 
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 static inline unsigned long cpu_util_irq(struct rq *rq)
 {
 	return rq->avg_irq.util_avg;
-- 
2.7.4


* Re: [PATCH] sched/pelt: fix warning and clean up irq pelt config
From: Vincent Guittot @ 2018-10-02  7:09 UTC
  To: Peter Zijlstra, Ingo Molnar, linux-kernel
  Cc: Miguel Ojeda, Borislav Petkov, dou_liyang

On Tue, 25 Sep 2018 at 11:17, Vincent Guittot
<vincent.guittot@linaro.org> wrote:
>
> Create a config option for enabling IRQ load tracking in the scheduler.
> IRQ load tracking is useful only when IRQ or paravirtual time is
> accounted, but for now that is only possible with SMP.
>
> Also use __maybe_unused to remove the compilation warning in
> update_rq_clock_task() that was introduced by:
>   commit 2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()")

Gentle ping.


> [...]

* [tip:sched/core] sched/pelt: Fix warning and clean up IRQ PELT config
From: tip-bot for Vincent Guittot @ 2018-10-02 10:06 UTC
  To: linux-tip-commits
  Cc: mingo, miguel.ojeda.sandonis, peterz, hpa, torvalds, tglx, mingo,
	vincent.guittot, douly.fnst, linux-kernel

Commit-ID:  11d4afd4ff667f9b6178ee8c142c36cb78bd84db
Gitweb:     https://git.kernel.org/tip/11d4afd4ff667f9b6178ee8c142c36cb78bd84db
Author:     Vincent Guittot <vincent.guittot@linaro.org>
AuthorDate: Tue, 25 Sep 2018 11:17:42 +0200
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 2 Oct 2018 09:45:00 +0200

sched/pelt: Fix warning and clean up IRQ PELT config

Create a config option for enabling IRQ load tracking in the scheduler.
IRQ load tracking is useful only when IRQ or paravirtual time is
accounted, but for now that is only possible with SMP.

Also use __maybe_unused to remove the compilation warning in
update_rq_clock_task() that was introduced by:

  2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()")

Suggested-by: Ingo Molnar <mingo@redhat.com>
Reported-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
Reported-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bp@alien8.de
Cc: dou_liyang@163.com
Fixes: 2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()")
Link: http://lkml.kernel.org/r/1537867062-27285-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 init/Kconfig         | 5 +++++
 kernel/sched/core.c  | 7 +++----
 kernel/sched/fair.c  | 2 +-
 kernel/sched/pelt.c  | 2 +-
 kernel/sched/pelt.h  | 2 +-
 kernel/sched/sched.h | 5 ++---
 6 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/init/Kconfig b/init/Kconfig
index 1e234e2f1cba..317d5ccb5191 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -415,6 +415,11 @@ config IRQ_TIME_ACCOUNTING
 
 	  If in doubt, say N here.
 
+config HAVE_SCHED_AVG_IRQ
+	def_bool y
+	depends on IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING
+	depends on SMP
+
 config BSD_PROCESS_ACCT
 	bool "BSD Process Accounting"
 	depends on MULTIUSER
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ad97f3ba5ec5..f2caf1bae4a3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -135,9 +135,8 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
  * In theory, the compile should just see 0 here, and optimize out the call
  * to sched_rt_avg_update. But I don't trust it...
  */
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-	s64 steal = 0, irq_delta = 0;
-#endif
+	s64 __maybe_unused steal = 0, irq_delta = 0;
+
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
 
@@ -177,7 +176,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 
 	rq->clock_task += delta;
 
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
 		update_irq_load_avg(rq, irq_delta + steal);
 #endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1d92ed2eca8b..d59307ecd67d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7317,7 +7317,7 @@ static inline bool others_have_blocked(struct rq *rq)
 	if (READ_ONCE(rq->avg_dl.util_avg))
 		return true;
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 	if (READ_ONCE(rq->avg_irq.util_avg))
 		return true;
 #endif
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 35475c0c5419..48a126486435 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -358,7 +358,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
 	return 0;
 }
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 /*
  * irq:
  *
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index d2894db28955..7e56b489ff32 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -6,7 +6,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
 
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 int update_irq_load_avg(struct rq *rq, u64 running);
 #else
 static inline int
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 632804fa0b12..798b1afd5092 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -862,8 +862,7 @@ struct rq {
 
 	struct sched_avg	avg_rt;
 	struct sched_avg	avg_dl;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-#define HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 	struct sched_avg	avg_irq;
 #endif
 	u64			idle_stamp;
@@ -2223,7 +2222,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
 }
 #endif
 
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 static inline unsigned long cpu_util_irq(struct rq *rq)
 {
 	return rq->avg_irq.util_avg;
