kernel test robot reported some build warnings with W=1 as below: kernel/sched/fair.c:892:34: warning: variable 'stats' set but not used kernel/sched/core.c:10238:42: warning: variable 'stats' set but not used kernel/sched/fair.c:893:29: warning: variable 'p' set but not used kernel/sched/rt.c:1292:29: warning: variable 'p' set but not used kernel/sched/deadline.c:1486:34: warning: variable 'stats' set but not used arch/nds32/include/asm/current.h:10:13: warning: variable '$r25' set but not used These warnings happen when CONFIG_SCHEDSTATS is not set, in which case the schedstat_* helpers compile to no-ops and the variables are assigned but never read. We should add '__maybe_unused' to these variables to fix it. Reported-by: kernel test robot Signed-off-by: Yafang Shao --- kernel/sched/core.c | 4 ++-- kernel/sched/deadline.c | 6 +++--- kernel/sched/fair.c | 12 ++++++------ kernel/sched/rt.c | 6 +++--- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 85e212d3c7ff..7c3476f284d8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3462,7 +3462,7 @@ static inline bool rq_has_pinned_tasks(struct rq *rq) static void ttwu_stat(struct task_struct *p, int cpu, int wake_flags) { - struct rq *rq; + struct rq __maybe_unused *rq; if (!schedstat_enabled()) return; @@ -10235,7 +10235,7 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v) seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); if (schedstat_enabled() && tg != &root_task_group) { - struct sched_statistics *stats; + struct sched_statistics __maybe_unused *stats; u64 ws = 0; int i; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index d2c072b0ef01..f6ececf11725 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1483,7 +1483,7 @@ __schedstats_from_dl_se(struct sched_dl_entity *dl_se) static inline void update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) { - struct sched_statistics *stats; + struct sched_statistics __maybe_unused *stats; 
if (!schedstat_enabled()) return; @@ -1495,7 +1495,7 @@ update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) static inline void update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) { - struct sched_statistics *stats; + struct sched_statistics __maybe_unused *stats; if (!schedstat_enabled()) return; @@ -1507,7 +1507,7 @@ update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) static inline void update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) { - struct sched_statistics *stats; + struct sched_statistics __maybe_unused *stats; if (!schedstat_enabled()) return; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ea13d3d9e540..4d985d0364bc 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -889,8 +889,8 @@ static void update_curr_fair(struct rq *rq) static inline void update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) { - struct sched_statistics *stats; - struct task_struct *p = NULL; + struct sched_statistics __maybe_unused *stats; + struct task_struct __maybe_unused *p = NULL; if (!schedstat_enabled()) return; @@ -906,8 +906,8 @@ update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) static inline void update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) { - struct sched_statistics *stats; - struct task_struct *p = NULL; + struct sched_statistics __maybe_unused *stats; + struct task_struct __maybe_unused *p = NULL; if (!schedstat_enabled()) return; @@ -932,8 +932,8 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) static inline void update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) { - struct sched_statistics *stats; - struct task_struct *tsk = NULL; + struct sched_statistics __maybe_unused *stats; + struct task_struct __maybe_unused *tsk = NULL; if (!schedstat_enabled()) return; diff --git a/kernel/sched/rt.c 
b/kernel/sched/rt.c index bb945f8faeca..e19d1e7190cc 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1288,8 +1288,8 @@ __schedstats_from_rt_se(struct sched_rt_entity *rt_se) static inline void update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) { + struct task_struct __maybe_unused *p = NULL; struct sched_statistics *stats; - struct task_struct *p = NULL; if (!schedstat_enabled()) return; @@ -1307,8 +1307,8 @@ update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) static inline void update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) { + struct task_struct __maybe_unused *p = NULL; struct sched_statistics *stats; - struct task_struct *p = NULL; if (!schedstat_enabled()) return; @@ -1337,8 +1337,8 @@ update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, static inline void update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) { + struct task_struct __maybe_unused *p = NULL; struct sched_statistics *stats; - struct task_struct *p = NULL; if (!schedstat_enabled()) return; -- 2.17.1