Hi Xi,

I love your patch! Yet something to improve:

[auto build test ERROR on tip/sched/core]
[also build test ERROR on tip/auto-latest linus/master v5.8-rc7]
[cannot apply to block/for-next cgroup/for-next next-20200727]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Xi-Wang/sched-Make-select_idle_sibling-search-domain-configurable/20200728-150328
base:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git 13efa616124f7eec7d6a58adeeef31864aa03879
config: arc-allyesconfig (attached as .config)
compiler: arc-elf-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to the linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=arc

If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot

All errors/warnings (new ones prefixed by >>):

   kernel/sched/fair.c: In function 'select_idle_sibling':
>> kernel/sched/fair.c:6285:27: error: passing argument 2 of 'select_idle_core' from incompatible pointer type [-Werror=incompatible-pointer-types]
    6285 |   r = select_idle_core(p, cpus, target);
         |                           ^~~~
         |                           |
         |                           struct cpumask *
   kernel/sched/fair.c:6096:80: note: expected 'struct sched_domain *' but argument is of type 'struct cpumask *'
    6096 | static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
         |                                                           ~~~~~~~~~~~~~~~~~~~~~^~
   kernel/sched/fair.c: At top level:
>> kernel/sched/fair.c:6754:5: warning: no previous prototype for 'proc_sched_wake_idle_domain_handler' [-Wmissing-prototypes]
    6754 | int proc_sched_wake_idle_domain_handler(struct ctl_table *table,
         |     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   In file included from include/linux/perf_event.h:25,
                    from include/linux/trace_events.h:10,
                    from include/trace/syscall.h:7,
                    from include/linux/syscalls.h:85,
                    from kernel/sched/sched.h:65,
                    from kernel/sched/fair.c:23:
   arch/arc/include/asm/perf_event.h:126:23: warning: 'arc_pmu_cache_map' defined but not used [-Wunused-const-variable=]
     126 | static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
         |                       ^~~~~~~~~~~~~~~~~
   arch/arc/include/asm/perf_event.h:91:27: warning: 'arc_pmu_ev_hw_map' defined but not used [-Wunused-const-variable=]
      91 | static const char * const arc_pmu_ev_hw_map[] = {
         |                           ^~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors
--
>> kernel/cgroup/cpuset.c:1018:6: warning: no previous prototype for '__rebuild_sched_domains' [-Wmissing-prototypes]
    1018 | void __rebuild_sched_domains(int force_update)
         |      ^~~~~~~~~~~~~~~~~~~~~~~
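The error at fair.c:6285 shows up on this config because arc-allyesconfig builds without CONFIG_SCHED_SMT: the caller now passes the cpumask, while the stub reported at fair.c:6096 ("static inline ...") still takes a struct sched_domain *. A minimal sketch of a matching stub, assuming the patch converts the CONFIG_SCHED_SMT variant of select_idle_core() to take a struct cpumask * (illustration only, not the patch's code):

#else /* !CONFIG_SCHED_SMT */

/* Hypothetical stub: mirror the new cpumask-based signature so non-SMT builds keep compiling. */
static inline int select_idle_core(struct task_struct *p, struct cpumask *cpus, int target)
{
	return -1;
}

#endif /* CONFIG_SCHED_SMT */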
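The two -Wmissing-prototypes warnings point at new functions defined without a prior declaration; they go away if the functions are made static or declared in a header before the definition. A sketch for the fair.c handler, assuming it follows the usual ctl_table proc_handler signature and is referenced from outside fair.c (the exact header, e.g. include/linux/sched/sysctl.h, is the author's call); the same idea applies to __rebuild_sched_domains() in kernel/cgroup/cpuset.c:

/* Hypothetical declaration; the parameter list assumes the standard proc_handler signature. */
int proc_sched_wake_idle_domain_handler(struct ctl_table *table, int write,
					 void *buffer, size_t *lenp, loff_t *ppos);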
vim +/select_idle_core +6285 kernel/sched/fair.c

  6196	
  6197	/*
  6198	 * Try and locate an idle core/thread in the sis domain.
  6199	 */
  6200	static int select_idle_sibling(struct task_struct *p, int prev, int target)
  6201	{
  6202		struct sched_domain *sd_asym;
  6203		struct sched_domain *sd[2];
  6204		struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
  6205		int i, r, recent_used_cpu;
  6206	
  6207		/*
  6208		 * For asymmetric CPU capacity systems, our domain of interest is
  6209		 * sd_asym_cpucapacity rather than sd_sis.
  6210		 */
  6211		if (static_branch_unlikely(&sched_asym_cpucapacity)) {
  6212			sd_asym = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
  6213			/*
  6214			 * On an asymmetric CPU capacity system where an exclusive
  6215			 * cpuset defines a symmetric island (i.e. one unique
  6216			 * capacity_orig value through the cpuset), the key will be set
  6217			 * but the CPUs within that cpuset will not have a domain with
  6218			 * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
  6219			 * capacity path.
  6220			 */
  6221			if (!sd_asym)
  6222				goto symmetric;
  6223	
  6224			i = select_idle_capacity(p, sd_asym, target);
  6225			return ((unsigned)i < nr_cpumask_bits) ? i : target;
  6226		}
  6227	
  6228	symmetric:
  6229		if (available_idle_cpu(target) || sched_idle_cpu(target))
  6230			return target;
  6231	
  6232		/*
  6233		 * If the previous CPU is cache affine and idle, don't be stupid:
  6234		 */
  6235		if (prev != target && cpus_share_sis(prev, target) &&
  6236		    (available_idle_cpu(prev) || sched_idle_cpu(prev)))
  6237			return prev;
  6238	
  6239		/*
  6240		 * Allow a per-cpu kthread to stack with the wakee if the
  6241		 * kworker thread and the tasks previous CPUs are the same.
  6242		 * The assumption is that the wakee queued work for the
  6243		 * per-cpu kthread that is now complete and the wakeup is
  6244		 * essentially a sync wakeup. An obvious example of this
  6245		 * pattern is IO completions.
  6246		 */
  6247		if (is_per_cpu_kthread(current) &&
  6248		    prev == smp_processor_id() &&
  6249		    this_rq()->nr_running <= 1) {
  6250			return prev;
  6251		}
  6252	
  6253		/* Check a recently used CPU as a potential idle candidate: */
  6254		recent_used_cpu = p->recent_used_cpu;
  6255		if (recent_used_cpu != prev &&
  6256		    recent_used_cpu != target &&
  6257		    cpus_share_sis(recent_used_cpu, target) &&
  6258		    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
  6259		    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
  6260			/*
  6261			 * Replace recent_used_cpu with prev as it is a potential
  6262			 * candidate for the next wake:
  6263			 */
  6264			p->recent_used_cpu = prev;
  6265			return recent_used_cpu;
  6266		}
  6267	
  6268		for (i = 0; ; i++) {
  6269			if (i == 0) {
  6270				sd[0] = rcu_dereference(per_cpu(sd_sis_pre, target));
  6271				if (!sd[0])
  6272					continue;
  6273				cpumask_and(cpus, sched_domain_span(sd[0]), p->cpus_ptr);
  6274			} else if (i == 1) {
  6275				sd[1] = rcu_dereference(per_cpu(sd_sis, target));
  6276				if (!sd[1])
  6277					continue;
  6278				cpumask_and(cpus, sched_domain_span(sd[1]), p->cpus_ptr);
  6279				if (sd[0])
  6280					cpumask_andnot(cpus, cpus, sched_domain_span(sd[0]));
  6281			} else {
  6282				break;
  6283			}
  6284	
> 6285			r = select_idle_core(p, cpus, target);
  6286			if ((unsigned)r < nr_cpumask_bits)
  6287				return r;
  6288	
  6289			r = select_idle_cpu(p, cpus, (i == 1), sd[i]->span_weight, target);
  6290			if ((unsigned)r < nr_cpumask_bits)
  6291				return r;
  6292	
  6293			r = select_idle_smt(p, target);
  6294			if ((unsigned)r < nr_cpumask_bits)
  6295				return r;
  6296		}
  6297	
  6298		return target;
  6299	}
  6300	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org