* [mel:sched-imbalancespread-v3r1 5/5] kernel/sched/fair.c:8897:8: error: implicit declaration of function 'allow_numa_imbalance'
From: kernel test robot <lkp@intel.com>
Date: 2020-11-19 21:45 UTC
To: kbuild-all
tree: https://git.kernel.org/pub/scm/linux/kernel/git/mel/linux.git sched-imbalancespread-v3r1
head: 6c487a4a345505854f9a80fd57197b9b06cf2a48
commit: 6c487a4a345505854f9a80fd57197b9b06cf2a48 [5/5] sched: Limit the amount of NUMA imbalance that can exist at fork time
config: arm64-randconfig-r001-20201119 (attached as .config)
compiler: clang version 12.0.0 (https://github.com/llvm/llvm-project b2613fb2f0f53691dd0211895afbb9413457fca7)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install arm64 cross compiling tool for clang build
        # apt-get install binutils-aarch64-linux-gnu
        # https://git.kernel.org/pub/scm/linux/kernel/git/mel/linux.git/commit/?id=6c487a4a345505854f9a80fd57197b9b06cf2a48
        git remote add mel https://git.kernel.org/pub/scm/linux/kernel/git/mel/linux.git
        git fetch --no-tags mel sched-imbalancespread-v3r1
        git checkout 6c487a4a345505854f9a80fd57197b9b06cf2a48
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=arm64
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
   kernel/sched/fair.c:5371:6: warning: no previous prototype for function 'init_cfs_bandwidth' [-Wmissing-prototypes]
   void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
        ^
   kernel/sched/fair.c:5371:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
   ^
   static
>> kernel/sched/fair.c:8897:8: error: implicit declaration of function 'allow_numa_imbalance' [-Werror,-Wimplicit-function-declaration]
           if (allow_numa_imbalance(local_sgs.sum_nr_running, sd->span_weight))
               ^
>> kernel/sched/fair.c:9006:20: error: static declaration of 'allow_numa_imbalance' follows non-static declaration
   static inline bool allow_numa_imbalance(int dst_running, int dst_weight)
                      ^
   kernel/sched/fair.c:8897:8: note: previous implicit declaration is here
           if (allow_numa_imbalance(local_sgs.sum_nr_running, sd->span_weight))
               ^
   kernel/sched/fair.c:11171:6: warning: no previous prototype for function 'free_fair_sched_group' [-Wmissing-prototypes]
   void free_fair_sched_group(struct task_group *tg) { }
        ^
   kernel/sched/fair.c:11171:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   void free_fair_sched_group(struct task_group *tg) { }
   ^
   static
   kernel/sched/fair.c:11173:5: warning: no previous prototype for function 'alloc_fair_sched_group' [-Wmissing-prototypes]
   int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
       ^
   kernel/sched/fair.c:11173:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
   ^
   static
   kernel/sched/fair.c:11178:6: warning: no previous prototype for function 'online_fair_sched_group' [-Wmissing-prototypes]
   void online_fair_sched_group(struct task_group *tg) { }
        ^
   kernel/sched/fair.c:11178:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   void online_fair_sched_group(struct task_group *tg) { }
   ^
   static
   kernel/sched/fair.c:11180:6: warning: no previous prototype for function 'unregister_fair_sched_group' [-Wmissing-prototypes]
   void unregister_fair_sched_group(struct task_group *tg) { }
        ^
   kernel/sched/fair.c:11180:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   void unregister_fair_sched_group(struct task_group *tg) { }
   ^
   static
5 warnings and 2 errors generated.
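
The two errors share a single root cause: allow_numa_imbalance() is called at kernel/sched/fair.c:8897 inside find_idlest_group(), but its static definition does not appear until line 9006. C requires a declaration to be in scope before use, so clang synthesizes an implicit declaration at the call site (an error under -Werror,-Wimplicit-function-declaration) and then rejects the later static definition as conflicting with it. One minimal fix, sketched below as an assumption rather than the patch the author will necessarily apply, is a forward declaration above the first caller; moving the whole definition above find_idlest_group() would work equally well:

   /*
    * Hypothetical fix sketch: forward-declare the helper above
    * find_idlest_group() so the call at line 8897 sees a prototype
    * matching the static definition at line 9006.
    */
   static inline bool allow_numa_imbalance(int dst_running, int dst_weight);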
vim +/allow_numa_imbalance +8897 kernel/sched/fair.c
  8764
  8765  /*
  8766   * find_idlest_group() finds and returns the least busy CPU group within the
  8767   * domain.
  8768   *
  8769   * Assumes p is allowed on at least one CPU in sd.
  8770   */
  8771  static struct sched_group *
  8772  find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
  8773  {
  8774          struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
  8775          struct sg_lb_stats local_sgs, tmp_sgs;
  8776          struct sg_lb_stats *sgs;
  8777          unsigned long imbalance;
  8778          struct sg_lb_stats idlest_sgs = {
  8779                          .avg_load = UINT_MAX,
  8780                          .group_type = group_overloaded,
  8781          };
  8782
  8783          do {
  8784                  int local_group;
  8785
  8786                  /* Skip over this group if it has no CPUs allowed */
  8787                  if (!cpumask_intersects(sched_group_span(group),
  8788                                          p->cpus_ptr))
  8789                          continue;
  8790
  8791                  local_group = cpumask_test_cpu(this_cpu,
  8792                                                 sched_group_span(group));
  8793
  8794                  if (local_group) {
  8795                          sgs = &local_sgs;
  8796                          local = group;
  8797                  } else {
  8798                          sgs = &tmp_sgs;
  8799                  }
  8800
  8801                  update_sg_wakeup_stats(sd, group, sgs, p);
  8802
  8803                  if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
  8804                          idlest = group;
  8805                          idlest_sgs = *sgs;
  8806                  }
  8807
  8808          } while (group = group->next, group != sd->groups);
  8809
  8810
  8811          /* There is no idlest group to push tasks to */
  8812          if (!idlest)
  8813                  return NULL;
  8814
  8815          /* The local group has been skipped because of CPU affinity */
  8816          if (!local)
  8817                  return idlest;
  8818
  8819          /*
  8820           * If the local group is idler than the selected idlest group
  8821           * don't try and push the task.
  8822           */
  8823          if (local_sgs.group_type < idlest_sgs.group_type)
  8824                  return NULL;
  8825
  8826          /*
  8827           * If the local group is busier than the selected idlest group
  8828           * try and push the task.
  8829           */
  8830          if (local_sgs.group_type > idlest_sgs.group_type)
  8831                  return idlest;
  8832
  8833          switch (local_sgs.group_type) {
  8834          case group_overloaded:
  8835          case group_fully_busy:
  8836
  8837                  /* Calculate allowed imbalance based on load */
  8838                  imbalance = scale_load_down(NICE_0_LOAD) *
  8839                                  (sd->imbalance_pct-100) / 100;
  8840
  8841                  /*
  8842                   * When comparing groups across NUMA domains, it's possible for
  8843                   * the local domain to be very lightly loaded relative to the
  8844                   * remote domains but "imbalance" skews the comparison making
  8845                   * remote CPUs look much more favourable. When considering
  8846                   * cross-domain, add imbalance to the load on the remote node
  8847                   * and consider staying local.
  8848                   */
  8849
  8850                  if ((sd->flags & SD_NUMA) &&
  8851                      ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load))
  8852                          return NULL;
  8853
  8854                  /*
  8855                   * If the local group is less loaded than the selected
  8856                   * idlest group don't try and push any tasks.
  8857                   */
  8858                  if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance))
  8859                          return NULL;
  8860
  8861                  if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load)
  8862                          return NULL;
  8863                  break;
  8864
  8865          case group_imbalanced:
  8866          case group_asym_packing:
  8867                  /* Those type are not used in the slow wakeup path */
  8868                  return NULL;
  8869
  8870          case group_misfit_task:
  8871                  /* Select group with the highest max capacity */
  8872                  if (local->sgc->max_capacity >= idlest->sgc->max_capacity)
  8873                          return NULL;
  8874                  break;
  8875
  8876          case group_has_spare:
  8877                  if (sd->flags & SD_NUMA) {
  8878  #ifdef CONFIG_NUMA_BALANCING
  8879                          int idlest_cpu;
  8880                          /*
  8881                           * If there is spare capacity at NUMA, try to select
  8882                           * the preferred node
  8883                           */
  8884                          if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
  8885                                  return NULL;
  8886
  8887                          idlest_cpu = cpumask_first(sched_group_span(idlest));
  8888                          if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
  8889                                  return idlest;
  8890  #endif
  8891                          /*
  8892                           * Otherwise, keep the task on this node to stay close
  8893                           * its wakeup source and improve locality. If there is
  8894                           * a real need of migration, periodic load balance will
  8895                           * take care of it.
  8896                           */
> 8897                          if (allow_numa_imbalance(local_sgs.sum_nr_running, sd->span_weight))
  8898                                  return NULL;
  8899                  }
  8900
  8901                  /*
  8902                   * Select group with highest number of idle CPUs. We could also
  8903                   * compare the utilization which is more stable but it can end
  8904                   * up that the group has less spare capacity but finally more
  8905                   * idle CPUs which means more opportunity to run task.
  8906                   */
  8907                  if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus)
  8908                          return NULL;
  8909                  break;
  8910          }
  8911
  8912          return idlest;
  8913  }
  8914
  8915  /**
  8916   * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  8917   * @env: The load balancing environment.
  8918   * @sds: variable to hold the statistics for this sched_domain.
  8919   */
  8920
  8921  static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
  8922  {
  8923          struct sched_domain *child = env->sd->child;
  8924          struct sched_group *sg = env->sd->groups;
  8925          struct sg_lb_stats *local = &sds->local_stat;
  8926          struct sg_lb_stats tmp_sgs;
  8927          int sg_status = 0;
  8928
  8929  #ifdef CONFIG_NO_HZ_COMMON
  8930          if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
  8931                  env->flags |= LBF_NOHZ_STATS;
  8932  #endif
  8933
  8934          do {
  8935                  struct sg_lb_stats *sgs = &tmp_sgs;
  8936                  int local_group;
  8937
  8938                  local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
  8939                  if (local_group) {
  8940                          sds->local = sg;
  8941                          sgs = local;
  8942
  8943                          if (env->idle != CPU_NEWLY_IDLE ||
  8944                              time_after_eq(jiffies, sg->sgc->next_update))
  8945                                  update_group_capacity(env->sd, env->dst_cpu);
  8946                  }
  8947
  8948                  update_sg_lb_stats(env, sg, sgs, &sg_status);
  8949
  8950                  if (local_group)
  8951                          goto next_group;
  8952
  8953
  8954                  if (update_sd_pick_busiest(env, sds, sg, sgs)) {
  8955                          sds->busiest = sg;
  8956                          sds->busiest_stat = *sgs;
  8957                  }
  8958
  8959  next_group:
  8960                  /* Now, start updating sd_lb_stats */
  8961                  sds->total_load += sgs->group_load;
  8962                  sds->total_capacity += sgs->group_capacity;
  8963
  8964                  sg = sg->next;
  8965          } while (sg != env->sd->groups);
  8966
  8967          /* Tag domain that child domain prefers tasks go to siblings first */
  8968          sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
  8969
  8970  #ifdef CONFIG_NO_HZ_COMMON
  8971          if ((env->flags & LBF_NOHZ_AGAIN) &&
  8972              cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) {
  8973
  8974                  WRITE_ONCE(nohz.next_blocked,
  8975                             jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD));
  8976          }
  8977  #endif
  8978
  8979          if (env->sd->flags & SD_NUMA)
  8980                  env->fbq_type = fbq_classify_group(&sds->busiest_stat);
  8981
  8982          if (!env->sd->parent) {
  8983                  struct root_domain *rd = env->dst_rq->rd;
  8984
  8985                  /* update overload indicator if we are at root domain */
  8986                  WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
  8987
  8988                  /* Update over-utilization (tipping point, U >= 0) indicator */
  8989                  WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
  8990                  trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
  8991          } else if (sg_status & SG_OVERUTILIZED) {
  8992                  struct root_domain *rd = env->dst_rq->rd;
  8993
  8994                  WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
  8995                  trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
  8996          }
  8997  }
  8998
  8999  #define NUMA_IMBALANCE_MIN 2
  9000
  9001  /*
  9002   * Allow a NUMA imbalance if busy CPUs is less than 25% of the domain.
  9003   * This is an approximation as the number of running tasks may not be
  9004   * related to the number of busy CPUs due to sched_setaffinity.
  9005   */
> 9006  static inline bool allow_numa_imbalance(int dst_running, int dst_weight)
  9007  {
  9008          return (dst_running < (dst_weight >> 2));
  9009  }
  9010
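For a concrete feel of the threshold in allow_numa_imbalance() (the 64-CPU domain below is an illustrative value, not taken from this report): dst_weight >> 2 is dst_weight / 4, so with sd->span_weight == 64 the imbalance is only tolerated while fewer than 16 tasks are running in the destination group. A standalone, compilable sketch of the same check:

   /* Standalone illustration of the 25% rule in allow_numa_imbalance(). */
   #include <stdbool.h>
   #include <stdio.h>

   static inline bool allow_numa_imbalance(int dst_running, int dst_weight)
   {
           return (dst_running < (dst_weight >> 2)); /* i.e. below 25% busy */
   }

   int main(void)
   {
           printf("%d\n", allow_numa_imbalance(15, 64)); /* 1: imbalance allowed */
           printf("%d\n", allow_numa_imbalance(16, 64)); /* 0: load balance instead */
           return 0;
   }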
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org