From: Arseniy Krasnov <a.krasnov@samsung.com>
To: linux@arm.linux.org.uk, mingo@redhat.com, peterz@infradead.org
Cc: a.krasnov@samsung.com, v.tyrtov@samsung.com, s.rogachev@samsung.com,
	linux-kernel@vger.kernel.org, Tarek Dakhran, Sergey Dyasly,
	Dmitriy Safonov, Ilya Maximets
Subject: [PATCH 04/13] hperf_hmp: scheduler initialization routines.
Date: Fri, 06 Nov 2015 15:02:38 +0300
Message-id: <1446811367-23783-5-git-send-email-a.krasnov@samsung.com>
X-Mailer: git-send-email 1.9.1
In-reply-to: <1446811367-23783-1-git-send-email-a.krasnov@samsung.com>
References: <1446811367-23783-1-git-send-email-a.krasnov@samsung.com>

Add new fields to the 'rq' structure and a routine, called during fair
class setup, that initializes the HMP scheduler variables: the big and
little cluster CPU masks. They are read from the kernel config (if set);
otherwise default values are used.
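For example (illustrative only, not kernel code): with empty
CONFIG_HMP_FAST_CPU_MASK / CONFIG_HMP_SLOW_CPU_MASK strings the defaults
0x0F and 0xF0 are used, i.e. CPUs 0-3 form the fast (big) cluster and
CPUs 4-7 the slow (little) one. A minimal user-space sketch of that
fallback scheme:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for the in-kernel fallback: take the hex mask
 * string from the config if it is non-empty, otherwise use the default. */
static unsigned long mask_or_default(const char *config_str, unsigned long def)
{
	return (config_str && strlen(config_str)) ?
		strtoul(config_str, NULL, 16) : def;
}

int main(void)
{
	unsigned long fast = mask_or_default("", 0x0FUL);	/* -> 0000000F */
	unsigned long slow = mask_or_default("", 0xF0UL);	/* -> 000000F0 */

	printf("fast CPUs mask: %08lX\n", fast);
	printf("slow CPUs mask: %08lX\n", slow);
	return 0;
}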
Signed-off-by: Tarek Dakhran
Signed-off-by: Sergey Dyasly
Signed-off-by: Dmitriy Safonov
Signed-off-by: Arseniy Krasnov
Signed-off-by: Ilya Maximets
---
 kernel/sched/core.c  |  4 ++++
 kernel/sched/fair.c  | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h | 15 +++++++++++++++
 3 files changed, 65 insertions(+)
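A hypothetical sketch of how the masks and the cpu_is_fastest() helper
introduced below could be consumed by balancing code; only cpu_fastest_mask,
cpu_slowest_mask and cpu_is_fastest() come from this patch, the surrounding
function is made up for illustration:

/* Hypothetical caller, for illustration only: pick some online CPU on the
 * opposite cluster of @cpu. Returns >= nr_cpu_ids if that cluster has no
 * online CPUs (standard cpumask_any_and() convention). */
static int hmp_opposite_cluster_cpu(int cpu)
{
	const struct cpumask *target = cpu_is_fastest(cpu) ?
				       cpu_slowest_mask : cpu_fastest_mask;

	return cpumask_any_and(target, cpu_online_mask);
}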
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e3a632f..8747e06 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7488,6 +7488,10 @@ void __init sched_init(void)
 #endif
 		init_rq_hrtick(rq);
 		atomic_set(&rq->nr_iowait, 0);
+#ifdef CONFIG_HPERF_HMP
+		rq->druntime_sum = 0;
+		rq->nr_hmp_tasks = 0;
+#endif
 	}
 
 	set_load_weight(&init_task);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9a5e60f..c57007f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -100,6 +100,11 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  */
 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
 
+#ifdef CONFIG_HPERF_HMP
+extern void hmp_set_cpu_masks(struct cpumask *, struct cpumask *);
+static unsigned int freq_scale_cpu_power[CONFIG_NR_CPUS];
+#endif /* CONFIG_HPERF_HMP */
+
 #ifdef CONFIG_CFS_BANDWIDTH
 /*
  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
@@ -8305,8 +8310,38 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_SCHED_DEBUG */
 
+#ifdef CONFIG_HPERF_HMP
+static unsigned long default_fast_mask = 0x0F;
+static unsigned long default_slow_mask = 0xF0;
+
+void hmp_set_cpu_masks(struct cpumask *fast_mask, struct cpumask *slow_mask)
+{
+	cpumask_clear(fast_mask);
+	cpumask_clear(slow_mask);
+
+	/* try to parse CPU masks from config */
+	if (strlen(CONFIG_HMP_FAST_CPU_MASK) &&
+	    strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
+		if (cpumask_parse(CONFIG_HMP_FAST_CPU_MASK, fast_mask) ||
+		    cpumask_parse(CONFIG_HMP_SLOW_CPU_MASK, slow_mask))
+			pr_err("hperf_hmp: Failed to get CPU masks from config!\n");
+		else
+			return;
+	}
+
+	pr_err("hperf_hmp: Fast mask will be: %08lX, slow mask: %08lX\n",
+	       default_fast_mask, default_slow_mask);
+
+	fast_mask->bits[0] = default_fast_mask;
+	slow_mask->bits[0] = default_slow_mask;
+}
+#endif
+
 __init void init_sched_fair_class(void)
 {
+#ifdef CONFIG_HPERF_HMP
+	int cpu;
+#endif
 #ifdef CONFIG_SMP
 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
 
@@ -8315,6 +8350,17 @@ __init void init_sched_fair_class(void)
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
 	cpu_notifier(sched_ilb_notifier, 0);
 #endif
+
+#ifdef CONFIG_HPERF_HMP
+	for_each_possible_cpu(cpu)
+		freq_scale_cpu_power[cpu] = SCHED_CAPACITY_SCALE;
+	hmp_set_cpu_masks(cpu_fastest_mask, cpu_slowest_mask);
+	pr_info("hperf_hmp: fast CPUs mask: %08X\n",
+		(unsigned int)cpumask_bits(cpu_fastest_mask)[0]);
+	pr_info("hperf_hmp: slow CPUs mask: %08X\n",
+		(unsigned int)cpumask_bits(cpu_slowest_mask)[0]);
+#endif
+
 #endif /* SMP */
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6d2a119..94828dc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -597,6 +597,11 @@ struct rq {
 	 */
 	unsigned long nr_uninterruptible;
 
+#ifdef CONFIG_HPERF_HMP
+	/* shows the amount of accumulated unfairness by tasks of this rq */
+	long druntime_sum;
+	unsigned int nr_hmp_tasks;
+#endif
 	struct task_struct *curr, *idle, *stop;
 	unsigned long next_balance;
 	struct mm_struct *prev_mm;
@@ -892,6 +897,16 @@ static inline unsigned int group_first_cpu(struct sched_group *group)
 
 extern int group_balance_cpu(struct sched_group *sg);
 
+#ifdef CONFIG_HPERF_HMP
+extern struct cpumask *cpu_fastest_mask;
+extern struct cpumask *cpu_slowest_mask;
+
+static inline bool cpu_is_fastest(int cpu)
+{
+	return cpumask_test_cpu(cpu, cpu_fastest_mask);
+}
+#endif
+
 #else
 
 static inline void sched_ttwu_pending(void) { }
-- 
1.9.1