From: Arseniy Krasnov <a.krasnov@samsung.com>
To: linux@arm.linux.org.uk, mingo@redhat.com, peterz@infradead.org
Cc: a.krasnov@samsung.com, v.tyrtov@samsung.com,
s.rogachev@samsung.com, linux-kernel@vger.kernel.org,
Tarek Dakhran <t.dakhran@samsung.com>,
Sergey Dyasly <s.dyasly@samsung.com>,
Dmitriy Safonov <d.safonov@partner.samsung.com>,
Ilya Maximets <i.maximets@samsung.com>
Subject: [PATCH 05/13] hperf_hmp: introduce druntime metric.
Date: Fri, 06 Nov 2015 15:02:39 +0300 [thread overview]
Message-ID: <1446811367-23783-6-git-send-email-a.krasnov@samsung.com> (raw)
In-Reply-To: <1446811367-23783-1-git-send-email-a.krasnov@samsung.com>
This patch adds a special per-task metric used to find candidates for
migration between HMP domains (clusters). 'druntime' grows while a task runs on
the A7 cluster, and decreases while it runs on the A15 cluster. 'druntime' is also
scaled according to the load on the little cluster in order to align its value with
the big cluster's total druntime. For migration from the big/little to the
little/big cluster, the task with the lowest/highest 'druntime' is chosen.
'druntime' is used to execute each task on each cluster for approximately the same
amount of time. 'druntime' is recalculated on each call of the default
'update_curr' function.
Signed-off-by: Tarek Dakhran <t.dakhran@samsung.com>
Signed-off-by: Sergey Dyasly <s.dyasly@samsung.com>
Signed-off-by: Dmitriy Safonov <d.safonov@partner.samsung.com>
Signed-off-by: Arseniy Krasnov <a.krasnov@samsung.com>
Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
include/linux/sched.h | 3 ++
kernel/sched/core.c | 3 ++
kernel/sched/fair.c | 115 ++++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 121 insertions(+)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index aa72125..89c1bf3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1257,6 +1257,9 @@ struct sched_entity {
struct list_head group_node;
unsigned int on_rq;
+#ifdef CONFIG_HPERF_HMP
+ long druntime;
+#endif
u64 exec_start;
u64 sum_exec_runtime;
u64 vruntime;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8747e06..6883a00 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2085,6 +2085,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
+#ifdef CONFIG_HPERF_HMP
+ p->se.druntime = 0;
+#endif
INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_SCHEDSTATS
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c57007f..e94fab4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -102,6 +102,10 @@ unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
#ifdef CONFIG_HPERF_HMP
extern void hmp_set_cpu_masks(struct cpumask *, struct cpumask *);
+static atomic_t a15_nr_hmp_busy = ATOMIC_INIT(0);
+static atomic_t a7_nr_hmp_busy = ATOMIC_INIT(0);
+static atomic_t hmp_imbalance = ATOMIC_INIT(0);
+
static unsigned int freq_scale_cpu_power[CONFIG_NR_CPUS];
#endif /* CONFIG_HPERF_HMP */
@@ -660,6 +664,115 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
+#ifdef CONFIG_HPERF_HMP
+static bool
+is_task_hmp(struct task_struct *task, const struct cpumask *task_cpus)
+{
+ if (!task_cpus)
+ task_cpus = tsk_cpus_allowed(task);
+
+ /*
+ * Check if a task has cpus_allowed only for one CPU domain (A15 or A7)
+ */
+ return !(cpumask_intersects(task_cpus, cpu_fastest_mask) ^
+ cpumask_intersects(task_cpus, cpu_slowest_mask));
+}
+
+#ifdef CONFIG_HPERF_HMP_DEBUG
+static inline void check_druntime_sum(struct rq *rq, long druntime_sum)
+{
+ BUG_ON(rq->cfs.h_nr_running == 0 && druntime_sum != 0);
+
+ if (cpu_is_fastest(rq->cpu))
+ BUG_ON(druntime_sum > 0);
+ else
+ BUG_ON(druntime_sum < 0);
+}
+#else
+static inline void check_druntime_sum(struct rq *rq, long druntime_sum)
+{
+}
+#endif
+
+static inline void add_druntime_sum(struct rq *rq, long delta)
+{
+ rq->druntime_sum += delta;
+ check_druntime_sum(rq, rq->druntime_sum);
+}
+/* Updates druntime for a task */
+static inline void
+update_hmp_stat(struct cfs_rq *cfs_rq, struct sched_entity *curr,
+ unsigned long delta_exec)
+{
+ long to_add;
+ unsigned int hmp_fairness_threshold = 240;
+ struct rq *rq = rq_of(cfs_rq);
+ int a7_nr_hmp_busy_tmp;
+
+ if (atomic_read(&hmp_imbalance) == 0)
+ return;
+
+ if (!curr->on_rq)
+ return;
+
+ if (!entity_is_task(curr))
+ return;
+
+ if (!task_of(curr)->on_rq)
+ return;
+
+ if (!cfs_rq->h_nr_running)
+ return;
+
+ if (!is_task_hmp(task_of(curr), NULL))
+ return;
+
+ delta_exec = delta_exec >> 10;
+
+ if (cpu_is_fastest(rq->cpu))
+ to_add = -delta_exec;
+ else
+ to_add = delta_exec;
+
+ to_add -= curr->druntime;
+
+ /* Avoid values with the different sign */
+ if ((cpu_is_fastest(rq->cpu) && to_add >= 0) ||
+ (!cpu_is_fastest(rq->cpu) && to_add <= 0))
+ return;
+
+ to_add /= (long)(2 + 4 * hmp_fairness_threshold /
+ (cfs_rq->h_nr_running + 1));
+
+ a7_nr_hmp_busy_tmp = atomic_read(&a7_nr_hmp_busy);
+ /* druntime balancing between the domains */
+ if (!cpu_is_fastest(rq->cpu) && a7_nr_hmp_busy_tmp) {
+ to_add *= atomic_read(&a15_nr_hmp_busy);
+ to_add /= a7_nr_hmp_busy_tmp;
+ }
+
+ if (cpu_is_fastest(rq->cpu)) {
+ if (curr->druntime < 0)
+ add_druntime_sum(rq, to_add);
+ else if ((curr->druntime + to_add) < 0)
+ add_druntime_sum(rq, curr->druntime + to_add);
+ } else {
+ if (curr->druntime > 0)
+ add_druntime_sum(rq, to_add);
+ else if ((curr->druntime + to_add) > 0)
+ add_druntime_sum(rq, curr->druntime + to_add);
+ }
+
+ curr->druntime += to_add;
+}
+#else
+static inline void
+update_hmp_stat(struct cfs_rq *cfs_rq, struct sched_entity *curr,
+ unsigned long delta_exec)
+{
+}
+#endif /* CONFIG_HPERF_HMP */
+
#ifdef CONFIG_SMP
static int select_idle_sibling(struct task_struct *p, int cpu);
static unsigned long task_h_load(struct task_struct *p);
@@ -735,6 +848,8 @@ static void update_curr(struct cfs_rq *cfs_rq)
}
account_cfs_rq_runtime(cfs_rq, delta_exec);
+
+ update_hmp_stat(cfs_rq, curr, delta_exec);
}
static void update_curr_fair(struct rq *rq)
--
1.9.1
next prev parent reply other threads:[~2015-11-06 12:05 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-11-06 12:02 [PATCH 00/13] High performance balancing logic for big.LITTLE Arseniy Krasnov
2015-11-06 12:02 ` [PATCH 01/13] hperf_hmp: add new config for arm and arm64 Arseniy Krasnov
2015-11-06 12:02 ` [PATCH 02/13] hperf_hmp: introduce hew domain flag Arseniy Krasnov
2015-11-06 12:02 ` [PATCH 03/13] hperf_hmp: add sched domains initialization Arseniy Krasnov
2015-11-06 12:02 ` [PATCH 04/13] hperf_hmp: scheduler initialization routines Arseniy Krasnov
2015-11-06 12:02 ` Arseniy Krasnov [this message]
2015-11-06 12:02 ` [PATCH 06/13] hperf_hmp: is_hmp_imbalance introduced Arseniy Krasnov
2015-11-06 12:02 ` [PATCH 07/13] hperf_hmp: migration auxiliary functions Arseniy Krasnov
2015-11-06 13:03 ` kbuild test robot
2015-11-06 12:02 ` [PATCH 08/13] hperf_hmp: swap tasks function Arseniy Krasnov
2015-11-06 12:02 ` [PATCH 09/13] hperf_hmp: one way balancing function Arseniy Krasnov
2015-11-06 12:02 ` [PATCH 10/13] hperf_hmp: idle pull function Arseniy Krasnov
2015-11-06 12:02 ` [PATCH 11/13] hperf_hmp: task CPU selection logic Arseniy Krasnov
2015-11-06 12:29 ` kbuild test robot
2015-11-06 12:02 ` [PATCH 12/13] hperf_hmp: rest of logic Arseniy Krasnov
2015-11-06 12:02 ` [PATCH 13/13] hperf_hmp: cpufreq routines Arseniy Krasnov
2015-11-07 9:52 ` [PATCH 00/13] High performance balancing logic for big.LITTLE Peter Zijlstra
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1446811367-23783-6-git-send-email-a.krasnov@samsung.com \
--to=a.krasnov@samsung.com \
--cc=d.safonov@partner.samsung.com \
--cc=i.maximets@samsung.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux@arm.linux.org.uk \
--cc=mingo@redhat.com \
--cc=peterz@infradead.org \
--cc=s.dyasly@samsung.com \
--cc=s.rogachev@samsung.com \
--cc=t.dakhran@samsung.com \
--cc=v.tyrtov@samsung.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).