From: vpillai <vpillai@digitalocean.com>
To: Nishanth Aravamudan <naravamudan@digitalocean.com>,
Julien Desfossez <jdesfossez@digitalocean.com>,
Peter Zijlstra <peterz@infradead.org>,
Tim Chen <tim.c.chen@linux.intel.com>,
mingo@kernel.org, tglx@linutronix.de, pjt@google.com,
torvalds@linux-foundation.org
Cc: linux-kernel@vger.kernel.org, fweisbec@gmail.com,
keescook@chromium.org, kerrnel@google.com,
Phil Auld <pauld@redhat.com>, Aaron Lu <aaron.lwe@gmail.com>,
Aubrey Li <aubrey.intel@gmail.com>,
aubrey.li@linux.intel.com,
Valentin Schneider <valentin.schneider@arm.com>,
Mel Gorman <mgorman@techsingularity.net>,
Pawan Gupta <pawan.kumar.gupta@linux.intel.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Joel Fernandes <joelaf@google.com>,
joel@joelfernandes.org
Subject: [RFC PATCH 10/13] sched: Trivial forced-newidle balancer
Date: Wed, 4 Mar 2020 17:00:00 +0000 [thread overview]
Message-ID: <f402d614d8ffbf7bfb58399833731c919e3f95c6.1583332765.git.vpillai@digitalocean.com> (raw)
In-Reply-To: <cover.1583332764.git.vpillai@digitalocean.com>
From: Peter Zijlstra <peterz@infradead.org>
When a sibling is forced-idle to match the core-cookie; search for
matching tasks to fill the core.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
include/linux/sched.h | 1 +
kernel/sched/core.c | 131 +++++++++++++++++++++++++++++++++++++++++-
kernel/sched/idle.c | 1 +
kernel/sched/sched.h | 6 ++
4 files changed, 138 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 80ec54706282..c9406a5b678f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -685,6 +685,7 @@ struct task_struct {
#ifdef CONFIG_SCHED_CORE
struct rb_node core_node;
unsigned long core_cookie;
+ unsigned int core_occupation;
#endif
#ifdef CONFIG_CGROUP_SCHED
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 556bf054b896..18ee8e10a171 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -218,6 +218,21 @@ static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
return match;
}
+static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
+{
+ struct rb_node *node = &p->core_node;
+
+ node = rb_next(node);
+ if (!node)
+ return NULL;
+
+ p = container_of(node, struct task_struct, core_node);
+ if (p->core_cookie != cookie)
+ return NULL;
+
+ return p;
+}
+
/*
* The static-key + stop-machine variable are needed such that:
*
@@ -4369,7 +4384,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
struct task_struct *next, *max = NULL;
const struct sched_class *class;
const struct cpumask *smt_mask;
- int i, j, cpu;
+ int i, j, cpu, occ = 0;
bool need_sync = false;
cpu = cpu_of(rq);
@@ -4476,6 +4491,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
goto done;
}
+ if (!is_idle_task(p))
+ occ++;
+
rq_i->core_pick = p;
/*
@@ -4501,6 +4519,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
cpu_rq(j)->core_pick = NULL;
}
+ occ = 1;
goto again;
} else {
/*
@@ -4540,6 +4559,8 @@ next_class:;
if (is_idle_task(rq_i->core_pick) && rq_i->nr_running)
rq_i->core_forceidle = true;
+ rq_i->core_pick->core_occupation = occ;
+
if (i == cpu)
continue;
@@ -4555,6 +4576,114 @@ next_class:;
return next;
}
+static bool try_steal_cookie(int this, int that)
+{
+ struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
+ struct task_struct *p;
+ unsigned long cookie;
+ bool success = false;
+
+ local_irq_disable();
+ double_rq_lock(dst, src);
+
+ cookie = dst->core->core_cookie;
+ if (!cookie)
+ goto unlock;
+
+ if (dst->curr != dst->idle)
+ goto unlock;
+
+ p = sched_core_find(src, cookie);
+ if (p == src->idle)
+ goto unlock;
+
+ do {
+ if (p == src->core_pick || p == src->curr)
+ goto next;
+
+ if (!cpumask_test_cpu(this, &p->cpus_mask))
+ goto next;
+
+ if (p->core_occupation > dst->idle->core_occupation)
+ goto next;
+
+ p->on_rq = TASK_ON_RQ_MIGRATING;
+ deactivate_task(src, p, 0);
+ set_task_cpu(p, this);
+ activate_task(dst, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
+
+ resched_curr(dst);
+
+ success = true;
+ break;
+
+next:
+ p = sched_core_next(p, cookie);
+ } while (p);
+
+unlock:
+ double_rq_unlock(dst, src);
+ local_irq_enable();
+
+ return success;
+}
+
+static bool steal_cookie_task(int cpu, struct sched_domain *sd)
+{
+ int i;
+
+ for_each_cpu_wrap(i, sched_domain_span(sd), cpu) {
+ if (i == cpu)
+ continue;
+
+ if (need_resched())
+ break;
+
+ if (try_steal_cookie(cpu, i))
+ return true;
+ }
+
+ return false;
+}
+
+static void sched_core_balance(struct rq *rq)
+{
+ struct sched_domain *sd;
+ int cpu = cpu_of(rq);
+
+ rcu_read_lock();
+ raw_spin_unlock_irq(rq_lockp(rq));
+ for_each_domain(cpu, sd) {
+ if (!(sd->flags & SD_LOAD_BALANCE))
+ break;
+
+ if (need_resched())
+ break;
+
+ if (steal_cookie_task(cpu, sd))
+ break;
+ }
+ raw_spin_lock_irq(rq_lockp(rq));
+ rcu_read_unlock();
+}
+
+static DEFINE_PER_CPU(struct callback_head, core_balance_head);
+
+void queue_core_balance(struct rq *rq)
+{
+ if (!sched_core_enabled(rq))
+ return;
+
+ if (!rq->core->core_cookie)
+ return;
+
+ if (!rq->nr_running) /* not forced idle */
+ return;
+
+ queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
+}
+
#else /* !CONFIG_SCHED_CORE */
static struct task_struct *
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 46c18e3dab13..b2f08431f0f1 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -395,6 +395,7 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
{
update_idle_core(rq);
schedstat_inc(rq->sched_goidle);
+ queue_core_balance(rq);
}
static struct task_struct *pick_task_idle(struct rq *rq)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ef9e08e5da6a..552c80b70757 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1057,6 +1057,8 @@ static inline raw_spinlock_t *rq_lockp(struct rq *rq)
return &rq->__lock;
}
+extern void queue_core_balance(struct rq *rq);
+
void sched_core_add(struct rq *rq, struct task_struct *p);
void sched_core_remove(struct rq *rq, struct task_struct *p);
@@ -1072,6 +1074,10 @@ static inline raw_spinlock_t *rq_lockp(struct rq *rq)
return &rq->__lock;
}
+static inline void queue_core_balance(struct rq *rq)
+{
+}
+
#endif /* CONFIG_SCHED_CORE */
#ifdef CONFIG_SCHED_SMT
--
2.17.1
next prev parent reply other threads:[~2020-03-04 17:00 UTC|newest]
Thread overview: 110+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-03-04 16:59 [RFC PATCH 00/13] Core scheduling v5 vpillai
2020-03-04 16:59 ` [RFC PATCH 01/13] sched: Wrap rq::lock access vpillai
2020-03-04 16:59 ` [RFC PATCH 02/13] sched: Introduce sched_class::pick_task() vpillai
2020-03-04 16:59 ` [RFC PATCH 03/13] sched: Core-wide rq->lock vpillai
2020-04-01 11:42 ` [PATCH] sched/arm64: store cpu topology before notify_cpu_starting Cheng Jian
2020-04-01 13:23 ` Valentin Schneider
2020-04-06 8:00 ` chengjian (D)
2020-04-09 9:59 ` Sudeep Holla
2020-04-09 10:32 ` Valentin Schneider
2020-04-09 11:08 ` Sudeep Holla
2020-04-09 17:54 ` Joel Fernandes
2020-04-10 13:49 ` chengjian (D)
2020-04-14 11:36 ` [RFC PATCH 03/13] sched: Core-wide rq->lock Peter Zijlstra
2020-04-14 21:35 ` Vineeth Remanan Pillai
2020-04-15 10:55 ` Peter Zijlstra
2020-04-14 14:32 ` Peter Zijlstra
2020-03-04 16:59 ` [RFC PATCH 04/13] sched/fair: Add a few assertions vpillai
2020-03-04 16:59 ` [RFC PATCH 05/13] sched: Basic tracking of matching tasks vpillai
2020-03-04 16:59 ` [RFC PATCH 06/13] sched: Update core scheduler queue when taking cpu online/offline vpillai
2020-03-04 16:59 ` [RFC PATCH 07/13] sched: Add core wide task selection and scheduling vpillai
2020-04-14 13:35 ` Peter Zijlstra
2020-04-16 23:32 ` Tim Chen
2020-04-17 10:57 ` Peter Zijlstra
2020-04-16 3:39 ` Chen Yu
2020-04-16 19:59 ` Vineeth Remanan Pillai
2020-04-17 11:18 ` Peter Zijlstra
2020-04-19 15:31 ` Chen Yu
2020-05-21 23:14 ` Joel Fernandes
2020-05-21 23:16 ` Joel Fernandes
2020-05-22 2:35 ` Joel Fernandes
2020-05-22 3:44 ` Aaron Lu
2020-05-22 20:13 ` Joel Fernandes
2020-03-04 16:59 ` [RFC PATCH 08/13] sched/fair: wrapper for cfs_rq->min_vruntime vpillai
2020-03-04 16:59 ` [RFC PATCH 09/13] sched/fair: core wide vruntime comparison vpillai
2020-04-14 13:56 ` Peter Zijlstra
2020-04-15 3:34 ` Aaron Lu
2020-04-15 4:07 ` Aaron Lu
2020-04-15 21:24 ` Vineeth Remanan Pillai
2020-04-17 9:40 ` Aaron Lu
2020-04-20 8:07 ` [PATCH updated] sched/fair: core wide cfs task priority comparison Aaron Lu
2020-04-20 22:26 ` Vineeth Remanan Pillai
2020-04-21 2:51 ` Aaron Lu
2020-04-24 14:24 ` [PATCH updated v2] " Aaron Lu
2020-05-06 14:35 ` Peter Zijlstra
2020-05-08 8:44 ` Aaron Lu
2020-05-08 9:09 ` Peter Zijlstra
2020-05-08 12:34 ` Aaron Lu
2020-05-14 13:02 ` Peter Zijlstra
2020-05-14 22:51 ` Vineeth Remanan Pillai
2020-05-15 10:38 ` Peter Zijlstra
2020-05-15 10:43 ` Peter Zijlstra
2020-05-15 14:24 ` Vineeth Remanan Pillai
2020-05-16 3:42 ` Aaron Lu
2020-05-22 9:40 ` Aaron Lu
2020-06-08 1:41 ` Ning, Hongyu
2020-03-04 17:00 ` vpillai [this message]
2020-03-04 17:00 ` [RFC PATCH 11/13] sched: migration changes for core scheduling vpillai
2020-06-12 13:21 ` Joel Fernandes
2020-06-12 21:32 ` Vineeth Remanan Pillai
2020-06-13 2:25 ` Joel Fernandes
2020-06-13 18:59 ` Vineeth Remanan Pillai
2020-06-15 2:05 ` Li, Aubrey
2020-03-04 17:00 ` [RFC PATCH 12/13] sched: cgroup tagging interface " vpillai
2020-06-26 15:06 ` Vineeth Remanan Pillai
2020-03-04 17:00 ` [RFC PATCH 13/13] sched: Debug bits vpillai
2020-03-04 17:36 ` [RFC PATCH 00/13] Core scheduling v5 Tim Chen
2020-03-04 17:42 ` Vineeth Remanan Pillai
2020-04-14 14:21 ` Peter Zijlstra
2020-04-15 16:32 ` Joel Fernandes
2020-04-17 11:12 ` Peter Zijlstra
2020-04-17 12:35 ` Alexander Graf
2020-04-17 13:08 ` Peter Zijlstra
2020-04-18 2:25 ` Joel Fernandes
2020-05-09 14:35 ` Dario Faggioli
[not found] ` <38805656-2e2f-222a-c083-692f4b113313@linux.intel.com>
2020-05-09 3:39 ` Ning, Hongyu
2020-05-14 20:51 ` FW: " Gruza, Agata
2020-05-10 23:46 ` [PATCH RFC] Add support for core-wide protection of IRQ and softirq Joel Fernandes (Google)
2020-05-11 13:49 ` Peter Zijlstra
2020-05-11 14:54 ` Joel Fernandes
2020-05-20 22:26 ` [PATCH RFC] sched: Add a per-thread core scheduling interface Joel Fernandes (Google)
2020-05-21 4:09 ` [PATCH RFC] sched: Add a per-thread core scheduling interface(Internet mail) benbjiang(蒋彪)
2020-05-21 13:49 ` Joel Fernandes
2020-05-21 8:51 ` [PATCH RFC] sched: Add a per-thread core scheduling interface Peter Zijlstra
2020-05-21 13:47 ` Joel Fernandes
2020-05-21 20:20 ` Vineeth Remanan Pillai
2020-05-22 12:59 ` Peter Zijlstra
2020-05-22 21:35 ` Joel Fernandes
2020-05-24 14:00 ` Phil Auld
2020-05-28 14:51 ` Joel Fernandes
2020-05-28 17:01 ` Peter Zijlstra
2020-05-28 18:17 ` Phil Auld
2020-05-28 18:34 ` Phil Auld
2020-05-28 18:23 ` Joel Fernandes
2020-05-21 18:31 ` Linus Torvalds
2020-05-21 20:40 ` Joel Fernandes
2020-05-21 21:58 ` Jesse Barnes
2020-05-22 16:33 ` Linus Torvalds
2020-05-20 22:37 ` [PATCH RFC v2] Add support for core-wide protection of IRQ and softirq Joel Fernandes (Google)
2020-05-20 22:48 ` [PATCH RFC] sched: Use sched-RCU in core-scheduling balancing logic Joel Fernandes (Google)
2020-05-21 22:52 ` Paul E. McKenney
2020-05-22 1:26 ` Joel Fernandes
2020-06-25 20:12 ` [RFC PATCH 00/13] Core scheduling v5 Vineeth Remanan Pillai
2020-06-26 1:47 ` Joel Fernandes
2020-06-26 14:36 ` Vineeth Remanan Pillai
2020-06-26 15:10 ` Joel Fernandes
2020-06-26 15:12 ` Joel Fernandes
2020-06-27 16:21 ` Joel Fernandes
2020-06-30 14:11 ` Phil Auld
2020-06-29 12:33 ` Li, Aubrey
2020-06-29 19:41 ` Vineeth Remanan Pillai
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=f402d614d8ffbf7bfb58399833731c919e3f95c6.1583332765.git.vpillai@digitalocean.com \
--to=vpillai@digitalocean.com \
--cc=aaron.lwe@gmail.com \
--cc=aubrey.intel@gmail.com \
--cc=aubrey.li@linux.intel.com \
--cc=fweisbec@gmail.com \
--cc=jdesfossez@digitalocean.com \
--cc=joel@joelfernandes.org \
--cc=joelaf@google.com \
--cc=keescook@chromium.org \
--cc=kerrnel@google.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mgorman@techsingularity.net \
--cc=mingo@kernel.org \
--cc=naravamudan@digitalocean.com \
--cc=pauld@redhat.com \
--cc=pawan.kumar.gupta@linux.intel.com \
--cc=pbonzini@redhat.com \
--cc=peterz@infradead.org \
--cc=pjt@google.com \
--cc=tglx@linutronix.de \
--cc=tim.c.chen@linux.intel.com \
--cc=torvalds@linux-foundation.org \
--cc=valentin.schneider@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).