From: "Joel Fernandes (Google)" <joel@joelfernandes.org>
To: Nishanth Aravamudan <naravamudan@digitalocean.com>,
	Julien Desfossez <jdesfossez@digitalocean.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Tim Chen <tim.c.chen@linux.intel.com>,
	Vineeth Pillai <viremana@linux.microsoft.com>,
	Aaron Lu <aaron.lwe@gmail.com>,
	Aubrey Li <aubrey.intel@gmail.com>,
	tglx@linutronix.de, linux-kernel@vger.kernel.org
Cc: mingo@kernel.org, torvalds@linux-foundation.org,
	fweisbec@gmail.com, keescook@chromium.org, kerrnel@google.com,
	Phil Auld <pauld@redhat.com>,
	Valentin Schneider <valentin.schneider@arm.com>,
	Mel Gorman <mgorman@techsingularity.net>,
	Pawan Gupta <pawan.kumar.gupta@linux.intel.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	joel@joelfernandes.org, vineeth@bitbyteword.org,
	Chen Yu <yu.c.chen@intel.com>,
	Christian Brauner <christian.brauner@ubuntu.com>,
	Agata Gruza <agata.gruza@intel.com>,
	Antonio Gomez Iglesias <antonio.gomez.iglesias@intel.com>,
	graf@amazon.com, konrad.wilk@oracle.com, dfaggioli@suse.com,
	pjt@google.com, rostedt@goodmis.org, derkling@google.com,
	benbjiang@tencent.com,
	Alexandre Chartre <alexandre.chartre@oracle.com>,
	James.Bottomley@hansenpartnership.com, OWeisse@umich.edu,
	Dhaval Giani <dhaval.giani@oracle.com>,
	Junaid Shahid <junaids@google.com>,
	jsbarnes@google.com, chris.hyser@oracle.com,
	Ben Segall <bsegall@google.com>, Josh Don <joshdon@google.com>,
	Hao Luo <haoluo@google.com>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	Aaron Lu <aaron.lu@linux.alibaba.com>,
	Aubrey Li <aubrey.li@linux.intel.com>,
	"Paul E. McKenney" <paulmck@kernel.org>,
	Tim Chen <tim.c.chen@intel.com>
Subject: [PATCH -tip 07/32] sched: Add core wide task selection and scheduling.
Date: Tue, 17 Nov 2020 18:19:37 -0500	[thread overview]
Message-ID: <20201117232003.3580179-8-joel@joelfernandes.org> (raw)
In-Reply-To: <20201117232003.3580179-1-joel@joelfernandes.org>

From: Peter Zijlstra <peterz@infradead.org>

Instead of only selecting a local task, select a task for all SMT
siblings for every reschedule on the core (irrespective of which
logical CPU does the reschedule).
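
In rough pseudocode, the selection becomes (an illustrative sketch
only, much simplified from the actual implementation below):

	for_each_class(class):
		for each SMT sibling i of the core:
			p = pick_task(rq_i, class, max)  /* cookie-aware pick */
			rq_i->core_pick = p              /* may be idle_task  */
			if p does not match max's cookie and outranks max:
				max = p, wipe prior picks and restart

Siblings whose current task differs from their core_pick then get a
resched IPI, so only cookie-compatible tasks (or idle) run on the core
at any time.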

Tested-by: Julien Desfossez <jdesfossez@digitalocean.com>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Julien Desfossez <jdesfossez@digitalocean.com>
Signed-off-by: Vineeth Remanan Pillai <viremana@linux.microsoft.com>
Signed-off-by: Aaron Lu <aaron.lu@linux.alibaba.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Chen Yu <yu.c.chen@intel.com>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
---
 kernel/sched/core.c  | 301 ++++++++++++++++++++++++++++++++++++++++++-
 kernel/sched/sched.h |   6 +-
 2 files changed, 305 insertions(+), 2 deletions(-)
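
[Reviewer note: an illustrative walk-through of the sequence-counter
handshake on a two-sibling core, assuming no concurrent enqueues or
dequeues:

 1. CPU0 reschedules: core_task_seq is bumped, the core-wide selection
    loop fills rq->core_pick for both siblings, core_pick_seq is set
    to core_task_seq, CPU0 schedules its own pick (recording it in its
    rq->core_sched_seq) and sends a resched IPI to CPU1.
 2. CPU1 enters schedule(): core_pick_seq == core_task_seq while its
    own core_sched_seq is still behind, so it takes the fastpath and
    schedules rq->core_pick directly, without a second core-wide
    selection.]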

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9d521033777f..1bd0b0bbb040 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5029,7 +5029,7 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+__pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	const struct sched_class *class;
 	struct task_struct *p;
@@ -5070,6 +5070,294 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 }
 
 #ifdef CONFIG_SCHED_CORE
+static inline bool is_task_rq_idle(struct task_struct *t)
+{
+	return (task_rq(t)->idle == t);
+}
+
+static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
+{
+	return is_task_rq_idle(a) || (a->core_cookie == cookie);
+}
+
+static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
+{
+	if (is_task_rq_idle(a) || is_task_rq_idle(b))
+		return true;
+
+	return a->core_cookie == b->core_cookie;
+}
+
+// XXX fairness/fwd progress conditions
+/*
+ * Returns
+ * - NULL if there is no runnable task for this class.
+ * - the highest priority task for this runqueue if it matches
+ *   rq->core->core_cookie or its priority is greater than max.
+ * - else the best task matching the cookie, or the idle task if none.
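+ *
+ * Example (illustrative): if the core cookie is A and this class's
+ * pick has cookie B, sched_core_find() supplies the best cookie-A
+ * task (or idle), and the cookie-B pick is returned only when it is
+ * higher priority than both that task and max.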
+ */
+static struct task_struct *
+pick_task(struct rq *rq, const struct sched_class *class, struct task_struct *max)
+{
+	struct task_struct *class_pick, *cookie_pick;
+	unsigned long cookie = rq->core->core_cookie;
+
+	class_pick = class->pick_task(rq);
+	if (!class_pick)
+		return NULL;
+
+	if (!cookie) {
+		/*
+		 * If class_pick is tagged, return it only if it has
+		 * higher priority than max.
+		 */
+		if (max && class_pick->core_cookie &&
+		    prio_less(class_pick, max))
+			return idle_sched_class.pick_task(rq);
+
+		return class_pick;
+	}
+
+	/*
+	 * If class_pick is idle or matches cookie, return early.
+	 */
+	if (cookie_equals(class_pick, cookie))
+		return class_pick;
+
+	cookie_pick = sched_core_find(rq, cookie);
+
+	/*
+	 * If class > max && class > cookie, it is the highest priority task on
+	 * the core (so far) and it must be selected, otherwise we must go with
+	 * the cookie pick in order to satisfy the constraint.
+	 */
+	if (prio_less(cookie_pick, class_pick) &&
+	    (!max || prio_less(max, class_pick)))
+		return class_pick;
+
+	return cookie_pick;
+}
+
+static struct task_struct *
+pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+	struct task_struct *next, *max = NULL;
+	const struct sched_class *class;
+	const struct cpumask *smt_mask;
+	bool need_sync;
+	int i, j, cpu;
+
+	if (!sched_core_enabled(rq))
+		return __pick_next_task(rq, prev, rf);
+
+	cpu = cpu_of(rq);
+
+	/* Stopper task is switching into idle, no need for core-wide selection. */
+	if (cpu_is_offline(cpu)) {
+		/*
+		 * Reset core_pick so that we don't enter the fastpath when
+		 * coming online. The task in core_pick would already have
+		 * been migrated to another CPU while this one was offline.
+		 */
+		rq->core_pick = NULL;
+		return __pick_next_task(rq, prev, rf);
+	}
+
+	/*
+	 * If there were no {en,de}queues since we picked (IOW, the task
+	 * pointers are all still valid), and we haven't scheduled the last
+	 * pick yet, do so now.
+	 *
+	 * rq->core_pick can be NULL if no selection was made for a CPU because
+	 * it was either offline or went offline during a sibling's core-wide
+	 * selection. In this case, do a core-wide selection.
+	 */
+	if (rq->core->core_pick_seq == rq->core->core_task_seq &&
+	    rq->core->core_pick_seq != rq->core_sched_seq &&
+	    rq->core_pick) {
+		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
+
+		next = rq->core_pick;
+		if (next != prev) {
+			put_prev_task(rq, prev);
+			set_next_task(rq, next);
+		}
+
+		rq->core_pick = NULL;
+		return next;
+	}
+
+	put_prev_task_balance(rq, prev, rf);
+
+	smt_mask = cpu_smt_mask(cpu);
+
+	/*
+	 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
+	 *
+	 * @task_seq guards the task state ({en,de}queues)
+	 * @pick_seq is the @task_seq we did a selection on
+	 * @sched_seq is the @pick_seq we scheduled
+	 *
+	 * However, preemptions can cause multiple picks on the same task set.
+	 * 'Fix' this by also increasing @task_seq for every pick.
+	 */
+	rq->core->core_task_seq++;
+	need_sync = !!rq->core->core_cookie;
+
+	/* reset state */
+	rq->core->core_cookie = 0UL;
+	for_each_cpu(i, smt_mask) {
+		struct rq *rq_i = cpu_rq(i);
+
+		rq_i->core_pick = NULL;
+
+		if (rq_i->core_forceidle) {
+			need_sync = true;
+			rq_i->core_forceidle = false;
+		}
+
+		if (i != cpu)
+			update_rq_clock(rq_i);
+	}
+
+	/*
+	 * Try and select tasks for each sibling in descending sched_class
+	 * order.
+	 */
+	for_each_class(class) {
+again:
+		for_each_cpu_wrap(i, smt_mask, cpu) {
+			struct rq *rq_i = cpu_rq(i);
+			struct task_struct *p;
+
+			if (rq_i->core_pick)
+				continue;
+
+			/*
+			 * If this sibling doesn't yet have a suitable task to
+			 * run, ask for the most eligible task, given the
+			 * highest priority task already selected for this
+			 * core.
+			 */
+			p = pick_task(rq_i, class, max);
+			if (!p) {
+				/*
+				 * If there aren't any cookies, we don't need to
+				 * bother with the other siblings.
+				 * If the rest of the core is not running a tagged
+				 * task, i.e.  need_sync == 0, and the current CPU
+				 * which called into the schedule() loop does not
+				 * have any tasks for this class, skip selecting for
+				 * other siblings since there's no point. We don't skip
+				 * for RT/DL because that could make CFS force-idle RT.
+				 */
+				if (i == cpu && !need_sync && class == &fair_sched_class)
+					goto next_class;
+
+				continue;
+			}
+
+			/*
+			 * Optimize the 'normal' case where there aren't any
+			 * cookies and we don't need to sync up.
+			 */
+			if (i == cpu && !need_sync && !p->core_cookie) {
+				next = p;
+				goto done;
+			}
+
+			rq_i->core_pick = p;
+
+			/*
+			 * If this new candidate is of higher priority than the
+			 * previous, and they're incompatible, we need to wipe
+			 * the slate and start over. pick_task makes sure that
+			 * p's priority is more than max if it doesn't match
+			 * max's cookie.
+			 *
+			 * NOTE: this is a linear max-filter and is thus bounded
+			 * in execution time.
+			 */
+			if (!max || !cookie_match(max, p)) {
+				struct task_struct *old_max = max;
+
+				rq->core->core_cookie = p->core_cookie;
+				max = p;
+
+				if (old_max) {
+					for_each_cpu(j, smt_mask) {
+						if (j == i)
+							continue;
+
+						cpu_rq(j)->core_pick = NULL;
+					}
+					goto again;
+				} else {
+					/*
+					 * Once we select a task for a cpu, we
+					 * should not be doing an unconstrained
+					 * pick because it might starve a task
+					 * on a forced idle cpu.
+					 */
+					need_sync = true;
+				}
+
+			}
+		}
+next_class:;
+	}
+
+	rq->core->core_pick_seq = rq->core->core_task_seq;
+	next = rq->core_pick;
+	rq->core_sched_seq = rq->core->core_pick_seq;
+
+	/* Something should have been selected for the current CPU */
+	WARN_ON_ONCE(!next);
+
+	/*
+	 * Reschedule siblings
+	 *
+	 * NOTE: L1TF -- at this point we're no longer running the old task and
+	 * sending an IPI (below) ensures the sibling will no longer be running
+	 * their task. This ensures there is no inter-sibling overlap between
+	 * non-matching user state.
+	 */
+	for_each_cpu(i, smt_mask) {
+		struct rq *rq_i = cpu_rq(i);
+
+		/*
+		 * An online sibling might have gone offline before a task
+		 * could be picked for it, or it might be offline but later
+		 * happen to come online, but it's too late and nothing was
+		 * picked for it.  That's OK - it will pick tasks for itself,
+		 * so ignore it.
+		 */
+		if (!rq_i->core_pick)
+			continue;
+
+		if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running)
+			rq_i->core_forceidle = true;
+
+		if (i == cpu) {
+			rq_i->core_pick = NULL;
+			continue;
+		}
+
+		/* Did we break L1TF mitigation requirements? */
+		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
+
+		if (rq_i->curr == rq_i->core_pick) {
+			rq_i->core_pick = NULL;
+			continue;
+		}
+
+		resched_curr(rq_i);
+	}
+
+done:
+	set_next_task(rq, next);
+	return next;
+}
 
 static inline void sched_core_cpu_starting(unsigned int cpu)
 {
@@ -5103,6 +5391,12 @@ static inline void sched_core_cpu_starting(unsigned int cpu)
 
 static inline void sched_core_cpu_starting(unsigned int cpu) {}
 
+static struct task_struct *
+pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+	return __pick_next_task(rq, prev, rf);
+}
+
 #endif /* CONFIG_SCHED_CORE */
 
 /*
@@ -7999,7 +8293,12 @@ void __init sched_init(void)
 
 #ifdef CONFIG_SCHED_CORE
 		rq->core = NULL;
+		rq->core_pick = NULL;
 		rq->core_enabled = 0;
+		rq->core_tree = RB_ROOT;
+		rq->core_forceidle = false;
+
+		rq->core_cookie = 0UL;
 #endif
 	}
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8ee0ca8ee5c3..63b28e1843ee 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1065,11 +1065,16 @@ struct rq {
 #ifdef CONFIG_SCHED_CORE
 	/* per rq */
 	struct rq		*core;
+	struct task_struct	*core_pick;
 	unsigned int		core_enabled;
+	unsigned int		core_sched_seq;
 	struct rb_root		core_tree;
+	unsigned char		core_forceidle;
 
 	/* shared state */
 	unsigned int		core_task_seq;
+	unsigned int		core_pick_seq;
+	unsigned long		core_cookie;
 #endif
 };
 
@@ -1977,7 +1982,6 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 
 static inline void set_next_task(struct rq *rq, struct task_struct *next)
 {
-	WARN_ON_ONCE(rq->curr != next);
 	next->sched_class->set_next_task(rq, next, false);
 }
 
-- 
2.29.2.299.gdc1121823c-goog


Thread overview: 150+ messages
2020-11-17 23:19 [PATCH -tip 00/32] Core scheduling (v9) Joel Fernandes (Google)
2020-11-17 23:19 ` [PATCH -tip 01/32] sched: Wrap rq::lock access Joel Fernandes (Google)
2020-11-19 23:31   ` Singh, Balbir
2020-11-20 16:55     ` Joel Fernandes
2020-11-22  8:52       ` Balbir Singh
2020-11-17 23:19 ` [PATCH -tip 02/32] sched: Introduce sched_class::pick_task() Joel Fernandes (Google)
2020-11-19 23:56   ` Singh, Balbir
2020-11-20 16:58     ` Joel Fernandes
2020-11-25 23:19       ` Balbir Singh
2020-11-25 16:28   ` Vincent Guittot
2020-11-26  9:07     ` Peter Zijlstra
2020-11-26 10:17       ` Vincent Guittot
2020-11-26 12:40         ` Peter Zijlstra
2020-11-17 23:19 ` [PATCH -tip 03/32] sched/fair: Fix pick_task_fair crashes due to empty rbtree Joel Fernandes (Google)
2020-11-20 10:15   ` Singh, Balbir
2020-11-20 18:11     ` Vineeth Pillai
2020-11-23 22:31       ` Balbir Singh
2020-11-24  8:31     ` Peter Zijlstra
2020-11-17 23:19 ` [PATCH -tip 04/32] sched: Core-wide rq->lock Joel Fernandes (Google)
2020-11-22  9:11   ` Balbir Singh
2020-11-24  8:16     ` Peter Zijlstra
2020-11-26  0:35       ` Balbir Singh
2020-11-17 23:19 ` [PATCH -tip 05/32] sched/fair: Add a few assertions Joel Fernandes (Google)
2020-11-17 23:19 ` [PATCH -tip 06/32] sched: Basic tracking of matching tasks Joel Fernandes (Google)
2020-11-17 23:19 ` Joel Fernandes (Google) [this message]
2020-11-17 23:19 ` [PATCH -tip 08/32] sched/fair: Fix forced idle sibling starvation corner case Joel Fernandes (Google)
2020-11-22 10:35   ` Balbir Singh
2020-11-17 23:19 ` [PATCH -tip 09/32] sched/fair: Snapshot the min_vruntime of CPUs on force idle Joel Fernandes (Google)
2020-11-22 11:44   ` Balbir Singh
2020-11-23 12:31     ` Vineeth Pillai
2020-11-23 23:31       ` Balbir Singh
2020-11-24  9:09         ` Peter Zijlstra
2020-11-25 23:17           ` Balbir Singh
2020-11-26  8:23             ` Peter Zijlstra
2020-11-17 23:19 ` [PATCH -tip 10/32] sched: Fix priority inversion of cookied task with sibling Joel Fernandes (Google)
2020-11-22 22:41   ` Balbir Singh
2020-11-24 18:30     ` Joel Fernandes
2020-11-25 23:05       ` Balbir Singh
2020-11-26  8:29         ` Peter Zijlstra
2020-11-26 22:27           ` Balbir Singh
2020-12-01 17:49         ` Joel Fernandes
2020-11-17 23:19 ` [PATCH -tip 11/32] sched: Enqueue task into core queue only after vruntime is updated Joel Fernandes (Google)
2020-11-17 23:19 ` [PATCH -tip 12/32] sched: Simplify the core pick loop for optimized case Joel Fernandes (Google)
2020-11-24 12:04   ` Peter Zijlstra
2020-11-24 17:04     ` Joel Fernandes
2020-11-25  8:37       ` Peter Zijlstra
2020-11-17 23:19 ` [PATCH -tip 13/32] sched: Trivial forced-newidle balancer Joel Fernandes (Google)
2020-11-23  4:38   ` Balbir Singh
2020-11-23 15:07     ` Li, Aubrey
2020-11-23 23:35       ` Balbir Singh
2020-11-24  0:32         ` Li, Aubrey
2020-11-25 21:28           ` Balbir Singh
2020-11-17 23:19 ` [PATCH -tip 14/32] sched: migration changes for core scheduling Joel Fernandes (Google)
2020-11-22 23:54   ` Balbir Singh
2020-11-23  4:36     ` Li, Aubrey
2020-11-24 15:42       ` Peter Zijlstra
2020-11-25  3:12         ` Li, Aubrey
2020-11-25 22:57           ` Balbir Singh
2020-11-26  3:20             ` Li, Aubrey
2020-11-26  8:32               ` Balbir Singh
2020-11-26  9:26                 ` Li, Aubrey
2020-11-30  9:33                   ` Balbir Singh
2020-11-30 12:29                     ` Li, Aubrey
2020-12-02 14:09                       ` Li, Aubrey
2020-12-03  1:06                         ` Li, Aubrey
2020-11-30 10:35   ` Vincent Guittot
2020-11-30 12:32     ` Li, Aubrey
2020-11-17 23:19 ` [PATCH -tip 15/32] sched: Improve snapshotting of min_vruntime for CGroups Joel Fernandes (Google)
2020-11-24 10:27   ` Peter Zijlstra
2020-11-24 17:07     ` Joel Fernandes
2020-11-25  8:41       ` Peter Zijlstra
2020-11-24 10:41   ` Peter Zijlstra
2020-11-17 23:19 ` [PATCH -tip 16/32] irq_work: Cleanup Joel Fernandes (Google)
2020-11-17 23:19 ` [PATCH -tip 17/32] arch/x86: Add a new TIF flag for untrusted tasks Joel Fernandes (Google)
2020-11-23  5:18   ` Balbir Singh
2020-11-17 23:19 ` [PATCH -tip 18/32] kernel/entry: Add support for core-wide protection of kernel-mode Joel Fernandes (Google)
2020-11-24 16:09   ` Peter Zijlstra
2020-11-24 17:52     ` Joel Fernandes
2020-11-25  9:37   ` Peter Zijlstra
2020-12-01 17:55     ` Joel Fernandes
2020-11-26  5:37   ` Balbir Singh
2020-11-17 23:19 ` [PATCH -tip 19/32] entry/idle: Enter and exit kernel protection during idle entry and exit Joel Fernandes (Google)
2020-11-24 16:13   ` Peter Zijlstra
2020-11-24 18:03     ` Joel Fernandes
2020-11-25  8:49       ` Peter Zijlstra
2020-12-01 18:24         ` Joel Fernandes
2020-11-17 23:19 ` [PATCH -tip 20/32] entry/kvm: Protect the kernel when entering from guest Joel Fernandes (Google)
2020-11-17 23:19 ` [PATCH -tip 21/32] sched: CGroup tagging interface for core scheduling Joel Fernandes (Google)
2020-11-17 23:19 ` [PATCH -tip 22/32] sched: Split the cookie and setup per-task cookie on fork Joel Fernandes (Google)
2020-11-25 11:07   ` Peter Zijlstra
2020-12-01 18:56     ` Joel Fernandes
2020-11-25 11:10   ` Peter Zijlstra
2020-12-01 19:20     ` Joel Fernandes
2020-12-01 19:34       ` Peter Zijlstra
2020-12-02  6:36         ` Josh Don
2020-12-02  7:54           ` Peter Zijlstra
2020-12-04  0:20             ` Josh Don
2020-12-06 17:49         ` Joel Fernandes
2020-11-25 11:11   ` Peter Zijlstra
2020-12-01 19:16     ` Joel Fernandes
2020-11-25 11:15   ` Peter Zijlstra
2020-12-01 19:11     ` Joel Fernandes
2020-12-01 19:20       ` Peter Zijlstra
2020-12-06 18:15         ` Joel Fernandes
2020-11-25 12:54   ` Peter Zijlstra
2020-12-01 18:38     ` Joel Fernandes
2020-11-25 13:03   ` Peter Zijlstra
2020-12-01 18:52     ` Joel Fernandes
2020-11-30 23:05   ` Balbir Singh
2020-11-17 23:19 ` [PATCH -tip 23/32] sched: Add a per-thread core scheduling interface Joel Fernandes (Google)
2020-11-25 13:08   ` Peter Zijlstra
2020-12-01 19:36     ` Joel Fernandes
2020-12-02 21:47   ` Chris Hyser
2020-12-02 23:13     ` chris hyser
2020-12-06 17:34     ` Joel Fernandes
2020-12-07 21:48       ` chris hyser
2020-12-09 18:52       ` Chris Hyser
2020-12-14 19:31         ` Joel Fernandes
2020-12-14 19:44           ` chris hyser
2020-12-14 23:25             ` Joel Fernandes
2020-12-15 14:56               ` chris hyser
2020-12-15 16:23               ` chris hyser
2020-12-15 18:13               ` Dhaval Giani
2020-12-16  0:35                 ` Joel Fernandes
2020-11-17 23:19 ` [PATCH -tip 24/32] sched: Release references to the per-task cookie on exit Joel Fernandes (Google)
2020-11-25 13:03   ` Peter Zijlstra
2020-11-17 23:19 ` [PATCH -tip 25/32] sched: Refactor core cookie into struct Joel Fernandes (Google)
2020-11-17 23:19 ` [PATCH -tip 26/32] sched: Add a second-level tag for nested CGroup usecase Joel Fernandes (Google)
2020-11-25 13:42   ` Peter Zijlstra
2020-11-30 23:10     ` Balbir Singh
2020-12-01 20:08     ` Joel Fernandes
2020-12-02  6:18     ` Josh Don
2020-12-02  8:02       ` Peter Zijlstra
2020-12-02 18:53         ` Tejun Heo
2020-12-04  0:51         ` Josh Don
2020-12-04 15:45           ` Tejun Heo
2020-11-17 23:19 ` [PATCH -tip 27/32] sched/debug: Add CGroup node for printing group cookie if SCHED_DEBUG Joel Fernandes (Google)
2020-11-17 23:19 ` [PATCH -tip 28/32] kselftest: Add tests for core-sched interface Joel Fernandes (Google)
2020-11-17 23:19 ` [PATCH -tip 29/32] sched: Move core-scheduler interfacing code to a new file Joel Fernandes (Google)
2020-11-17 23:20 ` [PATCH -tip 30/32] Documentation: Add core scheduling documentation Joel Fernandes (Google)
2020-11-17 23:20 ` [PATCH -tip 31/32] sched: Add a coresched command line option Joel Fernandes (Google)
2020-11-19 23:39   ` Randy Dunlap
2020-11-25 13:45   ` Peter Zijlstra
2020-11-26  0:11     ` Balbir Singh
2020-11-17 23:20 ` [PATCH -tip 32/32] sched: Debug bits Joel Fernandes (Google)
2020-12-01  0:21   ` Balbir Singh
2021-01-15 15:10     ` Joel Fernandes
2020-11-24 11:48 ` [PATCH -tip 00/32] Core scheduling (v9) Vincent Guittot
2020-11-24 15:08   ` Joel Fernandes
2020-12-03  6:16     ` Ning, Hongyu
