From: "Joel Fernandes (Google)" <joel@joelfernandes.org>
To: Nishanth Aravamudan <naravamudan@digitalocean.com>,
	Julien Desfossez <jdesfossez@digitalocean.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Tim Chen <tim.c.chen@linux.intel.com>,
	Vineeth Pillai <viremana@linux.microsoft.com>,
	Aaron Lu <aaron.lwe@gmail.com>,
	Aubrey Li <aubrey.intel@gmail.com>,
	tglx@linutronix.de, linux-kernel@vger.kernel.org
Cc: mingo@kernel.org, torvalds@linux-foundation.org,
	fweisbec@gmail.com, keescook@chromium.org, kerrnel@google.com,
	Phil Auld <pauld@redhat.com>,
	Valentin Schneider <valentin.schneider@arm.com>,
	Mel Gorman <mgorman@techsingularity.net>,
	Pawan Gupta <pawan.kumar.gupta@linux.intel.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	joel@joelfernandes.org, vineeth@bitbyteword.org,
	Chen Yu <yu.c.chen@intel.com>,
	Christian Brauner <christian.brauner@ubuntu.com>,
	Agata Gruza <agata.gruza@intel.com>,
	Antonio Gomez Iglesias <antonio.gomez.iglesias@intel.com>,
	graf@amazon.com, konrad.wilk@oracle.com, dfaggioli@suse.com,
	pjt@google.com, rostedt@goodmis.org, derkling@google.com,
	benbjiang@tencent.com,
	Alexandre Chartre <alexandre.chartre@oracle.com>,
	James.Bottomley@hansenpartnership.com, OWeisse@umich.edu,
	Dhaval Giani <dhaval.giani@oracle.com>,
	Junaid Shahid <junaids@google.com>,
	jsbarnes@google.com, chris.hyser@oracle.com,
	Ben Segall <bsegall@google.com>, Josh Don <joshdon@google.com>,
	Hao Luo <haoluo@google.com>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	Aubrey Li <aubrey.li@linux.intel.com>,
	"Paul E. McKenney" <paulmck@kernel.org>,
	Tim Chen <tim.c.chen@intel.com>
Subject: [PATCH -tip 16/32] irq_work: Cleanup
Date: Tue, 17 Nov 2020 18:19:46 -0500
Message-ID: <20201117232003.3580179-17-joel@joelfernandes.org>
In-Reply-To: <20201117232003.3580179-1-joel@joelfernandes.org>

From: Peter Zijlstra <peterz@infradead.org>

Get rid of the __call_single_node union and clean up the API a little
so that external code does not rely on the structure layout as much.

(Needed for the irq_work_is_busy() API in the core-scheduling series).
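
For illustration only (not part of the patch): with the new initializers
and accessors, users of irq_work no longer touch the flags word directly.
A minimal sketch, assuming a hypothetical handler my_handler() and caller
kick(); IRQ_WORK_INIT(), irq_work_is_busy() and irq_work_queue() are the
real API from this series:

	#include <linux/irq_work.h>

	static void my_handler(struct irq_work *work)
	{
		/* Typically runs from the self-IPI in hard interrupt context. */
	}

	/*
	 * Before: layout-dependent initialization and flag checks, e.g.:
	 *	atomic_set(&w.flags, 0); w.func = my_handler;
	 *	if (atomic_read(&w.flags) & IRQ_WORK_BUSY) ...
	 */

	/* After: the initializer and accessor hide the layout. */
	static struct irq_work w = IRQ_WORK_INIT(my_handler);

	static void kick(void)
	{
		if (!irq_work_is_busy(&w))	/* accessor added by this patch */
			irq_work_queue(&w);	/* raise IPI to run my_handler() */
	}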

Tested-by: Julien Desfossez <jdesfossez@digitalocean.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
---
 drivers/gpu/drm/i915/i915_request.c |  4 ++--
 include/linux/irq_work.h            | 33 ++++++++++++++++++-----------
 include/linux/irqflags.h            |  4 ++--
 kernel/bpf/stackmap.c               |  2 +-
 kernel/irq_work.c                   | 18 ++++++++--------
 kernel/printk/printk.c              |  6 ++----
 kernel/rcu/tree.c                   |  3 +--
 kernel/time/tick-sched.c            |  6 ++----
 kernel/trace/bpf_trace.c            |  2 +-
 9 files changed, 41 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 0e813819b041..5385b081a376 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -197,7 +197,7 @@ __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
 
 	llist_for_each_entry_safe(cb, cn,
 				  llist_del_all(&rq->execute_cb),
-				  work.llnode)
+				  work.node.llist)
 		fn(&cb->work);
 }
 
@@ -460,7 +460,7 @@ __await_execution(struct i915_request *rq,
 	 * callback first, then checking the ACTIVE bit, we serialise with
 	 * the completed/retired request.
 	 */
-	if (llist_add(&cb->work.llnode, &signal->execute_cb)) {
+	if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
 		if (i915_request_is_active(signal) ||
 		    __request_in_flight(signal))
 			__notify_execute_cb_imm(signal);
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 30823780c192..ec2a47a81e42 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -14,28 +14,37 @@
  */
 
 struct irq_work {
-	union {
-		struct __call_single_node node;
-		struct {
-			struct llist_node llnode;
-			atomic_t flags;
-		};
-	};
+	struct __call_single_node node;
 	void (*func)(struct irq_work *);
 };
 
+#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){	\
+	.node = { .u_flags = (_flags), },			\
+	.func = (_func),					\
+}
+
+#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
+#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY)
+#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ)
+
+#define DEFINE_IRQ_WORK(name, _f)				\
+	struct irq_work name = IRQ_WORK_INIT(_f)
+
 static inline
 void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 {
-	atomic_set(&work->flags, 0);
-	work->func = func;
+	*work = IRQ_WORK_INIT(func);
 }
 
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = {	\
-		.flags = ATOMIC_INIT(0),			\
-		.func  = (_f)					\
+static inline bool irq_work_is_pending(struct irq_work *work)
+{
+	return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;
 }
 
+static inline bool irq_work_is_busy(struct irq_work *work)
+{
+	return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
+}
 
 bool irq_work_queue(struct irq_work *work);
 bool irq_work_queue_on(struct irq_work *work, int cpu);
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 3ed4e8771b64..fef2d43a7a1d 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -109,12 +109,12 @@ do {						\
 
 # define lockdep_irq_work_enter(__work)					\
 	  do {								\
-		  if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+		  if (!(atomic_read(&__work->node.a_flags) & IRQ_WORK_HARD_IRQ))\
 			current->irq_config = 1;			\
 	  } while (0)
 # define lockdep_irq_work_exit(__work)					\
 	  do {								\
-		  if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+		  if (!(atomic_read(&__work->node.a_flags) & IRQ_WORK_HARD_IRQ))\
 			current->irq_config = 0;			\
 	  } while (0)
 
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 06065fa27124..599041cd0c8a 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -298,7 +298,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 	if (irqs_disabled()) {
 		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
 			work = this_cpu_ptr(&up_read_work);
-			if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
+			if (irq_work_is_busy(&work->irq_work)) {
 				/* cannot queue more up_read, fallback */
 				irq_work_busy = true;
 			}
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index eca83965b631..fbff25adb574 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -31,7 +31,7 @@ static bool irq_work_claim(struct irq_work *work)
 {
 	int oflags;
 
-	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
+	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
 	/*
 	 * If the work is already pending, no need to raise the IPI.
 	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
@@ -53,12 +53,12 @@ void __weak arch_irq_work_raise(void)
 static void __irq_work_queue_local(struct irq_work *work)
 {
 	/* If the work is "lazy", handle it from next tick if any */
-	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+	if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) {
+		if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) &&
 		    tick_nohz_tick_stopped())
 			arch_irq_work_raise();
 	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+		if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list)))
 			arch_irq_work_raise();
 	}
 }
@@ -102,7 +102,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (cpu != smp_processor_id()) {
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-		__smp_call_single_queue(cpu, &work->llnode);
+		__smp_call_single_queue(cpu, &work->node.llist);
 	} else {
 		__irq_work_queue_local(work);
 	}
@@ -142,7 +142,7 @@ void irq_work_single(void *arg)
 	 * to claim that work don't rely on us to handle their data
 	 * while we are in the middle of the func.
 	 */
-	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
+	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->node.a_flags);
 
 	lockdep_irq_work_enter(work);
 	work->func(work);
@@ -152,7 +152,7 @@ void irq_work_single(void *arg)
 	 * no-one else claimed it meanwhile.
 	 */
 	flags &= ~IRQ_WORK_PENDING;
-	(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
+	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
 }
 
 static void irq_work_run_list(struct llist_head *list)
@@ -166,7 +166,7 @@ static void irq_work_run_list(struct llist_head *list)
 		return;
 
 	llnode = llist_del_all(list);
-	llist_for_each_entry_safe(work, tmp, llnode, llnode)
+	llist_for_each_entry_safe(work, tmp, llnode, node.llist)
 		irq_work_single(work);
 }
 
@@ -198,7 +198,7 @@ void irq_work_sync(struct irq_work *work)
 {
 	lockdep_assert_irqs_enabled();
 
-	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
+	while (irq_work_is_busy(work))
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index fe64a49344bf..9ef23d4b07c7 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3025,10 +3025,8 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
 		wake_up_interruptible(&log_wait);
 }
 
-static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
-	.func = wake_up_klogd_work_func,
-	.flags = ATOMIC_INIT(IRQ_WORK_LAZY),
-};
+static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
+	IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
 
 void wake_up_klogd(void)
 {
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index bd04b09b84b3..ed4941f0bd59 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1311,8 +1311,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
 		    (rnp->ffmask & rdp->grpmask)) {
-			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
-			atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
 			rdp->rcu_iw_pending = true;
 			rdp->rcu_iw_gp_seq = rnp->gp_seq;
 			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
@@ -3964,6 +3962,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
 	rdp->cpu_no_qs.b.norm = true;
 	rdp->core_needs_qs = false;
 	rdp->rcu_iw_pending = false;
+	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 81632cd5e3b7..1b734070f028 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -243,10 +243,8 @@ static void nohz_full_kick_func(struct irq_work *work)
 	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
 }
 
-static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
-	.func = nohz_full_kick_func,
-	.flags = ATOMIC_INIT(IRQ_WORK_HARD_IRQ),
-};
+static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
+	IRQ_WORK_INIT_HARD(nohz_full_kick_func);
 
 /*
  * Kick this CPU if it's full dynticks in order to force it to
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 4517c8b66518..a6903912f7a0 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1086,7 +1086,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
 			return -EINVAL;
 
 		work = this_cpu_ptr(&send_signal_work);
-		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
+		if (irq_work_is_busy(&work->irq_work))
 			return -EBUSY;
 
 		/* Add the current task, which is the target of sending signal,
-- 
2.29.2.299.gdc1121823c-goog

