linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Ankur Arora <ankur.a.arora@oracle.com>
To: linux-kernel@vger.kernel.org
Cc: tglx@linutronix.de, peterz@infradead.org,
	torvalds@linux-foundation.org, paulmck@kernel.org,
	linux-mm@kvack.org, x86@kernel.org, akpm@linux-foundation.org,
	luto@kernel.org, bp@alien8.de, dave.hansen@linux.intel.com,
	hpa@zytor.com, mingo@redhat.com, juri.lelli@redhat.com,
	vincent.guittot@linaro.org, willy@infradead.org, mgorman@suse.de,
	jon.grimm@amd.com, bharata@amd.com, raghavendra.kt@amd.com,
	boris.ostrovsky@oracle.com, konrad.wilk@oracle.com,
	jgross@suse.com, andrew.cooper3@citrix.com, mingo@kernel.org,
	bristot@kernel.org, mathieu.desnoyers@efficios.com,
	geert@linux-m68k.org, glaubitz@physik.fu-berlin.de,
	anton.ivanov@cambridgegreys.com, mattst88@gmail.com,
	krypton@ulrich-teichert.org, rostedt@goodmis.org,
	David.Laight@ACULAB.COM, richard@nod.at, mjguzik@gmail.com,
	Ankur Arora <ankur.a.arora@oracle.com>
Subject: [RFC PATCH 38/86] sched: *_tsk_need_resched() now takes resched_t
Date: Tue,  7 Nov 2023 13:57:24 -0800	[thread overview]
Message-ID: <20231107215742.363031-39-ankur.a.arora@oracle.com> (raw)
In-Reply-To: <20231107215742.363031-1-ankur.a.arora@oracle.com>

The *_tsk_need_resched() helpers now need to test for the specific
need-resched flag.

The only users are RCU and the scheduler. For RCU, we always want
to reschedule at the earliest opportunity, which always means
RESCHED_eager.

For the scheduler, keep everything as RESCHED_eager for now.

Originally-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 include/linux/sched.h    | 17 ++++++++++++-----
 kernel/rcu/tree.c        |  4 ++--
 kernel/rcu/tree_exp.h    |  4 ++--
 kernel/rcu/tree_plugin.h |  4 ++--
 kernel/rcu/tree_stall.h  |  2 +-
 kernel/sched/core.c      |  9 +++++----
 kernel/sched/deadline.c  |  4 ++--
 kernel/sched/fair.c      |  2 +-
 kernel/sched/idle.c      |  2 +-
 kernel/sched/rt.c        |  4 ++--
 10 files changed, 30 insertions(+), 22 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 12d0626601a0..6dd206b2ef50 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2060,19 +2060,26 @@ static inline bool test_tsk_thread_flag(struct task_struct *tsk, int flag)
 	return test_ti_thread_flag(task_thread_info(tsk), flag);
 }
 
-static inline void set_tsk_need_resched(struct task_struct *tsk)
+static inline void set_tsk_need_resched(struct task_struct *tsk, resched_t lazy)
 {
-	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+	set_tsk_thread_flag(tsk, tif_resched(lazy));
 }
 
 static inline void clear_tsk_need_resched(struct task_struct *tsk)
 {
-	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+	clear_tsk_thread_flag(tsk, tif_resched(RESCHED_eager));
+	clear_tsk_thread_flag(tsk, tif_resched(RESCHED_lazy));
 }
 
-static inline bool test_tsk_need_resched(struct task_struct *tsk)
+static inline bool test_tsk_need_resched(struct task_struct *tsk, resched_t lazy)
 {
-	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+	return unlikely(test_tsk_thread_flag(tsk, tif_resched(lazy)));
+}
+
+static inline bool test_tsk_need_resched_any(struct task_struct *tsk)
+{
+	return test_tsk_need_resched(tsk, RESCHED_eager) ||
+			test_tsk_need_resched(tsk, RESCHED_lazy);
 }
 
 /*
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index cb1caefa8bd0..a7776ae78900 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2231,7 +2231,7 @@ void rcu_sched_clock_irq(int user)
 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
 		/* Idle and userspace execution already are quiescent states. */
 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
-			set_tsk_need_resched(current);
+			set_tsk_need_resched(current, RESCHED_eager);
 			set_preempt_need_resched();
 		}
 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
@@ -2379,7 +2379,7 @@ static __latent_entropy void rcu_core(void)
 	if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
 		rcu_preempt_deferred_qs(current);
 	} else if (rcu_preempt_need_deferred_qs(current)) {
-		set_tsk_need_resched(current);
+		set_tsk_need_resched(current, RESCHED_eager);
 		set_preempt_need_resched();
 	}
 
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 8239b39d945b..a4a23ac1115b 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -755,7 +755,7 @@ static void rcu_exp_handler(void *unused)
 			rcu_report_exp_rdp(rdp);
 		} else {
 			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
-			set_tsk_need_resched(t);
+			set_tsk_need_resched(t, RESCHED_eager);
 			set_preempt_need_resched();
 		}
 		return;
@@ -856,7 +856,7 @@ static void rcu_exp_need_qs(void)
 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
 	/* Store .exp before .rcu_urgent_qs. */
 	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
-	set_tsk_need_resched(current);
+	set_tsk_need_resched(current, RESCHED_eager);
 	set_preempt_need_resched();
 }
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 41021080ad25..f87191e008ff 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -658,7 +658,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 			// Also if no expediting and no possible deboosting,
 			// slow is OK.  Plus nohz_full CPUs eventually get
 			// tick enabled.
-			set_tsk_need_resched(current);
+			set_tsk_need_resched(current, RESCHED_eager);
 			set_preempt_need_resched();
 			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
 			    expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
@@ -725,7 +725,7 @@ static void rcu_flavor_sched_clock_irq(int user)
 	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
 		/* No QS, force context switch if deferred. */
 		if (rcu_preempt_need_deferred_qs(t)) {
-			set_tsk_need_resched(t);
+			set_tsk_need_resched(t, RESCHED_eager);
 			set_preempt_need_resched();
 		}
 	} else if (rcu_preempt_need_deferred_qs(t)) {
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 6f06dc12904a..b74b7b04cf35 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -705,7 +705,7 @@ static void print_cpu_stall(unsigned long gps)
 	 * progress and it could be we're stuck in kernel space without context
 	 * switches for an entirely unreasonable amount of time.
 	 */
-	set_tsk_need_resched(current);
+	set_tsk_need_resched(current, RESCHED_eager);
 	set_preempt_need_resched();
 }
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e30007c11722..e2215c417323 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -927,7 +927,7 @@ static bool set_nr_if_polling(struct task_struct *p)
 #else
 static inline bool set_nr_and_not_polling(struct task_struct *p)
 {
-	set_tsk_need_resched(p);
+	set_tsk_need_resched(p, RESCHED_eager);
 	return true;
 }
 
@@ -1039,13 +1039,13 @@ void resched_curr(struct rq *rq)
 
 	lockdep_assert_rq_held(rq);
 
-	if (test_tsk_need_resched(curr))
+	if (test_tsk_need_resched(curr, RESCHED_eager))
 		return;
 
 	cpu = cpu_of(rq);
 
 	if (cpu == smp_processor_id()) {
-		set_tsk_need_resched(curr);
+		set_tsk_need_resched(curr, RESCHED_eager);
 		set_preempt_need_resched();
 		return;
 	}
@@ -2223,7 +2223,8 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule.  In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
+	if (task_on_rq_queued(rq->curr) &&
+	    test_tsk_need_resched(rq->curr, RESCHED_eager))
 		rq_clock_skip_update(rq);
 }
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 58b542bf2893..e6815c3bd2f0 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1953,7 +1953,7 @@ static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
 	 * let us try to decide what's the best thing to do...
 	 */
 	if ((p->dl.deadline == rq->curr->dl.deadline) &&
-	    !test_tsk_need_resched(rq->curr))
+	    !test_tsk_need_resched(rq->curr, RESCHED_eager))
 		check_preempt_equal_dl(rq, p);
 #endif /* CONFIG_SMP */
 }
@@ -2467,7 +2467,7 @@ static void pull_dl_task(struct rq *this_rq)
 static void task_woken_dl(struct rq *rq, struct task_struct *p)
 {
 	if (!task_on_cpu(rq, p) &&
-	    !test_tsk_need_resched(rq->curr) &&
+	    !test_tsk_need_resched(rq->curr, RESCHED_eager) &&
 	    p->nr_cpus_allowed > 1 &&
 	    dl_task(rq->curr) &&
 	    (rq->curr->nr_cpus_allowed < 2 ||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df348aa55d3c..4d86c618ffa2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8087,7 +8087,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	 * prevents us from potentially nominating it as a false LAST_BUDDY
 	 * below.
 	 */
-	if (test_tsk_need_resched(curr))
+	if (test_tsk_need_resched(curr, RESCHED_eager))
 		return;
 
 	/* Idle tasks are by definition preempted by non-idle tasks. */
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index d4a55448e459..eacd204e2879 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -329,7 +329,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
 	struct idle_timer *it = container_of(timer, struct idle_timer, timer);
 
 	WRITE_ONCE(it->done, 1);
-	set_tsk_need_resched(current);
+	set_tsk_need_resched(current, RESCHED_eager);
 
 	return HRTIMER_NORESTART;
 }
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 0597ba0f85ff..a79ce6746dd0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1735,7 +1735,7 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
 	 * to move current somewhere else, making room for our non-migratable
 	 * task.
 	 */
-	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
+	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr, RESCHED_eager))
 		check_preempt_equal_prio(rq, p);
 #endif
 }
@@ -2466,7 +2466,7 @@ static void pull_rt_task(struct rq *this_rq)
 static void task_woken_rt(struct rq *rq, struct task_struct *p)
 {
 	bool need_to_push = !task_on_cpu(rq, p) &&
-			    !test_tsk_need_resched(rq->curr) &&
+			    !test_tsk_need_resched(rq->curr, RESCHED_eager) &&
 			    p->nr_cpus_allowed > 1 &&
 			    (dl_task(rq->curr) || rt_task(rq->curr)) &&
 			    (rq->curr->nr_cpus_allowed < 2 ||
-- 
2.31.1


  parent reply	other threads:[~2023-11-07 22:04 UTC|newest]

Thread overview: 250+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-11-07 21:56 [RFC PATCH 00/86] Make the kernel preemptible Ankur Arora
2023-11-07 21:56 ` [RFC PATCH 01/86] Revert "riscv: support PREEMPT_DYNAMIC with static keys" Ankur Arora
2023-11-07 21:56 ` [RFC PATCH 02/86] Revert "sched/core: Make sched_dynamic_mutex static" Ankur Arora
2023-11-07 23:04   ` Steven Rostedt
2023-11-07 21:56 ` [RFC PATCH 03/86] Revert "ftrace: Use preemption model accessors for trace header printout" Ankur Arora
2023-11-07 23:10   ` Steven Rostedt
2023-11-07 23:23     ` Ankur Arora
2023-11-07 23:31       ` Steven Rostedt
2023-11-07 23:34         ` Steven Rostedt
2023-11-08  0:12           ` Ankur Arora
2023-11-07 21:56 ` [RFC PATCH 04/86] Revert "preempt/dynamic: Introduce preemption model accessors" Ankur Arora
2023-11-07 23:12   ` Steven Rostedt
2023-11-08  4:59     ` Ankur Arora
2023-11-07 21:56 ` [RFC PATCH 05/86] Revert "kcsan: Use " Ankur Arora
2023-11-07 21:56 ` [RFC PATCH 06/86] Revert "entry: Fix compile error in dynamic_irqentry_exit_cond_resched()" Ankur Arora
2023-11-08  7:47   ` Greg KH
2023-11-08  9:09     ` Ankur Arora
2023-11-08 10:00       ` Greg KH
2023-11-07 21:56 ` [RFC PATCH 07/86] Revert "livepatch,sched: Add livepatch task switching to cond_resched()" Ankur Arora
2023-11-07 23:16   ` Steven Rostedt
2023-11-08  4:55     ` Ankur Arora
2023-11-09 17:26     ` Josh Poimboeuf
2023-11-09 17:31       ` Steven Rostedt
2023-11-09 17:51         ` Josh Poimboeuf
2023-11-09 22:50           ` Ankur Arora
2023-11-09 23:47             ` Josh Poimboeuf
2023-11-10  0:46               ` Ankur Arora
2023-11-10  0:56           ` Steven Rostedt
2023-11-07 21:56 ` [RFC PATCH 08/86] Revert "arm64: Support PREEMPT_DYNAMIC" Ankur Arora
2023-11-07 23:17   ` Steven Rostedt
2023-11-08 15:44   ` Mark Rutland
2023-11-07 21:56 ` [RFC PATCH 09/86] Revert "sched/preempt: Add PREEMPT_DYNAMIC using static keys" Ankur Arora
2023-11-07 21:56 ` [RFC PATCH 10/86] Revert "sched/preempt: Decouple HAVE_PREEMPT_DYNAMIC from GENERIC_ENTRY" Ankur Arora
2023-11-07 21:56 ` [RFC PATCH 11/86] Revert "sched/preempt: Simplify irqentry_exit_cond_resched() callers" Ankur Arora
2023-11-07 21:56 ` [RFC PATCH 12/86] Revert "sched/preempt: Refactor sched_dynamic_update()" Ankur Arora
2023-11-07 21:56 ` [RFC PATCH 13/86] Revert "sched/preempt: Move PREEMPT_DYNAMIC logic later" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 14/86] Revert "preempt/dynamic: Fix setup_preempt_mode() return value" Ankur Arora
2023-11-07 23:20   ` Steven Rostedt
2023-11-07 21:57 ` [RFC PATCH 15/86] Revert "preempt: Restore preemption model selection configs" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 16/86] Revert "sched: Provide Kconfig support for default dynamic preempt mode" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 17/86] sched/preempt: remove PREEMPT_DYNAMIC from the build version Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 18/86] Revert "preempt/dynamic: Fix typo in macro conditional statement" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 19/86] Revert "sched,preempt: Move preempt_dynamic to debug.c" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 20/86] Revert "static_call: Relax static_call_update() function argument type" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 21/86] Revert "sched/core: Use -EINVAL in sched_dynamic_mode()" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 22/86] Revert "sched/core: Stop using magic values " Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 23/86] Revert "sched,x86: Allow !PREEMPT_DYNAMIC" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 24/86] Revert "sched: Harden PREEMPT_DYNAMIC" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 25/86] Revert "sched: Add /debug/sched_preempt" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 26/86] Revert "preempt/dynamic: Support dynamic preempt with preempt= boot option" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 27/86] Revert "preempt/dynamic: Provide irqentry_exit_cond_resched() static call" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 28/86] Revert "preempt/dynamic: Provide preempt_schedule[_notrace]() static calls" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 29/86] Revert "preempt/dynamic: Provide cond_resched() and might_resched() " Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 30/86] Revert "preempt: Introduce CONFIG_PREEMPT_DYNAMIC" Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 31/86] x86/thread_info: add TIF_NEED_RESCHED_LAZY Ankur Arora
2023-11-07 23:26   ` Steven Rostedt
2023-11-07 21:57 ` [RFC PATCH 32/86] entry: handle TIF_NEED_RESCHED_LAZY Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 33/86] entry/kvm: " Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 34/86] thread_info: accessors for TIF_NEED_RESCHED* Ankur Arora
2023-11-08  8:58   ` Peter Zijlstra
2023-11-21  5:59     ` Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 35/86] thread_info: change to tif_need_resched(resched_t) Ankur Arora
2023-11-08  9:00   ` Peter Zijlstra
2023-11-07 21:57 ` [RFC PATCH 36/86] entry: irqentry_exit only preempts TIF_NEED_RESCHED Ankur Arora
2023-11-08  9:01   ` Peter Zijlstra
2023-11-21  6:00     ` Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 37/86] sched: make test_*_tsk_thread_flag() return bool Ankur Arora
2023-11-08  9:02   ` Peter Zijlstra
2023-11-07 21:57 ` Ankur Arora [this message]
2023-11-08  9:03   ` [RFC PATCH 38/86] sched: *_tsk_need_resched() now takes resched_t Peter Zijlstra
2023-11-07 21:57 ` [RFC PATCH 39/86] sched: handle lazy resched in set_nr_*_polling() Ankur Arora
2023-11-08  9:15   ` Peter Zijlstra
2023-11-07 21:57 ` [RFC PATCH 40/86] context_tracking: add ct_state_cpu() Ankur Arora
2023-11-08  9:16   ` Peter Zijlstra
2023-11-21  6:32     ` Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 41/86] sched: handle resched policy in resched_curr() Ankur Arora
2023-11-08  9:36   ` Peter Zijlstra
2023-11-08 10:26     ` Ankur Arora
2023-11-08 10:46       ` Peter Zijlstra
2023-11-21  6:34         ` Ankur Arora
2023-11-21  6:31       ` Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 42/86] sched: force preemption on tick expiration Ankur Arora
2023-11-08  9:56   ` Peter Zijlstra
2023-11-21  6:44     ` Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 43/86] sched: enable PREEMPT_COUNT, PREEMPTION for all preemption models Ankur Arora
2023-11-08  9:58   ` Peter Zijlstra
2023-11-07 21:57 ` [RFC PATCH 44/86] sched: voluntary preemption Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 45/86] preempt: ARCH_NO_PREEMPT only preempts lazily Ankur Arora
2023-11-08  0:07   ` Steven Rostedt
2023-11-08  8:47     ` Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 46/86] tracing: handle lazy resched Ankur Arora
2023-11-08  0:19   ` Steven Rostedt
2023-11-08  9:24     ` Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 47/86] rcu: select PREEMPT_RCU if PREEMPT Ankur Arora
2023-11-08  0:27   ` Steven Rostedt
2023-11-21  0:28     ` Paul E. McKenney
2023-11-21  3:43       ` Steven Rostedt
2023-11-21  5:04         ` Paul E. McKenney
2023-11-21  5:39           ` Ankur Arora
2023-11-21 15:00           ` Steven Rostedt
2023-11-21 15:19             ` Paul E. McKenney
2023-11-28 10:53               ` Thomas Gleixner
2023-11-28 18:30                 ` Ankur Arora
2023-12-05  1:03                   ` Paul E. McKenney
2023-12-05  1:01                 ` Paul E. McKenney
2023-12-05 15:01                   ` Steven Rostedt
2023-12-05 19:38                     ` Paul E. McKenney
2023-12-05 20:18                       ` Ankur Arora
2023-12-06  4:07                         ` Paul E. McKenney
2023-12-07  1:33                           ` Ankur Arora
2023-12-05 20:45                       ` Steven Rostedt
2023-12-06 10:08                         ` David Laight
2023-12-07  4:34                         ` Paul E. McKenney
2023-12-07 13:44                           ` Steven Rostedt
2023-12-08  4:28                             ` Paul E. McKenney
2023-11-08 12:15   ` Julian Anastasov
2023-11-07 21:57 ` [RFC PATCH 48/86] rcu: handle quiescent states for PREEMPT_RCU=n Ankur Arora
2023-11-21  0:38   ` Paul E. McKenney
2023-11-21  3:26     ` Ankur Arora
2023-11-21  5:17       ` Paul E. McKenney
2023-11-21  5:34         ` Paul E. McKenney
2023-11-21  6:13           ` Z qiang
2023-11-21 15:32             ` Paul E. McKenney
2023-11-21 19:25           ` Paul E. McKenney
2023-11-21 20:30             ` Peter Zijlstra
2023-11-21 21:14               ` Paul E. McKenney
2023-11-21 21:38                 ` Steven Rostedt
2023-11-21 22:26                   ` Paul E. McKenney
2023-11-21 22:52                     ` Steven Rostedt
2023-11-22  0:01                       ` Paul E. McKenney
2023-11-22  0:12                         ` Steven Rostedt
2023-11-22  1:09                           ` Paul E. McKenney
2023-11-28 17:04     ` Thomas Gleixner
2023-12-05  1:33       ` Paul E. McKenney
2023-12-06 15:10         ` Thomas Gleixner
2023-12-07  4:17           ` Paul E. McKenney
2023-12-07  1:31         ` Ankur Arora
2023-12-07  2:10           ` Steven Rostedt
2023-12-07  4:37             ` Paul E. McKenney
2023-12-07 14:22           ` Thomas Gleixner
2023-11-21  3:55   ` Z qiang
2023-11-07 21:57 ` [RFC PATCH 49/86] osnoise: handle quiescent states directly Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 50/86] rcu: TASKS_RCU does not need to depend on PREEMPTION Ankur Arora
2023-11-21  0:38   ` Paul E. McKenney
2023-11-07 21:57 ` [RFC PATCH 51/86] preempt: disallow !PREEMPT_COUNT or !PREEMPTION Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 52/86] sched: remove CONFIG_PREEMPTION from *_needbreak() Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 53/86] sched: fixup __cond_resched_*() Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 54/86] sched: add cond_resched_stall() Ankur Arora
2023-11-09 11:19   ` Thomas Gleixner
2023-11-09 22:27     ` Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 55/86] xarray: add cond_resched_xas_rcu() and cond_resched_xas_lock_irq() Ankur Arora
2023-11-07 21:57 ` [RFC PATCH 56/86] xarray: use cond_resched_xas*() Ankur Arora
2023-11-07 23:01 ` [RFC PATCH 00/86] Make the kernel preemptible Steven Rostedt
2023-11-07 23:43   ` Ankur Arora
2023-11-08  0:00     ` Steven Rostedt
2023-11-07 23:07 ` [RFC PATCH 57/86] coccinelle: script to remove cond_resched() Ankur Arora
2023-11-07 23:07   ` [RFC PATCH 58/86] treewide: x86: " Ankur Arora
2023-11-07 23:07   ` [RFC PATCH 59/86] treewide: rcu: " Ankur Arora
2023-11-21  1:01     ` Paul E. McKenney
2023-11-07 23:07   ` [RFC PATCH 60/86] treewide: torture: " Ankur Arora
2023-11-21  1:02     ` Paul E. McKenney
2023-11-07 23:07   ` [RFC PATCH 61/86] treewide: bpf: " Ankur Arora
2023-11-07 23:07   ` [RFC PATCH 62/86] treewide: trace: " Ankur Arora
2023-11-07 23:07   ` [RFC PATCH 63/86] treewide: futex: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 64/86] treewide: printk: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 65/86] treewide: task_work: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 66/86] treewide: kernel: " Ankur Arora
2023-11-17 18:14     ` Luis Chamberlain
2023-11-17 19:51       ` Steven Rostedt
2023-11-07 23:08   ` [RFC PATCH 67/86] treewide: kernel: remove cond_reshed() Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 68/86] treewide: mm: remove cond_resched() Ankur Arora
2023-11-08  1:28     ` Sergey Senozhatsky
2023-11-08  7:49       ` Vlastimil Babka
2023-11-08  8:02         ` Yosry Ahmed
2023-11-08  8:54           ` Ankur Arora
2023-11-08 12:58             ` Matthew Wilcox
2023-11-08 14:50               ` Steven Rostedt
2023-11-07 23:08   ` [RFC PATCH 69/86] treewide: io_uring: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 70/86] treewide: ipc: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 71/86] treewide: lib: " Ankur Arora
2023-11-08  9:15     ` Herbert Xu
2023-11-08 15:08       ` Steven Rostedt
2023-11-09  4:19         ` Herbert Xu
2023-11-09  4:43           ` Steven Rostedt
2023-11-08 19:15     ` Kees Cook
2023-11-08 19:41       ` Steven Rostedt
2023-11-08 22:16         ` Kees Cook
2023-11-08 22:21           ` Steven Rostedt
2023-11-09  9:39         ` David Laight
2023-11-07 23:08   ` [RFC PATCH 72/86] treewide: crypto: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 73/86] treewide: security: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 74/86] treewide: fs: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 75/86] treewide: virt: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 76/86] treewide: block: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 77/86] treewide: netfilter: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 78/86] treewide: net: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 79/86] " Ankur Arora
2023-11-08 12:16     ` Eric Dumazet
2023-11-08 17:11       ` Steven Rostedt
2023-11-08 20:59         ` Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 80/86] treewide: sound: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 81/86] treewide: md: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 82/86] treewide: mtd: " Ankur Arora
2023-11-08 16:28     ` Miquel Raynal
2023-11-08 16:32       ` Matthew Wilcox
2023-11-08 17:21         ` Steven Rostedt
2023-11-09  8:38           ` Miquel Raynal
2023-11-07 23:08   ` [RFC PATCH 83/86] treewide: drm: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 84/86] treewide: net: " Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 85/86] treewide: drivers: " Ankur Arora
2023-11-08  0:48     ` Chris Packham
2023-11-09  0:55       ` Ankur Arora
2023-11-09 23:25     ` Dmitry Torokhov
2023-11-09 23:41       ` Steven Rostedt
2023-11-10  0:01       ` Ankur Arora
2023-11-07 23:08   ` [RFC PATCH 86/86] sched: " Ankur Arora
2023-11-07 23:19   ` [RFC PATCH 57/86] coccinelle: script to " Julia Lawall
2023-11-08  8:29     ` Ankur Arora
2023-11-08  9:49       ` Julia Lawall
2023-11-21  0:45   ` Paul E. McKenney
2023-11-21  5:16     ` Ankur Arora
2023-11-21 15:26       ` Paul E. McKenney
2023-11-08  4:08 ` [RFC PATCH 00/86] Make the kernel preemptible Christoph Lameter
2023-11-08  4:33   ` Ankur Arora
2023-11-08  4:52     ` Christoph Lameter
2023-11-08  5:12       ` Steven Rostedt
2023-11-08  6:49         ` Ankur Arora
2023-11-08  7:54         ` Vlastimil Babka
2023-11-08  7:31 ` Juergen Gross
2023-11-08  8:51 ` Peter Zijlstra
2023-11-08  9:53   ` Daniel Bristot de Oliveira
2023-11-08 10:04   ` Ankur Arora
2023-11-08 10:13     ` Peter Zijlstra
2023-11-08 11:00       ` Ankur Arora
2023-11-08 11:14         ` Peter Zijlstra
2023-11-08 12:16           ` Peter Zijlstra
2023-11-08 15:38       ` Thomas Gleixner
2023-11-08 16:15         ` Peter Zijlstra
2023-11-08 16:22         ` Steven Rostedt
2023-11-08 16:49           ` Peter Zijlstra
2023-11-08 17:18             ` Steven Rostedt
2023-11-08 20:46             ` Ankur Arora
2023-11-08 20:26         ` Ankur Arora
2023-11-08  9:43 ` David Laight
2023-11-08 15:15   ` Steven Rostedt
2023-11-08 16:29     ` David Laight
2023-11-08 16:33 ` Mark Rutland
2023-11-09  0:34   ` Ankur Arora
2023-11-09 11:00     ` Mark Rutland
2023-11-09 22:36       ` Ankur Arora

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20231107215742.363031-39-ankur.a.arora@oracle.com \
    --to=ankur.a.arora@oracle.com \
    --cc=David.Laight@ACULAB.COM \
    --cc=akpm@linux-foundation.org \
    --cc=andrew.cooper3@citrix.com \
    --cc=anton.ivanov@cambridgegreys.com \
    --cc=bharata@amd.com \
    --cc=boris.ostrovsky@oracle.com \
    --cc=bp@alien8.de \
    --cc=bristot@kernel.org \
    --cc=dave.hansen@linux.intel.com \
    --cc=geert@linux-m68k.org \
    --cc=glaubitz@physik.fu-berlin.de \
    --cc=hpa@zytor.com \
    --cc=jgross@suse.com \
    --cc=jon.grimm@amd.com \
    --cc=juri.lelli@redhat.com \
    --cc=konrad.wilk@oracle.com \
    --cc=krypton@ulrich-teichert.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=luto@kernel.org \
    --cc=mathieu.desnoyers@efficios.com \
    --cc=mattst88@gmail.com \
    --cc=mgorman@suse.de \
    --cc=mingo@kernel.org \
    --cc=mingo@redhat.com \
    --cc=mjguzik@gmail.com \
    --cc=paulmck@kernel.org \
    --cc=peterz@infradead.org \
    --cc=raghavendra.kt@amd.com \
    --cc=richard@nod.at \
    --cc=rostedt@goodmis.org \
    --cc=tglx@linutronix.de \
    --cc=torvalds@linux-foundation.org \
    --cc=vincent.guittot@linaro.org \
    --cc=willy@infradead.org \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).