From: paulmck@kernel.org
To: rcu@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, kernel-team@fb.com,
	mingo@kernel.org, jiangshanlai@gmail.com, dipankar@in.ibm.com,
	akpm@linux-foundation.org, mathieu.desnoyers@efficios.com,
	josh@joshtriplett.org, tglx@linutronix.de, peterz@infradead.org,
	rostedt@goodmis.org, dhowells@redhat.com, edumazet@google.com,
	fweisbec@gmail.com, oleg@redhat.com, joel@joelfernandes.org,
	"Paul E. McKenney" <paulmck@kernel.org>
Subject: [PATCH v3 tip/core/rcu 08/34] rcu-tasks: Refactor RCU-tasks to allow variants to be added
Date: Fri, 27 Mar 2020 15:24:30 -0700
Message-ID: <20200327222456.12470-8-paulmck@kernel.org>
In-Reply-To: <20200327222346.GA12082@paulmck-ThinkPad-P72>

From: "Paul E. McKenney" <paulmck@kernel.org>

This commit splits out the generic grace-period and callback processing
from the RCU-tasks-specific processing so that additional flavors can
be added.  It also adds a def_bool Kconfig option, TASKS_RCU_GENERIC,
which enables the common RCU-tasks infrastructure code.

This is primarily, but not entirely, a code-movement commit.
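
As an illustrative sketch only (not part of this patch, and with
hypothetical names such as rcu_tasks_example), a new flavor built on
this infrastructure supplies a grace-period-wait function, instantiates
the shared state with DEFINE_RCU_TASKS(), and routes its public API
through the _generic() helpers; its Kconfig option would also arrange
for TASKS_RCU_GENERIC to be enabled so the shared code gets built:

static void rcu_tasks_example_wait_gp(struct rcu_tasks *rtp)
{
	/* Flavor-specific grace-period machinery would go here. */
}

void call_rcu_tasks_example(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_example, rcu_tasks_example_wait_gp,
		 call_rcu_tasks_example);

/* Queue a callback for this hypothetical flavor. */
void call_rcu_tasks_example(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_example);
}

/* Wait for a grace period for this hypothetical flavor. */
void synchronize_rcu_tasks_example(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_example);
}

static int __init rcu_spawn_tasks_example_kthread(void)
{
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_example);
	return 0;
}
core_initcall(rcu_spawn_tasks_example_kthread);

Later patches in this series add the rude and trace variants along
roughly these lines.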

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 include/linux/rcupdate.h |   6 +-
 kernel/rcu/Kconfig       |  10 +-
 kernel/rcu/tasks.h       | 491 +++++++++++++++++++++++++----------------------
 kernel/rcu/update.c      |   4 +
 4 files changed, 272 insertions(+), 239 deletions(-)

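(Not part of this patch: a hypothetical sketch of how a future flavor's
Kconfig option would hook into the new TASKS_RCU_GENERIC symbol, namely
by extending its def_bool expression so that the shared infrastructure
is built whenever any flavor is enabled.  TASKS_EXAMPLE_RCU is an
invented name.)

config TASKS_RCU_GENERIC
	def_bool TASKS_RCU || TASKS_EXAMPLE_RCU
	select SRCU
	help
	  This option enables generic infrastructure code supporting
	  task-based RCU implementations.  Not for manual selection.
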
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2678a37..5523145 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -129,7 +129,7 @@ static inline void rcu_init_nohz(void) { }
  * Note a quasi-voluntary context switch for RCU-tasks's benefit.
  * This is a macro rather than an inline function to avoid #include hell.
  */
-#ifdef CONFIG_TASKS_RCU
+#ifdef CONFIG_TASKS_RCU_GENERIC
 #define rcu_tasks_qs(t) \
 	do { \
 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
@@ -140,14 +140,14 @@ void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
-#else /* #ifdef CONFIG_TASKS_RCU */
+#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
 #define rcu_tasks_qs(t)	do { } while (0)
 #define rcu_note_voluntary_context_switch(t) do { } while (0)
 #define call_rcu_tasks call_rcu
 #define synchronize_rcu_tasks synchronize_rcu
 static inline void exit_tasks_rcu_start(void) { }
 static inline void exit_tasks_rcu_finish(void) { }
-#endif /* #else #ifdef CONFIG_TASKS_RCU */
+#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
 
 /**
  * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index 1cc940f..38475d0 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -70,13 +70,19 @@ config TREE_SRCU
 	help
 	  This option selects the full-fledged version of SRCU.
 
+config TASKS_RCU_GENERIC
+	def_bool TASKS_RCU
+	select SRCU
+	help
+	  This option enables generic infrastructure code supporting
+	  task-based RCU implementations.  Not for manual selection.
+
 config TASKS_RCU
 	def_bool PREEMPTION
-	select SRCU
 	help
 	  This option enables a task-based RCU implementation that uses
 	  only voluntary context switch (not preemption!), idle, and
-	  user-mode execution as quiescent states.
+	  user-mode execution as quiescent states.  Not for manual selection.
 
 config RCU_STALL_COMMON
 	def_bool TREE_RCU
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 5ccfe0d..d77921e 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -5,7 +5,13 @@
  * Copyright (C) 2020 Paul E. McKenney
  */
 
-#ifdef CONFIG_TASKS_RCU
+
+////////////////////////////////////////////////////////////////////////
+//
+// Generic data structures.
+
+struct rcu_tasks;
+typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
 
 /**
  * Definition for a Tasks-RCU-like mechanism.
@@ -14,6 +20,8 @@
  * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
  * @cbs_lock: Lock protecting callback list.
  * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
+ * @gp_func: This flavor's grace-period-wait function.
+ * @call_func: This flavor's call_rcu()-equivalent function.
  */
 struct rcu_tasks {
 	struct rcu_head *cbs_head;
@@ -21,29 +29,20 @@ struct rcu_tasks {
 	struct wait_queue_head cbs_wq;
 	raw_spinlock_t cbs_lock;
 	struct task_struct *kthread_ptr;
+	rcu_tasks_gp_func_t gp_func;
+	call_rcu_func_t call_func;
 };
 
-#define DEFINE_RCU_TASKS(name)						\
+#define DEFINE_RCU_TASKS(name, gp, call)				\
 static struct rcu_tasks name =						\
 {									\
 	.cbs_tail = &name.cbs_head,					\
 	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(name.cbs_wq),		\
 	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(name.cbs_lock),		\
+	.gp_func = gp,							\
+	.call_func = call,						\
 }
 
-/*
- * Simple variant of RCU whose quiescent states are voluntary context
- * switch, cond_resched_rcu_qs(), user-space execution, and idle.
- * As such, grace periods can take one good long time.  There are no
- * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
- * because this implementation is intended to get the system into a safe
- * state for some of the manipulations involved in tracing and the like.
- * Finally, this implementation does not support high call_rcu_tasks()
- * rates from multiple CPUs.  If this is required, per-CPU callback lists
- * will be needed.
- */
-DEFINE_RCU_TASKS(rcu_tasks);
-
 /* Track exiting tasks in order to allow them to be waited for. */
 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
 
@@ -52,29 +51,16 @@ DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
 module_param(rcu_task_stall_timeout, int, 0644);
 
-/**
- * call_rcu_tasks() - Queue an RCU-tasks callback for invocation after a grace period
- * @rhp: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_tasks() assumes
- * that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
- * or transition to usermode execution.  As such, there are no read-side
- * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
- * this primitive is intended to determine that all tasks have passed
- * through a safe state, not so much for data-structure synchronization.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
-void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
+////////////////////////////////////////////////////////////////////////
+//
+// Generic code.
+
+// Enqueue a callback for the specified flavor of Tasks RCU.
+static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
+				   struct rcu_tasks *rtp)
 {
 	unsigned long flags;
 	bool needwake;
-	struct rcu_tasks *rtp = &rcu_tasks;
 
 	rhp->next = NULL;
 	rhp->func = func;
@@ -87,108 +73,25 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 	if (needwake && READ_ONCE(rtp->kthread_ptr))
 		wake_up(&rtp->cbs_wq);
 }
-EXPORT_SYMBOL_GPL(call_rcu_tasks);
 
-/**
- * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu-tasks
- * grace period has elapsed, in other words after all currently
- * executing rcu-tasks read-side critical sections have elapsed.  These
- * read-side critical sections are delimited by calls to schedule(),
- * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
- * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
- *
- * This is a very specialized primitive, intended only for a few uses in
- * tracing and other situations requiring manipulation of function
- * preambles and profiling hooks.  The synchronize_rcu_tasks() function
- * is not (yet) intended for heavy use from multiple CPUs.
- *
- * Note that this guarantee implies further memory-ordering guarantees.
- * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
- * each CPU is guaranteed to have executed a full memory barrier since the
- * end of its last RCU-tasks read-side critical section whose beginning
- * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
- * having an RCU-tasks read-side critical section that extends beyond
- * the return from synchronize_rcu_tasks() is guaranteed to have executed
- * a full memory barrier after the beginning of synchronize_rcu_tasks()
- * and before the beginning of that RCU-tasks read-side critical section.
- * Note that these guarantees include CPUs that are offline, idle, or
- * executing in user mode, as well as CPUs that are executing in the kernel.
- *
- * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
- * to its caller on CPU B, then both CPU A and CPU B are guaranteed
- * to have executed a full memory barrier during the execution of
- * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
- * (but again only if the system has more than one CPU).
- */
-void synchronize_rcu_tasks(void)
+// Wait for a grace period for the specified flavor of Tasks RCU.
+static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
 {
 	/* Complain if the scheduler has not started.  */
 	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
 			 "synchronize_rcu_tasks called too soon");
 
 	/* Wait for the grace period. */
-	wait_rcu_gp(call_rcu_tasks);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
-
-/**
- * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
- *
- * Although the current implementation is guaranteed to wait, it is not
- * obligated to, for example, if there are no pending callbacks.
- */
-void rcu_barrier_tasks(void)
-{
-	/* There is only one callback queue, so this is easy.  ;-) */
-	synchronize_rcu_tasks();
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
-
-/* See if tasks are still holding out, complain if so. */
-static void check_holdout_task(struct task_struct *t,
-			       bool needreport, bool *firstreport)
-{
-	int cpu;
-
-	if (!READ_ONCE(t->rcu_tasks_holdout) ||
-	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
-	    !READ_ONCE(t->on_rq) ||
-	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
-	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
-		WRITE_ONCE(t->rcu_tasks_holdout, false);
-		list_del_init(&t->rcu_tasks_holdout_list);
-		put_task_struct(t);
-		return;
-	}
-	rcu_request_urgent_qs_task(t);
-	if (!needreport)
-		return;
-	if (*firstreport) {
-		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
-		*firstreport = false;
-	}
-	cpu = task_cpu(t);
-	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
-		 t, ".I"[is_idle_task(t)],
-		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
-		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
-		 t->rcu_tasks_idle_cpu, cpu);
-	sched_show_task(t);
+	wait_rcu_gp(rtp->call_func);
 }
 
 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
 static int __noreturn rcu_tasks_kthread(void *arg)
 {
 	unsigned long flags;
-	struct task_struct *g, *t;
-	unsigned long lastreport;
 	struct rcu_head *list;
 	struct rcu_head *next;
-	LIST_HEAD(rcu_tasks_holdouts);
 	struct rcu_tasks *rtp = arg;
-	int fract;
 
 	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
 	housekeeping_affine(current, HK_FLAG_RCU);
@@ -220,111 +123,8 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 			continue;
 		}
 
-		/*
-		 * Wait for all pre-existing t->on_rq and t->nvcsw
-		 * transitions to complete.  Invoking synchronize_rcu()
-		 * suffices because all these transitions occur with
-		 * interrupts disabled.  Without this synchronize_rcu(),
-		 * a read-side critical section that started before the
-		 * grace period might be incorrectly seen as having started
-		 * after the grace period.
-		 *
-		 * This synchronize_rcu() also dispenses with the
-		 * need for a memory barrier on the first store to
-		 * t->rcu_tasks_holdout, as it forces the store to happen
-		 * after the beginning of the grace period.
-		 */
-		synchronize_rcu();
-
-		/*
-		 * There were callbacks, so we need to wait for an
-		 * RCU-tasks grace period.  Start off by scanning
-		 * the task list for tasks that are not already
-		 * voluntarily blocked.  Mark these tasks and make
-		 * a list of them in rcu_tasks_holdouts.
-		 */
-		rcu_read_lock();
-		for_each_process_thread(g, t) {
-			if (t != current && READ_ONCE(t->on_rq) &&
-			    !is_idle_task(t)) {
-				get_task_struct(t);
-				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
-				WRITE_ONCE(t->rcu_tasks_holdout, true);
-				list_add(&t->rcu_tasks_holdout_list,
-					 &rcu_tasks_holdouts);
-			}
-		}
-		rcu_read_unlock();
-
-		/*
-		 * Wait for tasks that are in the process of exiting.
-		 * This does only part of the job, ensuring that all
-		 * tasks that were previously exiting reach the point
-		 * where they have disabled preemption, allowing the
-		 * later synchronize_rcu() to finish the job.
-		 */
-		synchronize_srcu(&tasks_rcu_exit_srcu);
-
-		/*
-		 * Each pass through the following loop scans the list
-		 * of holdout tasks, removing any that are no longer
-		 * holdouts.  When the list is empty, we are done.
-		 */
-		lastreport = jiffies;
-
-		/* Start off with HZ/10 wait and slowly back off to 1 HZ wait*/
-		fract = 10;
-
-		for (;;) {
-			bool firstreport;
-			bool needreport;
-			int rtst;
-			struct task_struct *t1;
-
-			if (list_empty(&rcu_tasks_holdouts))
-				break;
-
-			/* Slowly back off waiting for holdouts */
-			schedule_timeout_interruptible(HZ/fract);
-
-			if (fract > 1)
-				fract--;
-
-			rtst = READ_ONCE(rcu_task_stall_timeout);
-			needreport = rtst > 0 &&
-				     time_after(jiffies, lastreport + rtst);
-			if (needreport)
-				lastreport = jiffies;
-			firstreport = true;
-			WARN_ON(signal_pending(current));
-			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
-						 rcu_tasks_holdout_list) {
-				check_holdout_task(t, needreport, &firstreport);
-				cond_resched();
-			}
-		}
-
-		/*
-		 * Because ->on_rq and ->nvcsw are not guaranteed
-		 * to have full memory barriers prior to them in the
-		 * schedule() path, memory reordering on other CPUs could
-		 * cause their RCU-tasks read-side critical sections to
-		 * extend past the end of the grace period.  However,
-		 * because these ->nvcsw updates are carried out with
-		 * interrupts disabled, we can use synchronize_rcu()
-		 * to force the needed ordering on all such CPUs.
-		 *
-		 * This synchronize_rcu() also confines all
-		 * ->rcu_tasks_holdout accesses to be within the grace
-		 * period, avoiding the need for memory barriers for
-		 * ->rcu_tasks_holdout accesses.
-		 *
-		 * In addition, this synchronize_rcu() waits for exiting
-		 * tasks to complete their final preempt_disable() region
-		 * of execution, cleaning up after the synchronize_srcu()
-		 * above.
-		 */
-		synchronize_rcu();
+		// Wait for one grace period.
+		rtp->gp_func(rtp);
 
 		/* Invoke the callbacks. */
 		while (list) {
@@ -340,18 +140,16 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 	}
 }
 
-/* Spawn rcu_tasks_kthread() at core_initcall() time. */
-static int __init rcu_spawn_tasks_kthread(void)
+/* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */
+static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
 {
 	struct task_struct *t;
 
-	t = kthread_run(rcu_tasks_kthread, &rcu_tasks, "rcu_tasks_kthread");
+	t = kthread_run(rcu_tasks_kthread, rtp, "rcu_tasks_kthread");
 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
-		return 0;
+		return;
 	smp_mb(); /* Ensure others see full kthread. */
-	return 0;
 }
-core_initcall(rcu_spawn_tasks_kthread);
 
 /* Do the srcu_read_lock() for the above synchronize_srcu().  */
 void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
@@ -369,8 +167,6 @@ void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
 	preempt_enable();
 }
 
-#endif /* #ifdef CONFIG_TASKS_RCU */
-
 #ifndef CONFIG_TINY_RCU
 
 /*
@@ -387,3 +183,230 @@ static void __init rcu_tasks_bootup_oddness(void)
 }
 
 #endif /* #ifndef CONFIG_TINY_RCU */
+
+#ifdef CONFIG_TASKS_RCU
+
+////////////////////////////////////////////////////////////////////////
+//
+// Simple variant of RCU whose quiescent states are voluntary context
+// switch, cond_resched_rcu_qs(), user-space execution, and idle.
+// As such, grace periods can take one good long time.  There are no
+// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
+// because this implementation is intended to get the system into a safe
+// state for some of the manipulations involved in tracing and the like.
+// Finally, this implementation does not support high call_rcu_tasks()
+// rates from multiple CPUs.  If this is required, per-CPU callback lists
+// will be needed.
+
+/* See if tasks are still holding out, complain if so. */
+static void check_holdout_task(struct task_struct *t,
+			       bool needreport, bool *firstreport)
+{
+	int cpu;
+
+	if (!READ_ONCE(t->rcu_tasks_holdout) ||
+	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
+	    !READ_ONCE(t->on_rq) ||
+	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
+	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
+		WRITE_ONCE(t->rcu_tasks_holdout, false);
+		list_del_init(&t->rcu_tasks_holdout_list);
+		put_task_struct(t);
+		return;
+	}
+	rcu_request_urgent_qs_task(t);
+	if (!needreport)
+		return;
+	if (*firstreport) {
+		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
+		*firstreport = false;
+	}
+	cpu = task_cpu(t);
+	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
+		 t, ".I"[is_idle_task(t)],
+		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
+		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
+		 t->rcu_tasks_idle_cpu, cpu);
+	sched_show_task(t);
+}
+
+/* Wait for one RCU-tasks grace period. */
+static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
+{
+	struct task_struct *g, *t;
+	unsigned long lastreport;
+	LIST_HEAD(rcu_tasks_holdouts);
+	int fract;
+
+	/*
+	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
+	 * to complete.  Invoking synchronize_rcu() suffices because all
+	 * these transitions occur with interrupts disabled.  Without this
+	 * synchronize_rcu(), a read-side critical section that started
+	 * before the grace period might be incorrectly seen as having
+	 * started after the grace period.
+	 *
+	 * This synchronize_rcu() also dispenses with the need for a
+	 * memory barrier on the first store to t->rcu_tasks_holdout,
+	 * as it forces the store to happen after the beginning of the
+	 * grace period.
+	 */
+	synchronize_rcu();
+
+	/*
+	 * There were callbacks, so we need to wait for an RCU-tasks
+	 * grace period.  Start off by scanning the task list for tasks
+	 * that are not already voluntarily blocked.  Mark these tasks
+	 * and make a list of them in rcu_tasks_holdouts.
+	 */
+	rcu_read_lock();
+	for_each_process_thread(g, t) {
+		if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
+			get_task_struct(t);
+			t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
+			WRITE_ONCE(t->rcu_tasks_holdout, true);
+			list_add(&t->rcu_tasks_holdout_list,
+				 &rcu_tasks_holdouts);
+		}
+	}
+	rcu_read_unlock();
+
+	/*
+	 * Wait for tasks that are in the process of exiting.  This
+	 * does only part of the job, ensuring that all tasks that were
+	 * previously exiting reach the point where they have disabled
+	 * preemption, allowing the later synchronize_rcu() to finish
+	 * the job.
+	 */
+	synchronize_srcu(&tasks_rcu_exit_srcu);
+
+	/*
+	 * Each pass through the following loop scans the list of holdout
+	 * tasks, removing any that are no longer holdouts.  When the list
+	 * is empty, we are done.
+	 */
+	lastreport = jiffies;
+
+	/* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
+	fract = 10;
+
+	for (;;) {
+		bool firstreport;
+		bool needreport;
+		int rtst;
+		struct task_struct *t1;
+
+		if (list_empty(&rcu_tasks_holdouts))
+			break;
+
+		/* Slowly back off waiting for holdouts */
+		schedule_timeout_interruptible(HZ/fract);
+
+		if (fract > 1)
+			fract--;
+
+		rtst = READ_ONCE(rcu_task_stall_timeout);
+		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
+		if (needreport)
+			lastreport = jiffies;
+		firstreport = true;
+		WARN_ON(signal_pending(current));
+		list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
+					 rcu_tasks_holdout_list) {
+			check_holdout_task(t, needreport, &firstreport);
+			cond_resched();
+		}
+	}
+
+	/*
+	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
+	 * memory barriers prior to them in the schedule() path, memory
+	 * reordering on other CPUs could cause their RCU-tasks read-side
+	 * critical sections to extend past the end of the grace period.
+	 * However, because these ->nvcsw updates are carried out with
+	 * interrupts disabled, we can use synchronize_rcu() to force the
+	 * needed ordering on all such CPUs.
+	 *
+	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
+	 * accesses to be within the grace period, avoiding the need for
+	 * memory barriers for ->rcu_tasks_holdout accesses.
+	 *
+	 * In addition, this synchronize_rcu() waits for exiting tasks
+	 * to complete their final preempt_disable() region of execution,
+	 * cleaning up after the synchronize_srcu() above.
+	 */
+	synchronize_rcu();
+}
+
+void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
+DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks);
+
+/**
+ * call_rcu_tasks() - Queue an RCU-tasks callback for invocation after a grace period
+ * @rhp: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_tasks() assumes
+ * that the read-side critical sections end at a voluntary context
+ * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
+ * or transition to usermode execution.  As such, there are no read-side
+ * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
+ * this primitive is intended to determine that all tasks have passed
+ * through a safe state, not so much for data-structure synchronization.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
+{
+	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
+}
+EXPORT_SYMBOL_GPL(call_rcu_tasks);
+
+/**
+ * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-tasks
+ * grace period has elapsed, in other words after all currently
+ * executing rcu-tasks read-side critical sections have elapsed.  These
+ * read-side critical sections are delimited by calls to schedule(),
+ * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
+ * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
+ *
+ * This is a very specialized primitive, intended only for a few uses in
+ * tracing and other situations requiring manipulation of function
+ * preambles and profiling hooks.  The synchronize_rcu_tasks() function
+ * is not (yet) intended for heavy use from multiple CPUs.
+ *
+ * See the description of synchronize_rcu() for more detailed information
+ * on memory ordering guarantees.
+ */
+void synchronize_rcu_tasks(void)
+{
+	synchronize_rcu_tasks_generic(&rcu_tasks);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
+
+/**
+ * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
+ *
+ * Although the current implementation is guaranteed to wait, it is not
+ * obligated to, for example, if there are no pending callbacks.
+ */
+void rcu_barrier_tasks(void)
+{
+	/* There is only one callback queue, so this is easy.  ;-) */
+	synchronize_rcu_tasks();
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
+
+static int __init rcu_spawn_tasks_kthread(void)
+{
+	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
+	return 0;
+}
+core_initcall(rcu_spawn_tasks_kthread);
+
+#endif /* #ifdef CONFIG_TASKS_RCU */
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 0fb2a9e..16058a5 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -559,7 +559,11 @@ late_initcall(rcu_verify_early_boot_tests);
 void rcu_early_boot_tests(void) {}
 #endif /* CONFIG_PROVE_RCU */
 
+#ifdef CONFIG_TASKS_RCU_GENERIC
 #include "tasks.h"
+#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+static inline void rcu_tasks_bootup_oddness(void) {}
+#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
 
 #ifndef CONFIG_TINY_RCU
 
-- 
2.9.5


Thread overview: 171+ messages
2020-03-12 18:16 [PATCH RFC tip/core/rcu 0/16] Prototype RCU usable from idle, exception, offline Paul E. McKenney
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 01/16] sched/core: Add function to sample state of non-running function paulmck
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 02/16] rcu: Add per-task state to RCU CPU stall warnings paulmck
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 03/16] rcutorture: Add flag to produce non-busy-wait task stalls paulmck
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 04/16] rcu-tasks: Move Tasks RCU to its own file paulmck
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 05/16] rcu-tasks: Create struct to hold state information paulmck
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 06/16] rcu: Reinstate synchronize_rcu_mult() paulmck
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 07/16] rcutorture: Add a test for synchronize_rcu_mult() paulmck
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 08/16] rcu-tasks: Refactor RCU-tasks to allow variants to be added paulmck
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 09/16] rcu-tasks: Add an RCU-tasks rude variant paulmck
2020-03-16 19:47   ` Joel Fernandes
2020-03-16 20:17     ` Joel Fernandes
2020-03-16 20:32       ` Paul E. McKenney
2020-03-16 21:32         ` Steven Rostedt
2020-03-16 21:45           ` Joel Fernandes
2020-03-16 22:03             ` Steven Rostedt
2020-03-16 22:11               ` Paul E. McKenney
2020-05-10  9:59               ` Lai Jiangshan
2020-05-10 15:49                 ` Paul E. McKenney
2020-05-11  0:06                   ` Lai Jiangshan
2020-05-11  2:44                     ` Paul E. McKenney
2020-05-11  0:44                 ` Masami Hiramatsu
2020-05-11 16:07                 ` Steven Rostedt
2020-05-11 19:48                 ` Steven Rostedt
2020-03-16 20:29     ` Paul E. McKenney
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 10/16] rcutorture: Add torture tests for RCU Tasks Rude paulmck
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 11/16] rcu-tasks: Use unique names for RCU-Tasks kthreads and messages paulmck
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 12/16] rcu-tasks: Further refactor RCU-tasks to allow adding more variants paulmck
2020-03-12 18:16 ` [PATCH RFC tip/core/rcu 13/16] rcu-tasks: Code movement to allow more Tasks RCU variants paulmck
2020-03-12 18:17 ` [PATCH RFC tip/core/rcu 14/16] rcu: Add an RCU Tasks Trace to simplify protection of tracing hooks paulmck
2020-03-12 18:17 ` [PATCH RFC tip/core/rcu 15/16] rcutorture: Add torture tests for RCU Tasks Trace paulmck
2020-03-12 18:17 ` [PATCH RFC tip/core/rcu 16/16] rcu-tasks: Add stall warnings " paulmck
2020-03-13 14:41 ` [PATCH RFC tip/core/rcu 0/16] Prototype RCU usable from idle, exception, offline Frederic Weisbecker
2020-03-13 15:42   ` Paul E. McKenney
2020-03-15 17:45     ` Mathieu Desnoyers
2020-03-15 17:59       ` Paul E. McKenney
2020-03-16 18:36         ` Steven Rostedt
2020-03-16 18:52           ` Paul E. McKenney
2020-03-16 14:45     ` Frederic Weisbecker
2020-03-16 15:39       ` Paul E. McKenney
2020-03-19  0:10 ` [PATCH RFC v2 tip/core/rcu 0/22] " Paul E. McKenney
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 01/22] sched/core: Add function to sample state of locked-down task paulmck
2020-03-19 17:22     ` Steven Rostedt
2020-03-19 17:35       ` Paul E. McKenney
2020-03-20  2:49         ` Paul E. McKenney
2020-03-20  3:09           ` Steven Rostedt
2020-03-20 16:27             ` Paul E. McKenney
2020-03-24  0:06           ` Joel Fernandes
2020-03-24  0:15             ` Joel Fernandes
2020-03-24 16:26               ` Paul E. McKenney
2020-03-24 15:48             ` Paul E. McKenney
2020-03-24 16:52               ` Joel Fernandes
2020-03-24 17:20                 ` Paul E. McKenney
2020-03-24 18:19                   ` Joel Fernandes
2020-03-25  0:58                     ` Paul E. McKenney
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 02/22] rcu: Add per-task state to RCU CPU stall warnings paulmck
2020-03-19 17:27     ` Steven Rostedt
2020-03-19 17:41       ` Paul E. McKenney
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 03/22] rcutorture: Add flag to produce non-busy-wait task stalls paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 04/22] rcu-tasks: Move Tasks RCU to its own file paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 05/22] rcu-tasks: Create struct to hold state information paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 06/22] rcu: Reinstate synchronize_rcu_mult() paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 07/22] rcutorture: Add a test for synchronize_rcu_mult() paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 08/22] rcu-tasks: Refactor RCU-tasks to allow variants to be added paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 09/22] rcu-tasks: Add an RCU-tasks rude variant paulmck
2020-03-19 19:04     ` Steven Rostedt
2020-03-19 23:58       ` Paul E. McKenney
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 10/22] rcutorture: Add torture tests for RCU Tasks Rude paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 11/22] rcu-tasks: Use unique names for RCU-Tasks kthreads and messages paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 12/22] rcu-tasks: Further refactor RCU-tasks to allow adding more variants paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 13/22] rcu-tasks: Code movement to allow more Tasks RCU variants paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 14/22] rcu-tasks: Add an RCU Tasks Trace to simplify protection of tracing hooks paulmck
2020-03-19  1:37     ` Joel Fernandes
2020-03-19  1:58       ` Joel Fernandes
2020-03-19  3:40         ` Paul E. McKenney
2020-03-19 19:42     ` Steven Rostedt
2020-03-20  0:28       ` Paul E. McKenney
2020-03-20  0:48         ` Steven Rostedt
2020-03-20  2:41           ` Paul E. McKenney
2020-03-28 14:06             ` Joel Fernandes
2020-03-28 15:34               ` Paul E. McKenney
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 15/22] rcutorture: Add torture tests for RCU Tasks Trace paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 16/22] rcu-tasks: Add stall warnings " paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 17/22] rcu-tasks: Move #ifdef into tasks.h paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 18/22] rcu-tasks: Add RCU tasks to rcutorture writer stall output paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 19/22] rcu-tasks: Make rcutorture writer stall output include GP state paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 20/22] rcu-tasks: Make RCU Tasks Trace make use of RCU scheduler hooks paulmck
2020-03-19  0:10   ` [PATCH RFC v2 tip/core/rcu 21/22] rcu-tasks: Add a grace-period start time for throttling and debug paulmck
2020-03-19  0:11   ` [PATCH RFC v2 tip/core/rcu 22/22] rcu-tasks: Provide boot parameter to delay IPIs until late in grace period paulmck
2020-03-19 11:31   ` [PATCH RFC v2 tip/core/rcu 0/22] Prototype RCU usable from idle, exception, offline Mathieu Desnoyers
2020-03-19 13:13     ` Paul E. McKenney
     [not found]   ` <20200319104614.11444-1-hdanton@sina.com>
2020-03-19 12:38     ` [PATCH RFC v2 tip/core/rcu 03/22] rcutorture: Add flag to produce non-busy-wait task stalls Paul E. McKenney
     [not found]     ` <20200319133947.12172-1-hdanton@sina.com>
2020-03-19 15:22       ` Paul E. McKenney
     [not found]       ` <20200320040329.9840-1-hdanton@sina.com>
2020-03-20 16:11         ` Paul E. McKenney
     [not found]   ` <20200320071228.9740-1-hdanton@sina.com>
2020-03-20 19:14     ` [PATCH RFC v2 tip/core/rcu 04/22] rcu-tasks: Move Tasks RCU to its own file Paul E. McKenney
2020-03-27 22:23   ` [PATCH RFC v3 tip/core/rcu 0/34] Prototype RCU usable from idle, exception, offline Paul E. McKenney
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 01/34] sched/core: Add function to sample state of locked-down task paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 02/34] rcu: Add per-task state to RCU CPU stall warnings paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 03/34] rcutorture: Add flag to produce non-busy-wait task stalls paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 04/34] rcu-tasks: Move Tasks RCU to its own file paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 05/34] rcu-tasks: Create struct to hold state information paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 06/34] rcu: Reinstate synchronize_rcu_mult() paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 07/34] rcutorture: Add a test for synchronize_rcu_mult() paulmck
2020-03-27 22:24     ` paulmck [this message]
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 09/34] rcu-tasks: Add an RCU-tasks rude variant paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 10/34] rcutorture: Add torture tests for RCU Tasks Rude paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 11/34] rcu-tasks: Use unique names for RCU-Tasks kthreads and messages paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 12/34] rcu-tasks: Further refactor RCU-tasks to allow adding more variants paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 13/34] rcu-tasks: Code movement to allow more Tasks RCU variants paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 14/34] rcu-tasks: Add an RCU Tasks Trace to simplify protection of tracing hooks paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 15/34] rcutorture: Add torture tests for RCU Tasks Trace paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 16/34] rcu-tasks: Add stall warnings " paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 17/34] rcu-tasks: Move #ifdef into tasks.h paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 18/34] rcu-tasks: Add RCU tasks to rcutorture writer stall output paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 19/34] rcu-tasks: Make rcutorture writer stall output include GP state paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 20/34] rcu-tasks: Make RCU Tasks Trace make use of RCU scheduler hooks paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 21/34] rcu-tasks: Add a grace-period start time for throttling and debug paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 22/34] rcu-tasks: Provide boot parameter to delay IPIs until late in grace period paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 23/34] rcu-tasks: Split ->trc_reader_need_end paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 24/34] rcu-tasks: Add grace-period and IPI counts to statistics paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 25/34] rcu-tasks: Add Kconfig option to mediate smp_mb() vs. IPI paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 26/34] rcu-tasks: Avoid IPIing userspace/idle tasks if kernel is so built paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 27/34] rcu-tasks: Allow rcu_read_unlock_trace() under scheduler locks paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 28/34] rcu-tasks: Disable CPU hotplug across RCU tasks trace scans paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 29/34] rcu-tasks: Handle the running-offline idle-task special case paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 30/34] rcu-tasks: Make RCU tasks trace also wait for idle tasks paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 31/34] rcu-tasks: Add rcu_dynticks_zero_in_eqs() effectiveness statistics paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 32/34] rcu-tasks: Add count for idle tasks on offline CPUs paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 33/34] rcutorture: Add TRACE02 scenario enabling RCU Tasks Trace IPIs paulmck
2020-03-27 22:24     ` [PATCH v3 tip/core/rcu 34/34] rcu-tasks: Add IPI failure count to statistics paulmck
2020-04-15 18:18     ` [PATCH v4 tip/core/rcu 0/38] Prototype RCU usable from idle, exception, offline Paul E. McKenney
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 01/38] rcu: Add comments marking transitions between RCU watching and not paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 02/38] rcu-tasks: Use context-switch hook for PREEMPT=y kernels paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 03/38] sched/core: Add function to sample state of locked-down task paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 04/38] rcu: Add per-task state to RCU CPU stall warnings paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 05/38] rcu-tasks: Move Tasks RCU to its own file paulmck
2020-05-10  7:42         ` Lai Jiangshan
2020-05-10 15:39           ` Paul E. McKenney
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 06/38] rcu-tasks: Create struct to hold state information paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 07/38] rcu: Reinstate synchronize_rcu_mult() paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 08/38] rcutorture: Add a test for synchronize_rcu_mult() paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 09/38] rcu-tasks: Refactor RCU-tasks to allow variants to be added paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 10/38] rcu-tasks: Add an RCU-tasks rude variant paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 11/38] rcutorture: Add torture tests for RCU Tasks Rude paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 12/38] rcu-tasks: Use unique names for RCU-Tasks kthreads and messages paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 13/38] rcu-tasks: Further refactor RCU-tasks to allow adding more variants paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 14/38] rcu-tasks: Code movement to allow more Tasks RCU variants paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 15/38] rcu-tasks: Add an RCU Tasks Trace to simplify protection of tracing hooks paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 16/38] rcutorture: Add torture tests for RCU Tasks Trace paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 17/38] rcu-tasks: Add stall warnings " paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 18/38] rcu-tasks: Move #ifdef into tasks.h paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 19/38] rcu-tasks: Add RCU tasks to rcutorture writer stall output paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 20/38] rcu-tasks: Make rcutorture writer stall output include GP state paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 21/38] rcu-tasks: Make RCU Tasks Trace make use of RCU scheduler hooks paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 22/38] rcu-tasks: Add a grace-period start time for throttling and debug paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 23/38] rcu-tasks: Provide boot parameter to delay IPIs until late in grace period paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 24/38] rcu-tasks: Split ->trc_reader_need_end paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 25/38] rcu-tasks: Add grace-period and IPI counts to statistics paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 26/38] rcu-tasks: Add Kconfig option to mediate smp_mb() vs. IPI paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 27/38] rcu-tasks: Avoid IPIing userspace/idle tasks if kernel is so built paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 28/38] rcu-tasks: Allow rcu_read_unlock_trace() under scheduler locks paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 29/38] rcu-tasks: Disable CPU hotplug across RCU tasks trace scans paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 30/38] rcu-tasks: Handle the running-offline idle-task special case paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 31/38] rcu-tasks: Make RCU tasks trace also wait for idle tasks paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 32/38] rcu-tasks: Add rcu_dynticks_zero_in_eqs() effectiveness statistics paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 33/38] rcu-tasks: Add count for idle tasks on offline CPUs paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 34/38] rcutorture: Add TRACE02 scenario enabling RCU Tasks Trace IPIs paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 35/38] rcu-tasks: Add IPI failure count to statistics paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 36/38] rcu-tasks: Allow standalone use of TASKS_{TRACE_,}RCU paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 37/38] ftrace: Use synchronize_rcu_tasks_rude() instead of ftrace_sync() paulmck
2020-04-15 18:19       ` [PATCH v4 tip/core/rcu 38/38] rcu: Don't acquire lock in NMI handler in rcu_nmi_enter_common() paulmck
