linux-kernel.vger.kernel.org archive mirror
From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
To: linux-kernel@vger.kernel.org
Cc: mingo@elte.hu, laijs@cn.fujitsu.com, dipankar@in.ibm.com,
	akpm@linux-foundation.org, mathieu.desnoyers@polymtl.ca,
	josh@joshtriplett.org, niv@us.ibm.com, tglx@linutronix.de,
	peterz@infradead.org, rostedt@goodmis.org,
	Valdis.Kletnieks@vt.edu, dhowells@redhat.com,
	eric.dumazet@gmail.com, darren@dvhart.com, patches@linaro.org,
	"Paul E. McKenney" <paul.mckenney@linaro.org>,
	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Subject: [PATCH tip/core/rcu 05/86] rcu: move TREE_RCU from softirq to kthread
Date: Sun,  1 May 2011 06:20:45 -0700	[thread overview]
Message-ID: <1304256126-26015-5-git-send-email-paulmck@linux.vnet.ibm.com> (raw)
In-Reply-To: <20110501132142.GA25494@linux.vnet.ibm.com>

From: Paul E. McKenney <paul.mckenney@linaro.org>

If RCU priority boosting is to be meaningful, callback invocation must
be boosted in addition to preempted RCU readers.  Otherwise, in the
presence of CPU-bound real-time threads, the grace period ends, but the
callbacks never get invoked.  If the callbacks are not invoked, the
associated memory is not freed, so the system is still subject to OOM.

But it is not reasonable to priority-boost RCU_SOFTIRQ, so this commit
moves the callback invocations to a kthread, which can be boosted easily.

Also add comments and properly synchronize all accesses to
rcu_cpu_kthread_task, as suggested by Lai Jiangshan.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
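(Not part of the commit: for context, below is a minimal, self-contained
sketch of the pattern this patch applies -- spawn a per-CPU kthread, bind
it to its CPU, give it an RT priority so that it can later be
priority-boosted, and wake it where raise_softirq() used to be called.
All names below (example_*) are illustrative only and do not appear in
the patch; the real code additionally handles CPU hotplug, yielding under
callback overload, and the per-rcu_node kthreads, as seen in
rcu_spawn_one_cpu_kthread() and rcu_cpu_kthread() in the diff.)

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DEFINE_PER_CPU(struct task_struct *, example_task);
static DEFINE_PER_CPU(wait_queue_head_t, example_wq);
static DEFINE_PER_CPU(int, example_work);

/* Per-CPU thread body: sleep until work arrives, then process it. */
static int example_thread_fn(void *arg)
{
	int cpu = (int)(long)arg;

	while (!kthread_should_stop()) {
		wait_event_interruptible(per_cpu(example_wq, cpu),
					 per_cpu(example_work, cpu) != 0 ||
					 kthread_should_stop());
		per_cpu(example_work, cpu) = 0;
		/* ... invoke the pending callbacks here ... */
	}
	return 0;
}

/* Spawn and bind the kthread, then raise it to RT priority. */
static int example_spawn(int cpu)
{
	struct sched_param sp = { .sched_priority = 1 };
	struct task_struct *t;

	init_waitqueue_head(&per_cpu(example_wq, cpu));
	t = kthread_create(example_thread_fn, (void *)(long)cpu,
			   "example/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	kthread_bind(t, cpu);
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	per_cpu(example_task, cpu) = t;
	wake_up_process(t);
	return 0;
}

/* Called where raise_softirq(RCU_SOFTIRQ) was used before. */
static void example_invoke(int cpu)
{
	per_cpu(example_work, cpu) = 1;
	wake_up(&per_cpu(example_wq, cpu));
}
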
 Documentation/filesystems/proc.txt  |    1 -
 include/linux/interrupt.h           |    1 -
 include/trace/events/irq.h          |    3 +-
 kernel/rcutree.c                    |  340 ++++++++++++++++++++++++++++++++++-
 kernel/rcutree.h                    |    8 +
 kernel/rcutree_plugin.h             |    4 +-
 kernel/softirq.c                    |    2 +-
 tools/perf/util/trace-event-parse.c |    1 -
 8 files changed, 348 insertions(+), 12 deletions(-)

diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index b0b814d..60740e8 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -836,7 +836,6 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
  TASKLET:          0          0          0        290
    SCHED:      27035      26983      26971      26746
  HRTIMER:          0          0          0          0
-     RCU:       1678       1769       2178       2250
 
 
 1.3 IDE devices in /proc/ide
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index bea0ac7..6c12989 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -414,7 +414,6 @@ enum
 	TASKLET_SOFTIRQ,
 	SCHED_SOFTIRQ,
 	HRTIMER_SOFTIRQ,
-	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */
 
 	NR_SOFTIRQS
 };
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 1c09820..ae045ca 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -20,8 +20,7 @@ struct softirq_action;
 			 softirq_name(BLOCK_IOPOLL),	\
 			 softirq_name(TASKLET),		\
 			 softirq_name(SCHED),		\
-			 softirq_name(HRTIMER),		\
-			 softirq_name(RCU))
+			 softirq_name(HRTIMER))
 
 /**
  * irq_handler_entry - called immediately before the irq action handler
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 0ac1cc0..97420b6 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -47,6 +47,8 @@
 #include <linux/mutex.h>
 #include <linux/time.h>
 #include <linux/kernel_stat.h>
+#include <linux/wait.h>
+#include <linux/kthread.h>
 
 #include "rcutree.h"
 
@@ -83,6 +85,20 @@ int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
 /*
+ * Control variables for per-CPU and per-rcu_node kthreads.  These
+ * handle all flavors of RCU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
+static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
+static DEFINE_PER_CPU(char, rcu_cpu_has_work);
+static char rcu_kthreads_spawnable;
+
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp);
+static void invoke_rcu_kthread(void);
+
+#define RCU_KTHREAD_PRIO 1	/* RT priority for per-CPU kthreads. */
+
+/*
  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
  * structure's ->lock, but of course results can be subject to change.
@@ -1009,6 +1025,8 @@ static void rcu_send_cbs_to_online(struct rcu_state *rsp)
 /*
  * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
  * and move all callbacks from the outgoing CPU to the current one.
+ * There can only be one CPU hotplug operation at a time, so no other
+ * CPU can be attempting to update rcu_cpu_kthread_task.
  */
 static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 {
@@ -1017,6 +1035,14 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	int need_report = 0;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp;
+	struct task_struct *t;
+
+	/* Stop the CPU's kthread. */
+	t = per_cpu(rcu_cpu_kthread_task, cpu);
+	if (t != NULL) {
+		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
+		kthread_stop(t);
+	}
 
 	/* Exclude any attempts to start a new grace period. */
 	raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1054,6 +1080,19 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	if (need_report & RCU_OFL_TASKS_EXP_GP)
 		rcu_report_exp_rnp(rsp, rnp);
+
+	/*
+	 * If there are no more online CPUs for this rcu_node structure,
+	 * kill the rcu_node structure's kthread.  Otherwise, adjust its
+	 * affinity.
+	 */
+	t = rnp->node_kthread_task;
+	if (t != NULL &&
+	    rnp->qsmaskinit == 0) {
+		kthread_stop(t);
+		rnp->node_kthread_task = NULL;
+	} else
+		rcu_node_kthread_setaffinity(rnp);
 }
 
 /*
@@ -1151,7 +1190,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Re-raise the RCU softirq if there are callbacks remaining. */
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
-		raise_softirq(RCU_SOFTIRQ);
+		invoke_rcu_kthread();
 }
 
 /*
@@ -1197,7 +1236,7 @@ void rcu_check_callbacks(int cpu, int user)
 	}
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
-		raise_softirq(RCU_SOFTIRQ);
+		invoke_rcu_kthread();
 }
 
 #ifdef CONFIG_SMP
@@ -1361,7 +1400,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 /*
  * Do softirq processing for the current CPU.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static void rcu_process_callbacks(void)
 {
 	__rcu_process_callbacks(&rcu_sched_state,
 				&__get_cpu_var(rcu_sched_data));
@@ -1372,6 +1411,281 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 	rcu_needs_cpu_flush();
 }
 
+/*
+ * Wake up the current CPU's kthread.  This replaces raise_softirq()
+ * in earlier versions of RCU.  Note that because we are running on
+ * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
+ * cannot disappear out from under us.
+ */
+static void invoke_rcu_kthread(void)
+{
+	unsigned long flags;
+	wait_queue_head_t *q;
+	int cpu;
+
+	local_irq_save(flags);
+	cpu = smp_processor_id();
+	per_cpu(rcu_cpu_has_work, cpu) = 1;
+	if (per_cpu(rcu_cpu_kthread_task, cpu) == NULL) {
+		local_irq_restore(flags);
+		return;
+	}
+	q = &per_cpu(rcu_cpu_wq, cpu);
+	wake_up(q);
+	local_irq_restore(flags);
+}
+
+/*
+ * Timer handler to initiate the waking up of per-CPU kthreads that
+ * have yielded the CPU due to excess numbers of RCU callbacks.
+ */
+static void rcu_cpu_kthread_timer(unsigned long arg)
+{
+	unsigned long flags;
+	struct rcu_data *rdp = (struct rcu_data *)arg;
+	struct rcu_node *rnp = rdp->mynode;
+	struct task_struct *t;
+
+	raw_spin_lock_irqsave(&rnp->lock, flags);
+	rnp->wakemask |= rdp->grpmask;
+	t = rnp->node_kthread_task;
+	if (t == NULL) {
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		return;
+	}
+	wake_up_process(t);
+	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+/*
+ * Drop to non-real-time priority and yield, but only after posting a
+ * timer that will cause us to regain our real-time priority if we
+ * remain preempted.  Either way, we restore our real-time priority
+ * before returning.
+ */
+static void rcu_yield(int cpu)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rcu_sched_state.rda, cpu);
+	struct sched_param sp;
+	struct timer_list yield_timer;
+
+	setup_timer(&yield_timer, rcu_cpu_kthread_timer, (unsigned long)rdp);
+	mod_timer(&yield_timer, jiffies + 2);
+	sp.sched_priority = 0;
+	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
+	schedule();
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+	del_timer(&yield_timer);
+}
+
+/*
+ * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
+ * This can happen while the corresponding CPU is either coming online
+ * or going offline.  We cannot wait until the CPU is fully online
+ * before starting the kthread, because the various notifier functions
+ * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
+ * the corresponding CPU is online.
+ *
+ * Return 1 if the kthread needs to stop, 0 otherwise.
+ *
+ * Caller must disable bh.  This function can momentarily enable it.
+ */
+static int rcu_cpu_kthread_should_stop(int cpu)
+{
+	while (cpu_is_offline(cpu) ||
+	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
+	       smp_processor_id() != cpu) {
+		if (kthread_should_stop())
+			return 1;
+		local_bh_enable();
+		schedule_timeout_uninterruptible(1);
+		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
+			set_cpus_allowed_ptr(current, cpumask_of(cpu));
+		local_bh_disable();
+	}
+	return 0;
+}
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
+ * earlier RCU softirq.
+ */
+static int rcu_cpu_kthread(void *arg)
+{
+	int cpu = (int)(long)arg;
+	unsigned long flags;
+	int spincnt = 0;
+	wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
+	char work;
+	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+
+	for (;;) {
+		wait_event_interruptible(*wqp,
+					 *workp != 0 || kthread_should_stop());
+		local_bh_disable();
+		if (rcu_cpu_kthread_should_stop(cpu)) {
+			local_bh_enable();
+			break;
+		}
+		local_irq_save(flags);
+		work = *workp;
+		*workp = 0;
+		local_irq_restore(flags);
+		if (work)
+			rcu_process_callbacks();
+		local_bh_enable();
+		if (*workp != 0)
+			spincnt++;
+		else
+			spincnt = 0;
+		if (spincnt > 10) {
+			rcu_yield(cpu);
+			spincnt = 0;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Spawn a per-CPU kthread, setting up affinity and priority.
+ * Because the CPU hotplug lock is held, no other CPU will be attempting
+ * to manipulate rcu_cpu_kthread_task.  There might be another CPU
+ * attempting to access it during boot, but the locking in kthread_bind()
+ * will enforce sufficient ordering.
+ */
+static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+{
+	struct sched_param sp;
+	struct task_struct *t;
+
+	if (!rcu_kthreads_spawnable ||
+	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
+		return 0;
+	t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+	if (IS_ERR(t))
+		return PTR_ERR(t);
+	kthread_bind(t, cpu);
+	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
+	per_cpu(rcu_cpu_kthread_task, cpu) = t;
+	wake_up_process(t);
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+	return 0;
+}
+
+/*
+ * Per-rcu_node kthread, which is in charge of waking up the per-CPU
+ * kthreads when needed.  We ignore requests to wake up kthreads
+ * for offline CPUs, which is OK because force_quiescent_state()
+ * takes care of this case.
+ */
+static int rcu_node_kthread(void *arg)
+{
+	int cpu;
+	unsigned long flags;
+	unsigned long mask;
+	struct rcu_node *rnp = (struct rcu_node *)arg;
+	struct sched_param sp;
+	struct task_struct *t;
+
+	for (;;) {
+		wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0 ||
+						       kthread_should_stop());
+		if (kthread_should_stop())
+			break;
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		mask = rnp->wakemask;
+		rnp->wakemask = 0;
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
+			if ((mask & 0x1) == 0)
+				continue;
+			preempt_disable();
+			t = per_cpu(rcu_cpu_kthread_task, cpu);
+			if (!cpu_online(cpu) || t == NULL) {
+				preempt_enable();
+				continue;
+			}
+			per_cpu(rcu_cpu_has_work, cpu) = 1;
+			sp.sched_priority = RCU_KTHREAD_PRIO;
+			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+			preempt_enable();
+		}
+	}
+	return 0;
+}
+
+/*
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question.
+ */
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp)
+{
+	cpumask_var_t cm;
+	int cpu;
+	unsigned long mask = rnp->qsmaskinit;
+
+	if (rnp->node_kthread_task == NULL ||
+	    rnp->qsmaskinit == 0)
+		return;
+	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+		return;
+	cpumask_clear(cm);
+	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
+		if (mask & 0x1)
+			cpumask_set_cpu(cpu, cm);
+	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
+	free_cpumask_var(cm);
+}
+
+/*
+ * Spawn a per-rcu_node kthread, setting priority and affinity.
+ */
+static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
+						struct rcu_node *rnp)
+{
+	int rnp_index = rnp - &rsp->node[0];
+	struct sched_param sp;
+	struct task_struct *t;
+
+	if (!rcu_kthreads_spawnable ||
+	    rnp->qsmaskinit == 0 ||
+	    rnp->node_kthread_task != NULL)
+		return 0;
+	t = kthread_create(rcu_node_kthread, (void *)rnp, "rcun%d", rnp_index);
+	if (IS_ERR(t))
+		return PTR_ERR(t);
+	rnp->node_kthread_task = t;
+	wake_up_process(t);
+	sp.sched_priority = 99;
+	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+	return 0;
+}
+
+/*
+ * Spawn all kthreads -- called as soon as the scheduler is running.
+ */
+static int __init rcu_spawn_kthreads(void)
+{
+	int cpu;
+	struct rcu_node *rnp;
+
+	rcu_kthreads_spawnable = 1;
+	for_each_possible_cpu(cpu) {
+		init_waitqueue_head(&per_cpu(rcu_cpu_wq, cpu));
+		per_cpu(rcu_cpu_has_work, cpu) = 0;
+		if (cpu_online(cpu))
+			(void)rcu_spawn_one_cpu_kthread(cpu);
+	}
+	rcu_for_each_leaf_node(&rcu_sched_state, rnp) {
+		init_waitqueue_head(&rnp->node_wq);
+		(void)rcu_spawn_one_node_kthread(&rcu_sched_state, rnp);
+	}
+	return 0;
+}
+early_initcall(rcu_spawn_kthreads);
+
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	   struct rcu_state *rsp)
@@ -1771,6 +2085,19 @@ static void __cpuinit rcu_online_cpu(int cpu)
 	rcu_preempt_init_percpu_data(cpu);
 }
 
+static void __cpuinit rcu_online_kthreads(int cpu)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rcu_sched_state.rda, cpu);
+	struct rcu_node *rnp = rdp->mynode;
+
+	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
+	if (rcu_kthreads_spawnable) {
+		(void)rcu_spawn_one_cpu_kthread(cpu);
+		if (rnp->node_kthread_task == NULL)
+			(void)rcu_spawn_one_node_kthread(&rcu_sched_state, rnp);
+	}
+}
+
 /*
  * Handle CPU online/offline notification events.
  */
@@ -1778,11 +2105,17 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
+	struct rcu_data *rdp = per_cpu_ptr(rcu_sched_state.rda, cpu);
+	struct rcu_node *rnp = rdp->mynode;
 
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
 		rcu_online_cpu(cpu);
+		rcu_online_kthreads(cpu);
+		break;
+	case CPU_ONLINE:
+		rcu_node_kthread_setaffinity(rnp);
 		break;
 	case CPU_DYING:
 	case CPU_DYING_FROZEN:
@@ -1923,7 +2256,6 @@ void __init rcu_init(void)
 	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
 	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
 	__rcu_init_preempt();
-	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
 	/*
 	 * We don't need protection against CPU-hotplug here because
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 5a439c1..c021380 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -111,6 +111,7 @@ struct rcu_node {
 				/*  elements that need to drain to allow the */
 				/*  current expedited grace period to */
 				/*  complete (only for TREE_PREEMPT_RCU). */
+	unsigned long wakemask; /* CPUs whose kthread needs to be awakened. */
 	unsigned long qsmaskinit;
 				/* Per-GP initial value for qsmask & expmask. */
 	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
@@ -134,6 +135,13 @@ struct rcu_node {
 				/*  if there is no such task.  If there */
 				/*  is no current expedited grace period, */
 				/*  then there can cannot be any such task. */
+	struct task_struct *node_kthread_task;
+				/* kthread that takes care of this rcu_node */
+				/*  structure, for example, awakening the */
+				/*  per-CPU kthreads as needed. */
+	wait_queue_head_t node_wq;
+				/* Wait queue on which to park the per-node */
+				/*  kthread. */
 } ____cacheline_internodealigned_in_smp;
 
 /*
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 774f010..b9bd69a 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1206,7 +1206,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
  *
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do a
- * raise_softirq() to cause rcu_process_callbacks() to be invoked later.
+ * invoke_rcu_kthread() to cause rcu_process_callbacks() to be invoked later.
  * The per-cpu rcu_dyntick_drain variable controls the sequencing.
  */
 int rcu_needs_cpu(int cpu)
@@ -1257,7 +1257,7 @@ int rcu_needs_cpu(int cpu)
 
 	/* If RCU callbacks are still pending, RCU still needs this CPU. */
 	if (c)
-		raise_softirq(RCU_SOFTIRQ);
+		invoke_rcu_kthread();
 	return c;
 }
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 174f976..1396017 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 char *softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
-	"TASKLET", "SCHED", "HRTIMER",	"RCU"
+	"TASKLET", "SCHED", "HRTIMER"
 };
 
 /*
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 0a7ed5b..1e88485 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -2187,7 +2187,6 @@ static const struct flag flags[] = {
 	{ "TASKLET_SOFTIRQ", 6 },
 	{ "SCHED_SOFTIRQ", 7 },
 	{ "HRTIMER_SOFTIRQ", 8 },
-	{ "RCU_SOFTIRQ", 9 },
 
 	{ "HRTIMER_NORESTART", 0 },
 	{ "HRTIMER_RESTART", 1 },
-- 
1.7.3.2



Thread overview: 126+ messages
2011-05-01 13:21 [PATCH tip/core/rcu 0/6] Preview of RCU patches for 2.6.40 Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 01/86] rcu: Remove conditional compilation for RCU CPU stall warnings Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 02/86] rcu: Decrease memory-barrier usage based on semi-formal proof Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 03/86] rcu: merge TREE_PREEPT_RCU blocked_tasks[] lists Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 04/86] rcu: Update documentation to reflect blocked_tasks[] merge Paul E. McKenney
2011-05-01 13:20 ` Paul E. McKenney [this message]
2011-05-05  9:31   ` [PATCH tip/core/rcu 05/86] rcu: move TREE_RCU from softirq to kthread Yong Zhang
2011-05-06  5:46     ` Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 06/86] rcu: priority boosting for TREE_PREEMPT_RCU Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 07/86] rcu: Force per-rcu_node kthreads off of the outgoing CPU Paul E. McKenney
2011-05-01 15:10   ` Josh Triplett
2011-05-02 10:25     ` Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 08/86] rcu: put per-CPU kthread at non-RT priority during CPU hotplug operations Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 09/86] rcu: avoid hammering sched with yet another bound RT kthread Paul E. McKenney
2011-05-01 15:48   ` Josh Triplett
2011-05-02  8:23     ` Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 10/86] rcu: eliminate unused boosting statistics Paul E. McKenney
2011-05-01 15:53   ` Josh Triplett
2011-05-02  8:25     ` Paul E. McKenney
2011-05-02 16:15       ` Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 11/86] rcu: Add boosting to TREE_PREEMPT_RCU tracing Paul E. McKenney
2011-05-01 15:52   ` Josh Triplett
2011-05-02  8:27     ` Paul E. McKenney
2011-05-02 17:53       ` Josh Triplett
2011-05-02 22:19         ` Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 12/86] rcu: Update RCU's trace.txt documentation for new format Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 13/86] rcu: add callback-queue information to rcudata output Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 14/86] rcu: document new callback-queue trace information Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 15/86] rcu: add tracing for RCU's kthread run states Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 16/86] rcu: make rcutorture version numbers available through debugfs Paul E. McKenney
2011-05-01 15:29   ` Josh Triplett
2011-05-02  8:30     ` Paul E. McKenney
2011-05-02 17:39       ` Josh Triplett
2011-05-01 13:20 ` [PATCH tip/core/rcu 17/86] rcu: fix boost-tracing bug and update tracing documentation Paul E. McKenney
2011-05-01 15:43   ` Josh Triplett
2011-05-02  8:33     ` Paul E. McKenney
2011-05-02 16:18       ` Paul E. McKenney
2011-05-02 16:19         ` Paul E. McKenney
2011-05-02 17:44       ` Josh Triplett
2011-05-02 22:19         ` Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 18/86] rcu: add grace-period age to tracing Paul E. McKenney
2011-05-01 15:25   ` Josh Triplett
2011-05-02  8:34     ` Paul E. McKenney
2011-05-02 10:52       ` Paul E. McKenney
2011-05-02 16:21         ` Paul E. McKenney
2011-05-01 13:20 ` [PATCH tip/core/rcu 19/86] rcu: Add forward-progress diagnostic for per-CPU kthreads Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 20/86] rcu: Enable DEBUG_OBJECTS_RCU_HEAD from !PREEMPT Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 21/86] rcu: add DEBUG_OBJECTS_RCU_HEAD check for alignment Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 22/86] rcu: mark rcutorture boosting callback as being on-stack Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 23/86] rcu: Use WARN_ON_ONCE for DEBUG_OBJECTS_RCU_HEAD warnings Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 24/86] rcu: Switch to this_cpu() primitives Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 25/86] rcu: code cleanups in TINY_RCU priority boosting Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 26/86] rcu: remove useless ->boosted_this_gp field Paul E. McKenney
2011-05-01 16:05   ` Josh Triplett
2011-05-02  8:34     ` Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 27/86] rcu: Converge TINY_RCU expedited and normal boosting Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 28/86] rcu: call __rcu_read_unlock() in exit_rcu for tree RCU Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 29/86] rcu: fix spelling Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 30/86] rcu: introduce kfree_rcu() Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 31/86] rcu: further lower priority in rcu_yield() Paul E. McKenney
2011-05-01 17:51   ` Mike Galbraith
2011-05-02  8:11     ` Paul E. McKenney
2011-05-02  9:33       ` Mike Galbraith
2011-05-01 13:21 ` [PATCH tip/core/rcu 32/86] rcu: prevent call_rcu() from diving into rcu core if irqs disabled Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 33/86] rcu: optimize rcutiny Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 34/86] cgroup,rcu: convert call_rcu(free_css_set_rcu) to kfree_rcu() Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 35/86] cgroup,rcu: convert call_rcu(free_cgroup_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 36/86] cgroup,rcu: convert call_rcu(__free_css_id_cb) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 37/86] net,rcu: convert call_rcu(tcf_common_free_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 38/86] net,rcu: convert call_rcu(tcf_police_free_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 39/86] net,rcu: convert call_rcu(in6_dev_finish_destroy_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 40/86] net,rcu: convert call_rcu(inet6_ifa_finish_destroy_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 41/86] net,rcu: convert call_rcu(listeners_free_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 42/86] net,rcu: convert call_rcu(sctp_local_addr_free) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 43/86] net,rcu: convert call_rcu(ha_rcu_free) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 44/86] net,rcu: convert call_rcu(dn_dev_free_ifa_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 45/86] net,act_police,rcu: remove rcu_barrier() Paul E. McKenney
2011-05-01 15:59   ` Josh Triplett
2011-05-02  8:36     ` Paul E. McKenney
2011-05-02 17:50       ` Josh Triplett
2011-05-02 22:21         ` Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 46/86] security,rcu: convert call_rcu(user_update_rcu_disposal) to kfree_rcu() Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 47/86] net,rcu: convert call_rcu(fc_rport_free_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 48/86] net,rcu: convert call_rcu(__leaf_info_free_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 49/86] net,rcu: convert call_rcu(__gen_kill_estimator) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 50/86] net,rcu: convert call_rcu(ip_mc_list_reclaim) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 51/86] net,rcu: convert call_rcu(ip_sf_socklist_reclaim) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 52/86] net,rcu: convert call_rcu(ip_mc_socklist_reclaim) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 53/86] net,rcu: convert call_rcu(free_dm_hw_stat) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 54/86] ixgbe,rcu: convert call_rcu(ring_free_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 55/86] macvlan,rcu: convert call_rcu(macvlan_port_rcu_free) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 56/86] net,rcu: convert call_rcu(ipv6_mc_socklist_reclaim) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 57/86] net,rcu: convert call_rcu(rps_map_release) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 58/86] net,rcu: convert call_rcu(xps_map_release) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 59/86] net,rcu: convert call_rcu(xps_dev_maps_release) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 60/86] security,rcu: convert call_rcu(sel_netif_free) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 61/86] net,rcu: convert call_rcu(netlbl_unlhsh_free_addr4) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 62/86] net,rcu: convert call_rcu(netlbl_unlhsh_free_addr6) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 63/86] net,rcu: convert call_rcu(net_generic_release) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 64/86] net,rcu: convert call_rcu(__nf_ct_ext_free_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 65/86] perf,rcu: convert call_rcu(free_ctx) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 66/86] perf,rcu: convert call_rcu(swevent_hlist_release_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 67/86] net,rcu: convert call_rcu(phonet_device_rcu_free) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 68/86] net,rcu: convert call_rcu(wq_free_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 69/86] net/mac80211,rcu: convert call_rcu(work_free_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 70/86] net,rcu: convert call_rcu(xt_osf_finger_free_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 71/86] net,rcu: convert call_rcu(kfree_tid_tx) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 72/86] audit_tree,rcu: convert call_rcu(__put_tree) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 73/86] block,rcu: convert call_rcu(cfq_cfqd_free) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 74/86] nfs,rcu: convert call_rcu(nfs_free_delegation_callback) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 75/86] security,rcu: convert call_rcu(whitelist_item_free) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 76/86] scsi,rcu: convert call_rcu(fc_rport_free_rcu) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 77/86] block,rcu: convert call_rcu(disk_free_ptbl_rcu_cb) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 78/86] ia64,rcu: convert call_rcu(sn_irq_info_free) " Paul E. McKenney
2011-05-01 13:21 ` [PATCH tip/core/rcu 79/86] jbd2,rcu: convert call_rcu(free_devcache) " Paul E. McKenney
2011-05-01 13:22 ` [PATCH tip/core/rcu 80/86] md,rcu: convert call_rcu(free_conf) " Paul E. McKenney
2011-05-01 13:22 ` [PATCH tip/core/rcu 81/86] security,rcu: convert call_rcu(sel_netnode_free) " Paul E. McKenney
2011-05-01 13:22 ` [PATCH tip/core/rcu 82/86] security,rcu: convert call_rcu(sel_netport_free) " Paul E. McKenney
2011-05-01 13:22 ` [PATCH tip/core/rcu 83/86] ipc,rcu: convert call_rcu(free_un) " Paul E. McKenney
2011-05-01 13:22 ` [PATCH tip/core/rcu 84/86] ipc,rcu: convert call_rcu(ipc_immediate_free) " Paul E. McKenney
2011-05-01 13:22 ` [PATCH tip/core/rcu 85/86] vmalloc,rcu: convert call_rcu(rcu_free_va) " Paul E. McKenney
2011-05-01 13:22 ` [PATCH tip/core/rcu 86/86] vmalloc,rcu: convert call_rcu(rcu_free_vb) " Paul E. McKenney
2011-05-01 16:14 ` [PATCH tip/core/rcu 0/6] Preview of RCU patches for 2.6.40 Josh Triplett
2011-05-02  8:37   ` Paul E. McKenney
2011-05-01 23:49 ` Dave Chinner
2011-05-02  8:09   ` Paul E. McKenney
