* [PATCH tip/core/rcu 0/3] signed overflow, virtualization optimizations, deadlock avoidance
@ 2011-05-08 13:33 Paul E. McKenney
  2011-05-08 13:34 ` [PATCH tip/core/rcu 1/3] rcu: get rid of signed overflow in check_cpu_stall() Paul E. McKenney
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Paul E. McKenney @ 2011-05-08 13:33 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, laijs, dipankar, akpm, mathieu.desnoyers, josh, niv, tglx,
	peterz, rostedt, Valdis.Kletnieks, dhowells, eric.dumazet,
	darren, patches

Hello!

This patchset adds three more commits:

1.	Rework check_cpu_stall() to avoid signed integer overflow.
2.	Provide an RCU API for better detection of running in a
	guest OS (thanks to Gleb Natapov).
3.	Permit rcu_read_unlock() to be called while holding one
	of the runqueue locks, in response to a lockdep splat
	quite possibly reported by Valdis Kletnieks.

For a testing-only version of this patchset from git, please see the
following subject-to-rebase branch:

git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git rcu/testing

							Thanx, Paul

 b/include/linux/rcutiny.h |    8 +++++
 b/include/linux/rcutree.h |   10 +++++++
 b/kernel/rcutree.c        |   13 +++++----
 b/kernel/rcutree.h        |    5 ---
 b/kernel/rcutree_plugin.h |   64 ++++++++++++++--------------------------------
 kernel/rcutree.c          |   45 ++++++++++----------------------
 6 files changed, 61 insertions(+), 84 deletions(-)


* [PATCH tip/core/rcu 1/3] rcu: get rid of signed overflow in check_cpu_stall()
  2011-05-08 13:33 [PATCH tip/core/rcu 0/3] signed overflow, virtualization optimizations, deadlock avoidance Paul E. McKenney
@ 2011-05-08 13:34 ` Paul E. McKenney
  2011-05-08 13:34 ` [PATCH tip/core/rcu 2/3] rcu: provide rcu_virt_note_context_switch() function Paul E. McKenney
  2011-05-08 13:34 ` [PATCH tip/core/rcu 3/3] rcu: permit rcu_read_unlock() to be called while holding runqueue locks Paul E. McKenney
  2 siblings, 0 replies; 4+ messages in thread
From: Paul E. McKenney @ 2011-05-08 13:34 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, laijs, dipankar, akpm, mathieu.desnoyers, josh, niv, tglx,
	peterz, rostedt, Valdis.Kletnieks, dhowells, eric.dumazet,
	darren, patches, Paul E. McKenney

Signed integer overflow is undefined behavior under the C standard, so
move the stall-warning time calculations to unsigned arithmetic.
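
For illustration only, the unsigned comparison relies on modular
arithmetic.  Below is a minimal standalone sketch of the idea behind
the ULONG_CMP_GE() test used in this patch (not necessarily the exact
kernel definition):

#include <limits.h>

/*
 * Wraparound-safe "now >= deadline" test for free-running unsigned
 * counters such as jiffies: the subtraction is modular, so the result
 * lands in the lower half of the unsigned range whenever "now" is at
 * or past "deadline", even if the counter wrapped in between.  No
 * signed arithmetic is involved, hence no undefined behavior.
 */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

static int stall_deadline_reached(unsigned long now, unsigned long deadline)
{
	return ULONG_CMP_GE(now, deadline);
}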

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcutree.c |   13 ++++++++-----
 1 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 78923a5..b2fe2a2 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -581,21 +581,24 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	long delta;
+	unsigned long j;
+	unsigned long js;
 	struct rcu_node *rnp;
 
 	if (rcu_cpu_stall_suppress)
 		return;
-	delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
+	j = ACCESS_ONCE(jiffies);
+	js = ACCESS_ONCE(rsp->jiffies_stall);
 	rnp = rdp->mynode;
-	if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) {
+	if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
 
-	} else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
+	} else if (rcu_gp_in_progress(rsp) &&
+		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
 
-		/* They had two time units to dump stack, so complain. */
+		/* They had a few time units to dump stack, so complain. */
 		print_other_cpu_stall(rsp);
 	}
 }
-- 
1.7.3.2



* [PATCH tip/core/rcu 2/3] rcu: provide rcu_virt_note_context_switch() function.
  2011-05-08 13:33 [PATCH tip/core/rcu 0/3] signed overflow, virtualization optimizations, deadlock avoidance Paul E. McKenney
  2011-05-08 13:34 ` [PATCH tip/core/rcu 1/3] rcu: get rid of signed overflow in check_cpu_stall() Paul E. McKenney
@ 2011-05-08 13:34 ` Paul E. McKenney
  2011-05-08 13:34 ` [PATCH tip/core/rcu 3/3] rcu: permit rcu_read_unlock() to be called while holding runqueue locks Paul E. McKenney
  2 siblings, 0 replies; 4+ messages in thread
From: Paul E. McKenney @ 2011-05-08 13:34 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, laijs, dipankar, akpm, mathieu.desnoyers, josh, niv, tglx,
	peterz, rostedt, Valdis.Kletnieks, dhowells, eric.dumazet,
	darren, patches, Gleb Natapov, Paul E. McKenney

From: Gleb Natapov <gleb@redhat.com>

Provide rcu_virt_note_context_switch() for virtualization use to note
a quiescent state during guest entry.
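
As an illustrative, hypothetical call site, a hypervisor's guest-entry
path could note the quiescent state just before switching into the
guest.  Only rcu_virt_note_context_switch() below comes from this
patch; everything else is a sketch:

#include <linux/rcupdate.h>	/* pulls in rcutree.h or rcutiny.h */

/* Hypothetical guest-entry helper in a hypervisor module. */
static void example_enter_guest(int cpu)
{
	/*
	 * Entering the guest is a context switch as far as RCU is
	 * concerned: no RCU read-side critical section can span it,
	 * so report a quiescent state for this CPU.
	 */
	rcu_virt_note_context_switch(cpu);

	/* ... architecture-specific world switch into the guest ... */
}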

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 include/linux/rcutiny.h |    8 ++++++++
 include/linux/rcutree.h |   10 ++++++++++
 kernel/rcutree.c        |    1 +
 3 files changed, 19 insertions(+), 0 deletions(-)

diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 30ebd7c..52b3e02 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -100,6 +100,14 @@ static inline void rcu_note_context_switch(int cpu)
 }
 
 /*
+ * Take advantage of the fact that there is only one CPU, which
+ * allows us to ignore virtualization-based context switches.
+ */
+static inline void rcu_virt_note_context_switch(int cpu)
+{
+}
+
+/*
  * Return the number of grace periods.
  */
 static inline long rcu_batches_completed(void)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 284dad1..e65d066 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -35,6 +35,16 @@ extern void rcu_note_context_switch(int cpu);
 extern int rcu_needs_cpu(int cpu);
 extern void rcu_cpu_stall_reset(void);
 
+/*
+ * Note a virtualization-based context switch.  This is simply a
+ * wrapper around rcu_note_context_switch(), which allows TINY_RCU
+ * to save a few bytes.
+ */
+static inline void rcu_virt_note_context_switch(int cpu)
+{
+	rcu_note_context_switch(cpu);
+}
+
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
 extern void exit_rcu(void);
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index b2fe2a2..54ff7eb 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -157,6 +157,7 @@ void rcu_note_context_switch(int cpu)
 	rcu_sched_qs(cpu);
 	rcu_preempt_note_context_switch(cpu);
 }
+EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-- 
1.7.3.2



* [PATCH tip/core/rcu 3/3] rcu: permit rcu_read_unlock() to be called while holding runqueue locks
  2011-05-08 13:33 [PATCH tip/core/rcu 0/3] signed overflow, virtualization optimizations, deadlock avoidance Paul E. McKenney
  2011-05-08 13:34 ` [PATCH tip/core/rcu 1/3] rcu: get rid of signed overflow in check_cpu_stall() Paul E. McKenney
  2011-05-08 13:34 ` [PATCH tip/core/rcu 2/3] rcu: provide rcu_virt_note_context_switch() function Paul E. McKenney
@ 2011-05-08 13:34 ` Paul E. McKenney
  2 siblings, 0 replies; 4+ messages in thread
From: Paul E. McKenney @ 2011-05-08 13:34 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, laijs, dipankar, akpm, mathieu.desnoyers, josh, niv, tglx,
	peterz, rostedt, Valdis.Kletnieks, dhowells, eric.dumazet,
	darren, patches, Paul E. McKenney, Paul E. McKenney

From: Paul E. McKenney <paul.mckenney@linaro.org>

Avoid calling into the scheduler while holding core RCU locks.  This
allows rcu_read_unlock() to be called while holding the runqueue locks,
but only as long as there is no chance of the RCU read-side critical
section having been preempted.  (Otherwise, if RCU priority boosting
is enabled, rcu_read_unlock() might call into the scheduler in order to
unboost itself, which could self-deadlock on the runqueue locks within
the scheduler.)
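
The resulting calling convention is that rcu_initiate_boost() now takes
the caller's irq flags and releases rnp->lock itself before calling
wake_up_process(), so the scheduler is never entered with an rcu_node
lock held.  A caller-side sketch, mirroring the force_qs_rnp() hunk
below (illustrative only, assuming the rcu_node definitions from
kernel/rcutree.h):

static void example_check_and_boost(struct rcu_node *rnp)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (rnp->qsmask == 0) {
		/* Drops rnp->lock before it can wake the boost kthread. */
		rcu_initiate_boost(rnp, flags);
	} else {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}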

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcutree.c        |   44 +++++++++----------------------
 kernel/rcutree.h        |    5 +---
 kernel/rcutree_plugin.h |   64 ++++++++++++++--------------------------------
 3 files changed, 34 insertions(+), 79 deletions(-)

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 54ff7eb..5616b17 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1133,22 +1133,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	if (need_report & RCU_OFL_TASKS_EXP_GP)
 		rcu_report_exp_rnp(rsp, rnp);
-
-	/*
-	 * If there are no more online CPUs for this rcu_node structure,
-	 * kill the rcu_node structure's kthread.  Otherwise, adjust its
-	 * affinity.
-	 */
-	t = rnp->node_kthread_task;
-	if (t != NULL &&
-	    rnp->qsmaskinit == 0) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		rnp->node_kthread_task = NULL;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		kthread_stop(t);
-		rcu_stop_boost_kthread(rnp);
-	} else
-		rcu_node_kthread_setaffinity(rnp, -1);
+	rcu_node_kthread_setaffinity(rnp, -1);
 }
 
 /*
@@ -1320,8 +1305,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 			return;
 		}
 		if (rnp->qsmask == 0) {
-			rcu_initiate_boost(rnp);
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
 			continue;
 		}
 		cpu = rnp->grplo;
@@ -1340,10 +1324,10 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 	rnp = rcu_get_root(rsp);
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	if (rnp->qsmask == 0)
-		rcu_initiate_boost(rnp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	if (rnp->qsmask == 0) {
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+	}
 }
 
 /*
@@ -1497,7 +1481,8 @@ static void invoke_rcu_cpu_kthread(void)
 
 /*
  * Wake up the specified per-rcu_node-structure kthread.
- * The caller must hold ->lock.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
  */
 static void invoke_rcu_node_kthread(struct rcu_node *rnp)
 {
@@ -1546,8 +1531,8 @@ static void rcu_cpu_kthread_timer(unsigned long arg)
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	rnp->wakemask |= rdp->grpmask;
-	invoke_rcu_node_kthread(rnp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	invoke_rcu_node_kthread(rnp);
 }
 
 /*
@@ -1694,16 +1679,12 @@ static int rcu_node_kthread(void *arg)
 
 	for (;;) {
 		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-		wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0 ||
-						       kthread_should_stop());
-		if (kthread_should_stop())
-			break;
+		wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
 		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		mask = rnp->wakemask;
 		rnp->wakemask = 0;
-		rcu_initiate_boost(rnp);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
 		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
 			if ((mask & 0x1) == 0)
 				continue;
@@ -1719,6 +1700,7 @@ static int rcu_node_kthread(void *arg)
 			preempt_enable();
 		}
 	}
+	/* NOTREACHED */
 	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
 	return 0;
 }
@@ -1738,7 +1720,7 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 	int cpu;
 	unsigned long mask = rnp->qsmaskinit;
 
-	if (rnp->node_kthread_task == NULL || mask == 0)
+	if (rnp->node_kthread_task == NULL)
 		return;
 	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
 		return;
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index a6a9717..93d4a1c 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -444,15 +444,12 @@ static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
 static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp);
-static void rcu_initiate_boost(struct rcu_node *rnp);
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
 					  cpumask_var_t cm);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp,
 						 int rnp_index);
-#ifdef CONFIG_HOTPLUG_CPU
-static void rcu_stop_boost_kthread(struct rcu_node *rnp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 #endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index f629479..ed339702 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -711,15 +711,17 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 static void
 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 {
+	unsigned long flags;
 	int must_wait = 0;
 
-	raw_spin_lock(&rnp->lock); /* irqs already disabled */
-	if (!list_empty(&rnp->blkd_tasks)) {
+	raw_spin_lock_irqsave(&rnp->lock, flags);
+	if (list_empty(&rnp->blkd_tasks))
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	else {
 		rnp->exp_tasks = rnp->blkd_tasks.next;
-		rcu_initiate_boost(rnp);
+		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
 		must_wait = 1;
 	}
-	raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
 	if (!must_wait)
 		rcu_report_exp_rnp(rsp, rnp);
 }
@@ -1179,12 +1181,7 @@ static int rcu_boost(struct rcu_node *rnp)
  */
 static void rcu_boost_kthread_timer(unsigned long arg)
 {
-	unsigned long flags;
-	struct rcu_node *rnp = (struct rcu_node *)arg;
-
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	invoke_rcu_node_kthread(rnp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	invoke_rcu_node_kthread((struct rcu_node *)arg);
 }
 
 /*
@@ -1200,10 +1197,7 @@ static int rcu_boost_kthread(void *arg)
 	for (;;) {
 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
 		wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
-							rnp->exp_tasks ||
-							kthread_should_stop());
-		if (kthread_should_stop())
-			break;
+							rnp->exp_tasks);
 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
 		more2boost = rcu_boost(rnp);
 		if (more2boost)
@@ -1215,7 +1209,7 @@ static int rcu_boost_kthread(void *arg)
 			spincnt = 0;
 		}
 	}
-	rnp->boost_kthread_status = RCU_KTHREAD_STOPPED;
+	/* NOTREACHED */
 	return 0;
 }
 
@@ -1225,14 +1219,17 @@ static int rcu_boost_kthread(void *arg)
  * kthread to start boosting them.  If there is an expedited grace
  * period in progress, it is always time to boost.
  *
- * The caller must hold rnp->lock.
+ * The caller must hold rnp->lock, which this function releases,
+ * but irqs remain disabled.  The ->boost_kthread_task is immortal,
+ * so we don't need to worry about it going away.
  */
-static void rcu_initiate_boost(struct rcu_node *rnp)
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
 	struct task_struct *t;
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
 		rnp->n_balk_exp_gp_tasks++;
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
 	if (rnp->exp_tasks != NULL ||
@@ -1242,11 +1239,14 @@ static void rcu_initiate_boost(struct rcu_node *rnp)
 	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
 		if (rnp->exp_tasks == NULL)
 			rnp->boost_tasks = rnp->gp_tasks;
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		t = rnp->boost_kthread_task;
 		if (t != NULL)
 			wake_up_process(t);
-	} else
+	} else {
 		rcu_initiate_boost_trace(rnp);
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	}
 }
 
 /*
@@ -1312,27 +1312,11 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_boost_kthread(struct rcu_node *rnp)
-{
-	unsigned long flags;
-	struct task_struct *t;
-
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	t = rnp->boost_kthread_task;
-	rnp->boost_kthread_task = NULL;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	if (t != NULL)
-		kthread_stop(t);
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 #else /* #ifdef CONFIG_RCU_BOOST */
 
-static void rcu_initiate_boost(struct rcu_node *rnp)
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
+	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
@@ -1355,14 +1339,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_boost_kthread(struct rcu_node *rnp)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifndef CONFIG_SMP
-- 
1.7.3.2


