All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH tip/core/rcu 0/3] rcu: fixes for lockdep RCU and accelerated dyntick GPs
@ 2010-02-27  0:38 Paul E. McKenney
  2010-02-27  0:38 ` [PATCH tip/core/rcu 1/3] rcu: fixes for accelerated grace periods for last non-dynticked CPU Paul E. McKenney
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Paul E. McKenney @ 2010-02-27  0:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, laijs, dipankar, akpm, mathieu.desnoyers, josh, dvhltc,
	niv, tglx, peterz, rostedt, Valdis.Kletnieks, dhowells

Hello!

This patchset includes three fixes for problems in tip/core/rcu:

1.	Convert grace-period acceleration for the last non-dynticked
	CPU to a trivial state machine in order to avoid illegally
	invoking __rcu_process_callbacks() with irqs disabled.

2.	Make the non-PROVE_RCU variant of rcu_read_lock_sched_held()
	understand that preemption is disabled during boot before
	the scheduler starts.  (The earlier patch did the PROVE_RCU
	variant, but missed the non-PROVE_RCU variant.)

3.	Add some irq-disabling to the grace-period acceleration
	in #1 above and also enforce a hold-off period so that the
	last non-dynticked CPU doesn't softirq itself to death when
	there are multiple RCU callbacks in flight.

							Thanx, Paul

 b/include/linux/rcupdate.h |    3 -
 b/kernel/rcutree.c         |    3 +
 b/kernel/rcutree.h         |    1 
 b/kernel/rcutree_plugin.h  |   74 ++++++++++++++++++++++++++++++++-------------
 kernel/rcutree_plugin.h    |   11 +++++-
 5 files changed, 67 insertions(+), 25 deletions(-)

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH tip/core/rcu 1/3] rcu: fixes for accelerated grace periods for last non-dynticked CPU
  2010-02-27  0:38 [PATCH tip/core/rcu 0/3] rcu: fixes for lockdep RCU and accelerated dyntick GPs Paul E. McKenney
@ 2010-02-27  0:38 ` Paul E. McKenney
  2010-02-27 12:56   ` [tip:core/rcu] rcu: Fix " tip-bot for Paul E. McKenney
  2010-02-27  0:38 ` [PATCH tip/core/rcu 2/3] rcu: Make non-RCU_PROVE_LOCKING rcu_read_lock_sched_held() understand boot Paul E. McKenney
  2010-02-27  0:38 ` [PATCH tip/core/rcu 3/3] rcu: more fixes for accelerated GPs for last non-dynticked CPU Paul E. McKenney
  2 siblings, 1 reply; 7+ messages in thread
From: Paul E. McKenney @ 2010-02-27  0:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, laijs, dipankar, akpm, mathieu.desnoyers, josh, dvhltc,
	niv, tglx, peterz, rostedt, Valdis.Kletnieks, dhowells,
	Paul E. McKenney

It is illegal to invoke __rcu_process_callbacks() with irqs disabled,
so do it indirectly via raise_softirq().  This requires a state-machine
implementation to cycle through the grace-period machinery the required
number of times.

Located-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcutree.c        |    3 ++
 kernel/rcutree.h        |    1 +
 kernel/rcutree_plugin.h |   73 ++++++++++++++++++++++++++++++++++-------------
 3 files changed, 57 insertions(+), 20 deletions(-)

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 335bfe4..3ec8160 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1341,6 +1341,9 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 	 * grace-period manipulations above.
 	 */
 	smp_mb(); /* See above block comment. */
+
+	/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
+	rcu_needs_cpu_flush();
 }
 
 static void
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 2ceb083..1439eb5 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -373,5 +373,6 @@ static int rcu_preempt_needs_cpu(int cpu);
 static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
 static void rcu_preempt_send_cbs_to_orphanage(void);
 static void __init __rcu_init_preempt(void);
+static void rcu_needs_cpu_flush(void);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3516de7..ed241fc 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -973,9 +973,19 @@ int rcu_needs_cpu(int cpu)
 	return rcu_needs_cpu_quick_check(cpu);
 }
 
+/*
+ * Check to see if we need to continue a callback-flush operations to
+ * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
+ * entry is not configured, so we never do need to.
+ */
+static void rcu_needs_cpu_flush(void)
+{
+}
+
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #define RCU_NEEDS_CPU_FLUSHES 5
+static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 
 /*
  * Check to see if any future RCU-related work will need to be done
@@ -988,39 +998,62 @@ int rcu_needs_cpu(int cpu)
  * only if all other CPUs are already in dynticks-idle mode.  This will
  * allow the CPU cores to be powered down immediately, as opposed to after
  * waiting many milliseconds for grace periods to elapse.
+ *
+ * Because it is not legal to invoke rcu_process_callbacks() with irqs
+ * disabled, we do one pass of force_quiescent_state(), then do a
+ * raise_softirq() to cause rcu_process_callbacks() to be invoked later.
+ * The per-cpu rcu_dyntick_drain variable controls the sequencing.
  */
 int rcu_needs_cpu(int cpu)
 {
-	int c = 1;
-	int i;
+	int c = 0;
 	int thatcpu;
 
 	/* Don't bother unless we are the last non-dyntick-idle CPU. */
 	for_each_cpu_not(thatcpu, nohz_cpu_mask)
-		if (thatcpu != cpu)
+		if (thatcpu != cpu) {
+			per_cpu(rcu_dyntick_drain, cpu) = 0;
 			return rcu_needs_cpu_quick_check(cpu);
-
-	/* Try to push remaining RCU-sched and RCU-bh callbacks through. */
-	for (i = 0; i < RCU_NEEDS_CPU_FLUSHES && c; i++) {
-		c = 0;
-		if (per_cpu(rcu_sched_data, cpu).nxtlist) {
-			rcu_sched_qs(cpu);
-			force_quiescent_state(&rcu_sched_state, 0);
-			__rcu_process_callbacks(&rcu_sched_state,
-						&per_cpu(rcu_sched_data, cpu));
-			c = !!per_cpu(rcu_sched_data, cpu).nxtlist;
-		}
-		if (per_cpu(rcu_bh_data, cpu).nxtlist) {
-			rcu_bh_qs(cpu);
-			force_quiescent_state(&rcu_bh_state, 0);
-			__rcu_process_callbacks(&rcu_bh_state,
-						&per_cpu(rcu_bh_data, cpu));
-			c = !!per_cpu(rcu_bh_data, cpu).nxtlist;
 		}
+
+	/* Check and update the rcu_dyntick_drain sequencing. */
+	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+		/* First time through, initialize the counter. */
+		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
+	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+		/* We have hit the limit, so time to give up. */
+		return rcu_needs_cpu_quick_check(cpu);
+	}
+
+	/* Do one step pushing remaining RCU callbacks through. */
+	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
+		rcu_sched_qs(cpu);
+		force_quiescent_state(&rcu_sched_state, 0);
+		c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
+	}
+	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
+		rcu_bh_qs(cpu);
+		force_quiescent_state(&rcu_bh_state, 0);
+		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
 	}
 
 	/* If RCU callbacks are still pending, RCU still needs this CPU. */
+	if (c)
+		raise_softirq(RCU_SOFTIRQ);
 	return c;
 }
 
+/*
+ * Check to see if we need to continue a callback-flush operations to
+ * allow the last CPU to enter dyntick-idle mode.
+ */
+static void rcu_needs_cpu_flush(void)
+{
+	int cpu = smp_processor_id();
+
+	if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
+		return;
+	(void)rcu_needs_cpu(cpu);
+}
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
-- 
1.6.6


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH tip/core/rcu 2/3] rcu: Make non-RCU_PROVE_LOCKING rcu_read_lock_sched_held() understand boot
  2010-02-27  0:38 [PATCH tip/core/rcu 0/3] rcu: fixes for lockdep RCU and accelerated dyntick GPs Paul E. McKenney
  2010-02-27  0:38 ` [PATCH tip/core/rcu 1/3] rcu: fixes for accelerated grace periods for last non-dynticked CPU Paul E. McKenney
@ 2010-02-27  0:38 ` Paul E. McKenney
  2010-02-27 12:56   ` [tip:core/rcu] " tip-bot for Paul E. McKenney
  2010-02-27  0:38 ` [PATCH tip/core/rcu 3/3] rcu: more fixes for accelerated GPs for last non-dynticked CPU Paul E. McKenney
  2 siblings, 1 reply; 7+ messages in thread
From: Paul E. McKenney @ 2010-02-27  0:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, laijs, dipankar, akpm, mathieu.desnoyers, josh, dvhltc,
	niv, tglx, peterz, rostedt, Valdis.Kletnieks, dhowells,
	Paul E. McKenney

Before the scheduler starts, all tasks are non-preemptible by
definition. So, during that time, rcu_read_lock_sched_held()
needs to always return "true".  This patch makes that be so
for RCU_PROVE_LOCKING=n.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 include/linux/rcupdate.h |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index fcea332..c843736 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -166,7 +166,7 @@ static inline int rcu_read_lock_bh_held(void)
 
 static inline int rcu_read_lock_sched_held(void)
 {
-	return preempt_count() != 0;
+	return preempt_count() != 0 || !rcu_scheduler_active;
 }
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-- 
1.6.6


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH tip/core/rcu 3/3] rcu: more fixes for accelerated GPs for last non-dynticked CPU
  2010-02-27  0:38 [PATCH tip/core/rcu 0/3] rcu: fixes for lockdep RCU and accelerated dyntick GPs Paul E. McKenney
  2010-02-27  0:38 ` [PATCH tip/core/rcu 1/3] rcu: fixes for accelerated grace periods for last non-dynticked CPU Paul E. McKenney
  2010-02-27  0:38 ` [PATCH tip/core/rcu 2/3] rcu: Make non-RCU_PROVE_LOCKING rcu_read_lock_sched_held() understand boot Paul E. McKenney
@ 2010-02-27  0:38 ` Paul E. McKenney
  2010-02-27 12:56   ` [tip:core/rcu] rcu: Fix " tip-bot for Paul E. McKenney
  2 siblings, 1 reply; 7+ messages in thread
From: Paul E. McKenney @ 2010-02-27  0:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, laijs, dipankar, akpm, mathieu.desnoyers, josh, dvhltc,
	niv, tglx, peterz, rostedt, Valdis.Kletnieks, dhowells,
	Paul E. McKenney

This patch disables irqs across the call to rcu_needs_cpu().  It also
enforces a hold-off period so that the idle loop doesn't softirq itself
to death when there are lots of RCU callbacks in flight on the last
non-dynticked CPU.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcutree_plugin.h |   10 +++++++++-
 1 files changed, 9 insertions(+), 1 deletions(-)

diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ed241fc..464ad2c 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -986,6 +986,7 @@ static void rcu_needs_cpu_flush(void)
 
 #define RCU_NEEDS_CPU_FLUSHES 5
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
+static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
 
 /*
  * Check to see if any future RCU-related work will need to be done
@@ -1013,6 +1014,7 @@ int rcu_needs_cpu(int cpu)
 	for_each_cpu_not(thatcpu, nohz_cpu_mask)
 		if (thatcpu != cpu) {
 			per_cpu(rcu_dyntick_drain, cpu) = 0;
+			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
 			return rcu_needs_cpu_quick_check(cpu);
 		}
 
@@ -1022,6 +1024,7 @@ int rcu_needs_cpu(int cpu)
 		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
 	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* We have hit the limit, so time to give up. */
+		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
 		return rcu_needs_cpu_quick_check(cpu);
 	}
 
@@ -1038,8 +1041,10 @@ int rcu_needs_cpu(int cpu)
 	}
 
 	/* If RCU callbacks are still pending, RCU still needs this CPU. */
-	if (c)
+	if (c) {
 		raise_softirq(RCU_SOFTIRQ);
+		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+	}
 	return c;
 }
 
@@ -1050,10 +1055,13 @@ int rcu_needs_cpu(int cpu)
 static void rcu_needs_cpu_flush(void)
 {
 	int cpu = smp_processor_id();
+	unsigned long flags;
 
 	if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
 		return;
+	local_irq_save(flags);
 	(void)rcu_needs_cpu(cpu);
+	local_irq_restore(flags);
 }
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
-- 
1.6.6


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [tip:core/rcu] rcu: Fix accelerated grace periods for last non-dynticked CPU
  2010-02-27  0:38 ` [PATCH tip/core/rcu 1/3] rcu: fixes for accelerated grace periods for last non-dynticked CPU Paul E. McKenney
@ 2010-02-27 12:56   ` tip-bot for Paul E. McKenney
  0 siblings, 0 replies; 7+ messages in thread
From: tip-bot for Paul E. McKenney @ 2010-02-27 12:56 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: linux-kernel, paulmck, hpa, mingo, tglx, mingo

Commit-ID:  a47cd880b50e14b0b6f5e9d426ae9a2676c9c474
Gitweb:     http://git.kernel.org/tip/a47cd880b50e14b0b6f5e9d426ae9a2676c9c474
Author:     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
AuthorDate: Fri, 26 Feb 2010 16:38:56 -0800
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 27 Feb 2010 09:53:52 +0100

rcu: Fix accelerated grace periods for last non-dynticked CPU

It is invalid to invoke __rcu_process_callbacks() with irqs
disabled, so do it indirectly via raise_softirq().  This
requires a state-machine implementation to cycle through the
grace-period machinery the required number of times.

Located-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267231138-27856-1-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 kernel/rcutree.c        |    3 ++
 kernel/rcutree.h        |    1 +
 kernel/rcutree_plugin.h |   73 ++++++++++++++++++++++++++++++++++-------------
 3 files changed, 57 insertions(+), 20 deletions(-)

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 335bfe4..3ec8160 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1341,6 +1341,9 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 	 * grace-period manipulations above.
 	 */
 	smp_mb(); /* See above block comment. */
+
+	/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
+	rcu_needs_cpu_flush();
 }
 
 static void
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 2ceb083..1439eb5 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -373,5 +373,6 @@ static int rcu_preempt_needs_cpu(int cpu);
 static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
 static void rcu_preempt_send_cbs_to_orphanage(void);
 static void __init __rcu_init_preempt(void);
+static void rcu_needs_cpu_flush(void);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3516de7..ed241fc 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -973,9 +973,19 @@ int rcu_needs_cpu(int cpu)
 	return rcu_needs_cpu_quick_check(cpu);
 }
 
+/*
+ * Check to see if we need to continue a callback-flush operations to
+ * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
+ * entry is not configured, so we never do need to.
+ */
+static void rcu_needs_cpu_flush(void)
+{
+}
+
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #define RCU_NEEDS_CPU_FLUSHES 5
+static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 
 /*
  * Check to see if any future RCU-related work will need to be done
@@ -988,39 +998,62 @@ int rcu_needs_cpu(int cpu)
  * only if all other CPUs are already in dynticks-idle mode.  This will
  * allow the CPU cores to be powered down immediately, as opposed to after
  * waiting many milliseconds for grace periods to elapse.
+ *
+ * Because it is not legal to invoke rcu_process_callbacks() with irqs
+ * disabled, we do one pass of force_quiescent_state(), then do a
+ * raise_softirq() to cause rcu_process_callbacks() to be invoked later.
+ * The per-cpu rcu_dyntick_drain variable controls the sequencing.
  */
 int rcu_needs_cpu(int cpu)
 {
-	int c = 1;
-	int i;
+	int c = 0;
 	int thatcpu;
 
 	/* Don't bother unless we are the last non-dyntick-idle CPU. */
 	for_each_cpu_not(thatcpu, nohz_cpu_mask)
-		if (thatcpu != cpu)
+		if (thatcpu != cpu) {
+			per_cpu(rcu_dyntick_drain, cpu) = 0;
 			return rcu_needs_cpu_quick_check(cpu);
-
-	/* Try to push remaining RCU-sched and RCU-bh callbacks through. */
-	for (i = 0; i < RCU_NEEDS_CPU_FLUSHES && c; i++) {
-		c = 0;
-		if (per_cpu(rcu_sched_data, cpu).nxtlist) {
-			rcu_sched_qs(cpu);
-			force_quiescent_state(&rcu_sched_state, 0);
-			__rcu_process_callbacks(&rcu_sched_state,
-						&per_cpu(rcu_sched_data, cpu));
-			c = !!per_cpu(rcu_sched_data, cpu).nxtlist;
-		}
-		if (per_cpu(rcu_bh_data, cpu).nxtlist) {
-			rcu_bh_qs(cpu);
-			force_quiescent_state(&rcu_bh_state, 0);
-			__rcu_process_callbacks(&rcu_bh_state,
-						&per_cpu(rcu_bh_data, cpu));
-			c = !!per_cpu(rcu_bh_data, cpu).nxtlist;
 		}
+
+	/* Check and update the rcu_dyntick_drain sequencing. */
+	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+		/* First time through, initialize the counter. */
+		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
+	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+		/* We have hit the limit, so time to give up. */
+		return rcu_needs_cpu_quick_check(cpu);
+	}
+
+	/* Do one step pushing remaining RCU callbacks through. */
+	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
+		rcu_sched_qs(cpu);
+		force_quiescent_state(&rcu_sched_state, 0);
+		c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
+	}
+	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
+		rcu_bh_qs(cpu);
+		force_quiescent_state(&rcu_bh_state, 0);
+		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
 	}
 
 	/* If RCU callbacks are still pending, RCU still needs this CPU. */
+	if (c)
+		raise_softirq(RCU_SOFTIRQ);
 	return c;
 }
 
+/*
+ * Check to see if we need to continue a callback-flush operations to
+ * allow the last CPU to enter dyntick-idle mode.
+ */
+static void rcu_needs_cpu_flush(void)
+{
+	int cpu = smp_processor_id();
+
+	if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
+		return;
+	(void)rcu_needs_cpu(cpu);
+}
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [tip:core/rcu] rcu: Make non-RCU_PROVE_LOCKING rcu_read_lock_sched_held() understand boot
  2010-02-27  0:38 ` [PATCH tip/core/rcu 2/3] rcu: Make non-RCU_PROVE_LOCKING rcu_read_lock_sched_held() understand boot Paul E. McKenney
@ 2010-02-27 12:56   ` tip-bot for Paul E. McKenney
  0 siblings, 0 replies; 7+ messages in thread
From: tip-bot for Paul E. McKenney @ 2010-02-27 12:56 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: linux-kernel, paulmck, hpa, mingo, tglx, mingo

Commit-ID:  0b1c87278a8c7e394022ec184a0b44a3886b6fde
Gitweb:     http://git.kernel.org/tip/0b1c87278a8c7e394022ec184a0b44a3886b6fde
Author:     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
AuthorDate: Fri, 26 Feb 2010 16:38:57 -0800
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 27 Feb 2010 09:53:52 +0100

rcu: Make non-RCU_PROVE_LOCKING rcu_read_lock_sched_held() understand boot

Before the scheduler starts, all tasks are non-preemptible by
definition. So, during that time, rcu_read_lock_sched_held()
needs to always return "true".  This patch makes that be so
for RCU_PROVE_LOCKING=n.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267231138-27856-2-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 include/linux/rcupdate.h |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index fcea332..c843736 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -166,7 +166,7 @@ static inline int rcu_read_lock_bh_held(void)
 
 static inline int rcu_read_lock_sched_held(void)
 {
-	return preempt_count() != 0;
+	return preempt_count() != 0 || !rcu_scheduler_active;
 }
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [tip:core/rcu] rcu: Fix accelerated GPs for last non-dynticked CPU
  2010-02-27  0:38 ` [PATCH tip/core/rcu 3/3] rcu: more fixes for accelerated GPs for last non-dynticked CPU Paul E. McKenney
@ 2010-02-27 12:56   ` tip-bot for Paul E. McKenney
  0 siblings, 0 replies; 7+ messages in thread
From: tip-bot for Paul E. McKenney @ 2010-02-27 12:56 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: linux-kernel, paulmck, hpa, mingo, tglx, mingo

Commit-ID:  71da81324c83ef65bb196c7f874ac1c6996d8287
Gitweb:     http://git.kernel.org/tip/71da81324c83ef65bb196c7f874ac1c6996d8287
Author:     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
AuthorDate: Fri, 26 Feb 2010 16:38:58 -0800
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 27 Feb 2010 09:53:53 +0100

rcu: Fix accelerated GPs for last non-dynticked CPU

This patch disables irqs across the call to rcu_needs_cpu().  It
also enforces a hold-off period so that the idle loop doesn't
softirq itself to death when there are lots of RCU callbacks in
flight on the last non-dynticked CPU.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267231138-27856-3-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 kernel/rcutree_plugin.h |   10 +++++++++-
 1 files changed, 9 insertions(+), 1 deletions(-)

diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ed241fc..464ad2c 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -986,6 +986,7 @@ static void rcu_needs_cpu_flush(void)
 
 #define RCU_NEEDS_CPU_FLUSHES 5
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
+static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
 
 /*
  * Check to see if any future RCU-related work will need to be done
@@ -1013,6 +1014,7 @@ int rcu_needs_cpu(int cpu)
 	for_each_cpu_not(thatcpu, nohz_cpu_mask)
 		if (thatcpu != cpu) {
 			per_cpu(rcu_dyntick_drain, cpu) = 0;
+			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
 			return rcu_needs_cpu_quick_check(cpu);
 		}
 
@@ -1022,6 +1024,7 @@ int rcu_needs_cpu(int cpu)
 		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
 	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* We have hit the limit, so time to give up. */
+		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
 		return rcu_needs_cpu_quick_check(cpu);
 	}
 
@@ -1038,8 +1041,10 @@ int rcu_needs_cpu(int cpu)
 	}
 
 	/* If RCU callbacks are still pending, RCU still needs this CPU. */
-	if (c)
+	if (c) {
 		raise_softirq(RCU_SOFTIRQ);
+		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+	}
 	return c;
 }
 
@@ -1050,10 +1055,13 @@ int rcu_needs_cpu(int cpu)
 static void rcu_needs_cpu_flush(void)
 {
 	int cpu = smp_processor_id();
+	unsigned long flags;
 
 	if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
 		return;
+	local_irq_save(flags);
 	(void)rcu_needs_cpu(cpu);
+	local_irq_restore(flags);
 }
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2010-02-27 12:57 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-02-27  0:38 [PATCH tip/core/rcu 0/3] rcu: fixes for lockdep RCU and accelerated dyntick GPs Paul E. McKenney
2010-02-27  0:38 ` [PATCH tip/core/rcu 1/3] rcu: fixes for accelerated grace periods for last non-dynticked CPU Paul E. McKenney
2010-02-27 12:56   ` [tip:core/rcu] rcu: Fix " tip-bot for Paul E. McKenney
2010-02-27  0:38 ` [PATCH tip/core/rcu 2/3] rcu: Make non-RCU_PROVE_LOCKING rcu_read_lock_sched_held() understand boot Paul E. McKenney
2010-02-27 12:56   ` [tip:core/rcu] " tip-bot for Paul E. McKenney
2010-02-27  0:38 ` [PATCH tip/core/rcu 3/3] rcu: more fixes for accelerated GPs for last non-dynticked CPU Paul E. McKenney
2010-02-27 12:56   ` [tip:core/rcu] rcu: Fix " tip-bot for Paul E. McKenney

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.