* [PATCH tip/core/rcu 01/52] rcu: Remove rsp parameter from rcu_report_qs_rnp()
In reply to: [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_report_qs_rnp().
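
As an aside for readers following the series, the shape of the
transformation can be sketched with a standalone toy program (the
structure, helpers, and values below are illustrative stand-ins, not
kernel code):

	#include <stdio.h>

	#define __maybe_unused __attribute__((unused))

	struct rcu_state { unsigned long gp_seq; };

	/* The one and only instance, as with the kernel's rcu_state. */
	static struct rcu_state rcu_state = { .gp_seq = 4 };

	/* Before: each helper took a pointer to the sole state. */
	static void report_old(struct rcu_state *rsp)
	{
		printf("gp_seq=%lu\n", rsp->gp_seq);
	}

	/*
	 * After: the helper references the global directly.  A local
	 * alias marked __maybe_unused keeps the body unchanged and
	 * avoids warnings if later patches drop all uses of it.
	 */
	static void report_new(void)
	{
		struct rcu_state __maybe_unused *rsp = &rcu_state;

		printf("gp_seq=%lu\n", rsp->gp_seq);
	}

	int main(void)
	{
		report_old(&rcu_state);
		report_new();
		return 0;
	}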

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f0e7e3972fd9..c9f4d7f3de91 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -132,9 +132,8 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
-static void
-rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
-		  struct rcu_node *rnp, unsigned long gps, unsigned long flags);
+static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
+			      unsigned long gps, unsigned long flags);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
@@ -1946,7 +1945,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
 		rnp->rcu_gp_init_mask = mask;
 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
-			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 		else
 			raw_spin_unlock_irq_rcu_node(rnp);
 		cond_resched_tasks_rcu_qs();
@@ -2213,13 +2212,13 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  * disabled.  This allows propagating quiescent state due to resumed tasks
  * during grace-period initialization.
  */
-static void
-rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
-		  struct rcu_node *rnp, unsigned long gps, unsigned long flags)
+static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
+			      unsigned long gps, unsigned long flags)
 	__releases(rnp->lock)
 {
 	unsigned long oldmask = 0;
 	struct rcu_node *rnp_c;
+	struct rcu_state __maybe_unused *rsp = &rcu_state;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -2311,7 +2310,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 	mask = rnp->grpmask;
 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
-	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
+	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
 }
 
 /*
@@ -2354,7 +2353,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 		 */
 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
 
-		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 		/* ^^^ Released rnp->lock */
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
@@ -2622,7 +2621,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
 		}
 		if (mask != 0) {
 			/* Idle/offline CPUs, report (releases rnp->lock). */
-			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 		} else {
 			/* Nothing to do here, so just drop the lock. */
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -3576,7 +3575,7 @@ void rcu_cpu_starting(unsigned int cpu)
 		rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
 		if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
 			/* Report QS -after- changing ->qsmaskinitnext! */
-			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 		} else {
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
@@ -3605,7 +3604,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 	rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
-		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	}
 	rnp->qsmaskinitnext &= ~mask;
-- 
2.17.1


* [PATCH tip/core/rcu 02/52] rcu: Remove rsp parameter from rcu_report_qs_rsp()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_report_qs_rsp().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c9f4d7f3de91..73dde7c661e7 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -138,7 +138,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
-static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
+static void invoke_rcu_callbacks(struct rcu_data *rdp);
 static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);
 
@@ -2188,9 +2188,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
  * just-completed grace period.  Note that the caller must hold rnp->lock,
  * which is released before return.
  */
-static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
+static void rcu_report_qs_rsp(unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
+	struct rcu_state *rsp = &rcu_state;
+
 	raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
@@ -2267,7 +2269,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
 	 * to clean up and start the next grace period if one is needed.
 	 */
-	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
+	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
 }
 
 /*
@@ -2301,7 +2303,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 		 * Only one rcu_node structure in the tree, so don't
 		 * try to report up to its nonexistent parent!
 		 */
-		rcu_report_qs_rsp(rsp, flags);
+		rcu_report_qs_rsp(flags);
 		return;
 	}
 
@@ -2760,7 +2762,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 
 	/* If there are callbacks ready, invoke them. */
 	if (rcu_segcblist_ready_cbs(&rdp->cblist))
-		invoke_rcu_callbacks(rsp, rdp);
+		invoke_rcu_callbacks(rdp);
 
 	/* Do any needed deferred wakeups of rcuo kthreads. */
 	do_nocb_deferred_wakeup(rdp);
@@ -2788,8 +2790,10 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
  * are running on the current CPU with softirqs disabled, the
  * rcu_cpu_kthread_task cannot disappear out from under us.
  */
-static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+static void invoke_rcu_callbacks(struct rcu_data *rdp)
 {
+	struct rcu_state *rsp = &rcu_state;
+
 	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
 		return;
 	if (likely(!rsp->boost)) {
-- 
2.17.1


* [PATCH tip/core/rcu 03/52] rcu: Remove rsp parameter from rcu_report_unblock_qs_rnp()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_report_unblock_qs_rnp(), which is particularly appropriate in
this case given that this parameter is no longer used.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        | 3 +--
 kernel/rcu/tree_plugin.h | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 73dde7c661e7..3abb981bb11c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2280,8 +2280,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
  * disabled.
  */
 static void __maybe_unused
-rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
-			  struct rcu_node *rnp, unsigned long flags)
+rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 	__releases(rnp->lock)
 {
 	unsigned long gps;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 18175ca19f34..566828ecaecb 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -566,7 +566,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 							 rnp->grplo,
 							 rnp->grphi,
 							 !!rnp->gp_tasks);
-			rcu_report_unblock_qs_rnp(&rcu_state, rnp, flags);
+			rcu_report_unblock_qs_rnp(rnp, flags);
 		} else {
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
-- 
2.17.1


* [PATCH tip/core/rcu 04/52] rcu: Remove rsp parameter from rcu_report_qs_rdp()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_report_qs_rdp().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3abb981bb11c..8977e37fcba3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2319,12 +2319,13 @@ rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
  * structure.  This must be called from the specified CPU.
  */
 static void
-rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
+rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	unsigned long mask;
 	bool needwake;
 	struct rcu_node *rnp;
+	struct rcu_state *rsp = &rcu_state;
 
 	rnp = rdp->mynode;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -2391,7 +2392,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
 	 * judge of that).
 	 */
-	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
+	rcu_report_qs_rdp(rdp->cpu, rdp);
 }
 
 /*
-- 
2.17.1


* [PATCH tip/core/rcu 05/52] rcu: Remove rsp parameter from rcu_gp_in_progress()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_gp_in_progress().
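
For context (illustrative, though the mask values mirror those in
kernel/rcu/rcu.h): ->gp_seq reserves its low-order bits for
grace-period state, so "a grace period is in progress" reduces to
"the low bits are nonzero".  A rough standalone model, with
READ_ONCE() elided:

	#include <stdio.h>

	#define RCU_SEQ_CTR_SHIFT	2
	#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

	/* Nonzero low-order state bits mean a GP is in progress. */
	static int seq_state(unsigned long s)
	{
		return s & RCU_SEQ_STATE_MASK;
	}

	int main(void)
	{
		unsigned long gp_seq = 8;  /* idle: state bits clear */

		printf("in progress? %d\n", seq_state(gp_seq) != 0);
		gp_seq++;  /* as in rcu_seq_start(): set state bits */
		printf("in progress? %d\n", seq_state(gp_seq) != 0);
		return 0;
	}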

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        | 30 +++++++++++++++---------------
 kernel/rcu/tree_plugin.h |  2 +-
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8977e37fcba3..605e1c990619 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -189,9 +189,9 @@ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
  * permit this function to be invoked without holding the root rcu_node
  * structure's ->lock, but of course results can be subject to change.
  */
-static int rcu_gp_in_progress(struct rcu_state *rsp)
+static int rcu_gp_in_progress(void)
 {
-	return rcu_seq_state(rcu_seq_current(&rsp->gp_seq));
+	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
 }
 
 void rcu_softirq_qs(void)
@@ -1296,7 +1296,7 @@ static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
 		return;
 	j = READ_ONCE(rsp->jiffies_kick_kthreads);
 	if (time_after(jiffies, j) && rsp->gp_kthread &&
-	    (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
+	    (rcu_gp_in_progress() || READ_ONCE(rsp->gp_flags))) {
 		WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
 		rcu_ftrace_dump(DUMP_ALL);
 		wake_up_process(rsp->gp_kthread);
@@ -1448,7 +1448,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	struct rcu_node *rnp;
 
 	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
-	    !rcu_gp_in_progress(rsp))
+	    !rcu_gp_in_progress())
 		return;
 	rcu_stall_kick_kthreads(rsp);
 	j = jiffies;
@@ -1483,14 +1483,14 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 		return; /* No stall or GP completed since entering function. */
 	rnp = rdp->mynode;
 	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
-	if (rcu_gp_in_progress(rsp) &&
+	if (rcu_gp_in_progress() &&
 	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
 	    cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
 
-	} else if (rcu_gp_in_progress(rsp) &&
+	} else if (rcu_gp_in_progress() &&
 		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
 		   cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
@@ -1588,7 +1588,7 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
 	}
 
 	/* If GP already in progress, just leave, otherwise start one. */
-	if (rcu_gp_in_progress(rsp)) {
+	if (rcu_gp_in_progress()) {
 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
 		goto unlock_out;
 	}
@@ -1845,7 +1845,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	}
 	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
 
-	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
+	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
 		/*
 		 * Grace period already in progress, don't start another.
 		 * Not supposed to be able to happen.
@@ -2194,7 +2194,7 @@ static void rcu_report_qs_rsp(unsigned long flags)
 	struct rcu_state *rsp = &rcu_state;
 
 	raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
-	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+	WARN_ON_ONCE(!rcu_gp_in_progress());
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
 	rcu_gp_kthread_wake(rsp);
@@ -2681,7 +2681,7 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
 	static atomic_t warned = ATOMIC_INIT(0);
 
-	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress(rsp) ||
+	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
 	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
 		return;
 	j = jiffies; /* Expensive access, and in common case don't get here. */
@@ -2692,7 +2692,7 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	j = jiffies;
-	if (rcu_gp_in_progress(rsp) ||
+	if (rcu_gp_in_progress() ||
 	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
 	    time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
 	    time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
@@ -2705,7 +2705,7 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
 	if (rnp_root != rnp)
 		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
 	j = jiffies;
-	if (rcu_gp_in_progress(rsp) ||
+	if (rcu_gp_in_progress() ||
 	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
 	    time_before(j, rsp->gp_req_activity + gpssdelay) ||
 	    time_before(j, rsp->gp_activity + gpssdelay) ||
@@ -2750,7 +2750,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	rcu_check_quiescent_state(rsp, rdp);
 
 	/* No grace period and unregistered callbacks? */
-	if (!rcu_gp_in_progress(rsp) &&
+	if (!rcu_gp_in_progress() &&
 	    rcu_segcblist_is_enabled(&rdp->cblist)) {
 		local_irq_save(flags);
 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
@@ -2840,7 +2840,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 		note_gp_changes(rsp, rdp);
 
 		/* Start a new grace period if one not already started. */
-		if (!rcu_gp_in_progress(rsp)) {
+		if (!rcu_gp_in_progress()) {
 			rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
 		} else {
 			/* Give the grace period a kick. */
@@ -3104,7 +3104,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 		return 1;
 
 	/* Has RCU gone idle with this CPU needing another grace period? */
-	if (!rcu_gp_in_progress(rsp) &&
+	if (!rcu_gp_in_progress() &&
 	    rcu_segcblist_is_enabled(&rdp->cblist) &&
 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
 		return 1;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 566828ecaecb..99f517035a6e 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2655,7 +2655,7 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
 {
 #ifdef CONFIG_NO_HZ_FULL
 	if (tick_nohz_full_cpu(smp_processor_id()) &&
-	    (!rcu_gp_in_progress(rsp) ||
+	    (!rcu_gp_in_progress() ||
 	     ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
 		return true;
 #endif /* #ifdef CONFIG_NO_HZ_FULL */
-- 
2.17.1


* [PATCH tip/core/rcu 06/52] rcu: Remove rsp parameter from rcu_get_root()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_get_root().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        | 38 +++++++++++++++++++-------------------
 kernel/rcu/tree_exp.h    |  6 +++---
 kernel/rcu/tree_plugin.h |  2 +-
 3 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 605e1c990619..9a40692f980d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -623,9 +623,9 @@ EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 /*
  * Return the root node of the specified rcu_state structure.
  */
-static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+static struct rcu_node *rcu_get_root(void)
 {
-	return &rsp->node[0];
+	return &rcu_state.node[0];
 }
 
 /*
@@ -1317,7 +1317,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 	unsigned long gpa;
 	unsigned long j;
 	int ndetected = 0;
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 	long totqlen = 0;
 
 	/* Kick and suppress, if so configured. */
@@ -1366,7 +1366,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
 			       rsp->name, j - gpa, j, gpa,
 			       jiffies_till_next_fqs,
-			       rcu_get_root(rsp)->qsmask);
+			       rcu_get_root()->qsmask);
 			/* In this case, the current CPU might be at fault. */
 			sched_show_task(current);
 		}
@@ -1388,7 +1388,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	int cpu;
 	unsigned long flags;
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 	long totqlen = 0;
 
 	/* Kick and suppress, if so configured. */
@@ -1834,7 +1834,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	unsigned long oldmask;
 	unsigned long mask;
 	struct rcu_data *rdp;
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
 	raw_spin_lock_irq_rcu_node(rnp);
@@ -1961,7 +1961,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
  */
 static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
 {
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 
 	/* Someone like call_rcu() requested a force-quiescent-state scan. */
 	*gfp = READ_ONCE(rsp->gp_flags);
@@ -1980,7 +1980,7 @@ static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
  */
 static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
 {
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
 	rsp->n_force_qs++;
@@ -2009,7 +2009,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	bool needgp = false;
 	unsigned long new_gp_seq;
 	struct rcu_data *rdp;
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 	struct swait_queue_head *sq;
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
@@ -2057,7 +2057,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 		rcu_gp_slow(rsp, gp_cleanup_delay);
 	}
-	rnp = rcu_get_root(rsp);
+	rnp = rcu_get_root();
 	raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
 
 	/* Declare grace period done. */
@@ -2093,7 +2093,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 	unsigned long j;
 	int ret;
 	struct rcu_state *rsp = arg;
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 
 	rcu_bind_gp_kthread();
 	for (;;) {
@@ -2189,14 +2189,14 @@ static int __noreturn rcu_gp_kthread(void *arg)
  * which is released before return.
  */
 static void rcu_report_qs_rsp(unsigned long flags)
-	__releases(rcu_get_root(rsp)->lock)
+	__releases(rcu_get_root()->lock)
 {
 	struct rcu_state *rsp = &rcu_state;
 
-	raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
+	raw_lockdep_assert_held_rcu_node(rcu_get_root());
 	WARN_ON_ONCE(!rcu_gp_in_progress());
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
+	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
 	rcu_gp_kthread_wake(rsp);
 }
 
@@ -2653,7 +2653,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 			return;
 		rnp_old = rnp;
 	}
-	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
+	/* rnp_old == rcu_get_root(), rnp == NULL. */
 
 	/* Reached the root of the rcu_node tree, acquire lock. */
 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
@@ -2678,7 +2678,7 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
 	const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
 	unsigned long flags;
 	unsigned long j;
-	struct rcu_node *rnp_root = rcu_get_root(rsp);
+	struct rcu_node *rnp_root = rcu_get_root();
 	static atomic_t warned = ATOMIC_INIT(0);
 
 	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
@@ -3396,7 +3396,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 
 	/* Set up local state, ensuring consistent view of global state. */
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -3645,7 +3645,7 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 	unsigned long flags;
 	struct rcu_data *my_rdp;
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
+	struct rcu_node *rnp_root = rcu_get_root();
 	bool needwake;
 
 	if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
@@ -3743,7 +3743,7 @@ static int __init rcu_spawn_gp_kthread(void)
 	for_each_rcu_flavor(rsp) {
 		t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
 		BUG_ON(IS_ERR(t));
-		rnp = rcu_get_root(rsp);
+		rnp = rcu_get_root();
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rsp->gp_kthread = t;
 		if (kthread_prio) {
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 298a6904bbcd..0bcbb03c9702 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -288,7 +288,7 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
 	struct rcu_node *rnp = rdp->mynode;
-	struct rcu_node *rnp_root = rcu_get_root(rsp);
+	struct rcu_node *rnp_root = rcu_get_root();
 
 	/* Low-contention fastpath. */
 	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
@@ -479,7 +479,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 	unsigned long mask;
 	int ndetected;
 	struct rcu_node *rnp;
-	struct rcu_node *rnp_root = rcu_get_root(rsp);
+	struct rcu_node *rnp_root = rcu_get_root();
 	int ret;
 
 	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("startwait"));
@@ -643,7 +643,7 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
 
 	/* Wait for expedited grace period to complete. */
 	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
-	rnp = rcu_get_root(rsp);
+	rnp = rcu_get_root();
 	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
 		   sync_exp_work_done(rsp, s));
 	smp_mb(); /* Workqueue actions happen before return. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 99f517035a6e..545e4ac9422a 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -685,7 +685,7 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
  */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 {
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 
 	rcu_print_detail_task_stall_rnp(rnp);
 	rcu_for_each_leaf_node(rsp, rnp)
-- 
2.17.1


* [PATCH tip/core/rcu 07/52] rcu: Remove rsp parameter from record_gp_stall_check_time()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
record_gp_stall_check_time().
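
The timestamps themselves are simple jiffies arithmetic.  A
standalone sketch of the computation (HZ and the stand-in for
rcu_jiffies_till_stall_check() are assumptions; the kernel's default
stall timeout is 21 seconds):

	#include <stdio.h>

	#define HZ 250	/* assumed kernel configuration */

	/* Stand-in for rcu_jiffies_till_stall_check(). */
	static unsigned long stall_check_jiffies(void)
	{
		return 21 * HZ;
	}

	int main(void)
	{
		unsigned long j = 100000;  /* pretend current jiffies */
		unsigned long j1 = stall_check_jiffies();

		/* As in record_gp_stall_check_time(): warn at j + j1,
		 * but consider resched-kicking CPUs at half that.
		 */
		unsigned long gp_start = j;
		unsigned long jiffies_stall = j + j1;
		unsigned long jiffies_resched = j + j1 / 2;

		printf("gp_start=%lu stall=%lu resched=%lu\n",
		       gp_start, jiffies_stall, jiffies_resched);
		return 0;
	}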

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9a40692f980d..7f3e93eb726a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1213,17 +1213,17 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	return 0;
 }
 
-static void record_gp_stall_check_time(struct rcu_state *rsp)
+static void record_gp_stall_check_time(void)
 {
 	unsigned long j = jiffies;
 	unsigned long j1;
 
-	rsp->gp_start = j;
+	rcu_state.gp_start = j;
 	j1 = rcu_jiffies_till_stall_check();
 	/* Record ->gp_start before ->jiffies_stall. */
-	smp_store_release(&rsp->jiffies_stall, j + j1); /* ^^^ */
-	rsp->jiffies_resched = j + j1 / 2;
-	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
+	smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
+	rcu_state.jiffies_resched = j + j1 / 2;
+	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
 }
 
 /*
@@ -1855,7 +1855,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	}
 
 	/* Advance to a new grace period and initialize state. */
-	record_gp_stall_check_time(rsp);
+	record_gp_stall_check_time();
 	/* Record GP times before starting GP, hence rcu_seq_start(). */
 	rcu_seq_start(&rsp->gp_seq);
 	trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start"));
-- 
2.17.1


* [PATCH tip/core/rcu 08/52] rcu: Remove rsp parameter from rcu_check_gp_kthread_starvation()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_check_gp_kthread_starvation().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7f3e93eb726a..0f419554122a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1239,10 +1239,11 @@ static const char *gp_state_getname(short gs)
 /*
  * Complain about starvation of grace-period kthread.
  */
-static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
+static void rcu_check_gp_kthread_starvation(void)
 {
 	unsigned long gpa;
 	unsigned long j;
+	struct rcu_state *rsp = &rcu_state;
 
 	j = jiffies;
 	gpa = READ_ONCE(rsp->gp_activity);
@@ -1376,7 +1377,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 		WRITE_ONCE(rsp->jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 
-	rcu_check_gp_kthread_starvation(rsp);
+	rcu_check_gp_kthread_starvation();
 
 	panic_on_rcu_stall();
 
@@ -1414,7 +1415,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 		jiffies - rsp->gp_start,
 		(long)rcu_seq_current(&rsp->gp_seq), totqlen);
 
-	rcu_check_gp_kthread_starvation(rsp);
+	rcu_check_gp_kthread_starvation();
 
 	rcu_dump_cpu_stacks(rsp);
 
-- 
2.17.1


* [PATCH tip/core/rcu 09/52] rcu: Remove rsp parameter from rcu_dump_cpu_stacks()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_dump_cpu_stacks().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0f419554122a..2741d987e745 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1269,13 +1269,13 @@ static void rcu_check_gp_kthread_starvation(void)
  * that don't support NMI-based stack dumps.  The NMI-triggered stack
  * traces are more accurate because they are printed by the target CPU.
  */
-static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
+static void rcu_dump_cpu_stacks(void)
 {
 	int cpu;
 	unsigned long flags;
 	struct rcu_node *rnp;
 
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(&rcu_state, rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		for_each_leaf_node_possible_cpu(rnp, cpu)
 			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
@@ -1354,7 +1354,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
 	       (long)rcu_seq_current(&rsp->gp_seq), totqlen);
 	if (ndetected) {
-		rcu_dump_cpu_stacks(rsp);
+		rcu_dump_cpu_stacks();
 
 		/* Complain about tasks blocking the grace period. */
 		rcu_print_detail_task_stall(rsp);
@@ -1417,7 +1417,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 	rcu_check_gp_kthread_starvation();
 
-	rcu_dump_cpu_stacks(rsp);
+	rcu_dump_cpu_stacks();
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	/* Rewrite if needed in case of slow consoles. */
-- 
2.17.1


* [PATCH tip/core/rcu 10/52] rcu: Remove rsp parameter from rcu_stall_kick_kthreads()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_stall_kick_kthreads().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 2741d987e745..0777374f12a3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1289,9 +1289,10 @@ static void rcu_dump_cpu_stacks(void)
  * If too much time has passed in the current grace period, and if
  * so configured, go kick the relevant kthreads.
  */
-static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
+static void rcu_stall_kick_kthreads(void)
 {
 	unsigned long j;
+	struct rcu_state *rsp = &rcu_state;
 
 	if (!rcu_kick_kthreads)
 		return;
@@ -1322,7 +1323,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 	long totqlen = 0;
 
 	/* Kick and suppress, if so configured. */
-	rcu_stall_kick_kthreads(rsp);
+	rcu_stall_kick_kthreads();
 	if (rcu_cpu_stall_suppress)
 		return;
 
@@ -1393,7 +1394,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	long totqlen = 0;
 
 	/* Kick and suppress, if so configured. */
-	rcu_stall_kick_kthreads(rsp);
+	rcu_stall_kick_kthreads();
 	if (rcu_cpu_stall_suppress)
 		return;
 
@@ -1451,7 +1452,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
 	    !rcu_gp_in_progress())
 		return;
-	rcu_stall_kick_kthreads(rsp);
+	rcu_stall_kick_kthreads();
 	j = jiffies;
 
 	/*
-- 
2.17.1


* [PATCH tip/core/rcu 11/52] rcu: Remove rsp parameter from print_other_cpu_stall()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
print_other_cpu_stall().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0777374f12a3..a674198b5560 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1312,7 +1312,7 @@ static void panic_on_rcu_stall(void)
 		panic("RCU Stall\n");
 }
 
-static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
+static void print_other_cpu_stall(unsigned long gp_seq)
 {
 	int cpu;
 	unsigned long flags;
@@ -1320,6 +1320,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 	unsigned long j;
 	int ndetected = 0;
 	struct rcu_node *rnp = rcu_get_root();
+	struct rcu_state *rsp = &rcu_state;
 	long totqlen = 0;
 
 	/* Kick and suppress, if so configured. */
@@ -1497,7 +1498,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 		   cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
 		/* They had a few time units to dump stack, so complain. */
-		print_other_cpu_stall(rsp, gs2);
+		print_other_cpu_stall(gs2);
 	}
 }
 
-- 
2.17.1


* [PATCH tip/core/rcu 12/52] rcu: Remove rsp parameter from print_cpu_stall()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
print_cpu_stall().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a674198b5560..e875987f2ae3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1386,12 +1386,13 @@ static void print_other_cpu_stall(unsigned long gp_seq)
 	force_quiescent_state(rsp);  /* Kick them all. */
 }
 
-static void print_cpu_stall(struct rcu_state *rsp)
+static void print_cpu_stall(void)
 {
 	int cpu;
 	unsigned long flags;
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rcu_get_root();
+	struct rcu_state *rsp = &rcu_state;
 	long totqlen = 0;
 
 	/* Kick and suppress, if so configured. */
@@ -1491,7 +1492,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	    cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
 		/* We haven't checked in, so go dump stack. */
-		print_cpu_stall(rsp);
+		print_cpu_stall();
 
 	} else if (rcu_gp_in_progress() &&
 		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
-- 
2.17.1


* [PATCH tip/core/rcu 13/52] rcu: Remove rsp parameter from check_cpu_stall()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
check_cpu_stall().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e875987f2ae3..65411f6a84fe 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1441,7 +1441,7 @@ static void print_cpu_stall(void)
 	resched_cpu(smp_processor_id());
 }
 
-static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
+static void check_cpu_stall(struct rcu_data *rdp)
 {
 	unsigned long gs1;
 	unsigned long gs2;
@@ -1450,6 +1450,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	unsigned long jn;
 	unsigned long js;
 	struct rcu_node *rnp;
+	struct rcu_state *rsp = &rcu_state;
 
 	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
 	    !rcu_gp_in_progress())
@@ -3093,7 +3094,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Check for CPU stalls, if enabled. */
-	check_cpu_stall(rsp, rdp);
+	check_cpu_stall(rdp);
 
 	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
 	if (rcu_nohz_full_cpu(rsp))
-- 
2.17.1


* [PATCH tip/core/rcu 14/52] rcu: Remove rsp parameter from rcu_future_gp_cleanup()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_future_gp_cleanup().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 65411f6a84fe..967d429ecfaf 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1621,7 +1621,7 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
  * Clean up any old requests for the just-ended grace period.  Also return
  * whether any additional grace periods have been requested.
  */
-static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
 {
 	bool needmore;
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -2054,7 +2054,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		if (rnp == rdp->mynode)
 			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
 		/* smp_mb() provided by prior unlock-lock pair. */
-		needgp = rcu_future_gp_cleanup(rsp, rnp) || needgp;
+		needgp = rcu_future_gp_cleanup(rnp) || needgp;
 		sq = rcu_nocb_gp_get(rnp);
 		raw_spin_unlock_irq_rcu_node(rnp);
 		rcu_nocb_gp_cleanup(sq);
-- 
2.17.1


* [PATCH tip/core/rcu 15/52] rcu: Remove rsp parameter from rcu_gp_kthread_wake()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_gp_kthread_wake().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        | 22 +++++++++++-----------
 kernel/rcu/tree_plugin.h |  4 ++--
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 967d429ecfaf..d3150943db9f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1641,13 +1641,13 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
  * raced to awaken, and we lost), and finally don't try to awaken
  * a kthread that has not yet been created.
  */
-static void rcu_gp_kthread_wake(struct rcu_state *rsp)
+static void rcu_gp_kthread_wake(void)
 {
-	if (current == rsp->gp_kthread ||
-	    !READ_ONCE(rsp->gp_flags) ||
-	    !rsp->gp_kthread)
+	if (current == rcu_state.gp_kthread ||
+	    !READ_ONCE(rcu_state.gp_flags) ||
+	    !rcu_state.gp_kthread)
 		return;
-	swake_up_one(&rsp->gp_wq);
+	swake_up_one(&rcu_state.gp_wq);
 }
 
 /*
@@ -1721,7 +1721,7 @@ static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
 	needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 	if (needwake)
-		rcu_gp_kthread_wake(rsp);
+		rcu_gp_kthread_wake();
 }
 
 /*
@@ -1819,7 +1819,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 	needwake = __note_gp_changes(rsp, rnp, rdp);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
-		rcu_gp_kthread_wake(rsp);
+		rcu_gp_kthread_wake();
 }
 
 static void rcu_gp_slow(struct rcu_state *rsp, int delay)
@@ -2202,7 +2202,7 @@ static void rcu_report_qs_rsp(unsigned long flags)
 	WARN_ON_ONCE(!rcu_gp_in_progress());
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
-	rcu_gp_kthread_wake(rsp);
+	rcu_gp_kthread_wake();
 }
 
 /*
@@ -2363,7 +2363,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 		/* ^^^ Released rnp->lock */
 		if (needwake)
-			rcu_gp_kthread_wake(rsp);
+			rcu_gp_kthread_wake();
 	}
 }
 
@@ -2669,7 +2669,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	}
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
-	rcu_gp_kthread_wake(rsp);
+	rcu_gp_kthread_wake();
 }
 
 /*
@@ -3671,7 +3671,7 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
 	raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
 	if (needwake)
-		rcu_gp_kthread_wake(rsp);
+		rcu_gp_kthread_wake();
 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
 		  !rcu_segcblist_empty(&rdp->cblist),
 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 545e4ac9422a..50ca000ad9f2 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1700,7 +1700,7 @@ static void rcu_prepare_for_idle(void)
 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 		if (needwake)
-			rcu_gp_kthread_wake(rsp);
+			rcu_gp_kthread_wake();
 	}
 }
 
@@ -2147,7 +2147,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 		needwake = rcu_start_this_gp(rnp, rdp, c);
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		if (needwake)
-			rcu_gp_kthread_wake(rdp->rsp);
+			rcu_gp_kthread_wake();
 	}
 
 	/*
-- 
2.17.1


* [PATCH tip/core/rcu 16/52] rcu: Remove rsp parameter from rcu_accelerate_cbs()
From: Paul E. McKenney @ 2018-08-29 22:38 UTC
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_accelerate_cbs().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        | 15 +++++++--------
 kernel/rcu/tree_plugin.h |  2 +-
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index d3150943db9f..2c1d457e8cc2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1662,11 +1662,11 @@ static void rcu_gp_kthread_wake(void)
  *
  * The caller must hold rnp->lock with interrupts disabled.
  */
-static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
-			       struct rcu_data *rdp)
+static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	unsigned long gp_seq_req;
 	bool ret = false;
+	struct rcu_state *rsp = &rcu_state;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -1718,7 +1718,7 @@ static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
 		return;
 	}
 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
-	needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+	needwake = rcu_accelerate_cbs(rnp, rdp);
 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 	if (needwake)
 		rcu_gp_kthread_wake();
@@ -1750,7 +1750,7 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
 
 	/* Classify any remaining callbacks. */
-	return rcu_accelerate_cbs(rsp, rnp, rdp);
+	return rcu_accelerate_cbs(rnp, rdp);
 }
 
 /*
@@ -1776,7 +1776,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 		ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
 		trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
 	} else {
-		ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */
+		ret = rcu_accelerate_cbs(rnp, rdp); /* Recent callbacks. */
 	}
 
 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
@@ -2077,7 +2077,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		needgp = true;
 	}
 	/* Advance CBs to reduce false positives below. */
-	if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
+	if (!rcu_accelerate_cbs(rnp, rdp) && needgp) {
 		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
 		rsp->gp_req_activity = jiffies;
 		trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq),
@@ -2330,7 +2330,6 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 	unsigned long mask;
 	bool needwake;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp = &rcu_state;
 
 	rnp = rdp->mynode;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -2358,7 +2357,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 		 * This GP can't end until cpu checks in, so all of our
 		 * callbacks can be processed during the next GP.
 		 */
-		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+		needwake = rcu_accelerate_cbs(rnp, rdp);
 
 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 		/* ^^^ Released rnp->lock */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 50ca000ad9f2..0c59c3987c60 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1697,7 +1697,7 @@ static void rcu_prepare_for_idle(void)
 			continue;
 		rnp = rdp->mynode;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
-		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+		needwake = rcu_accelerate_cbs(rnp, rdp);
 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 		if (needwake)
 			rcu_gp_kthread_wake();
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 17/52] rcu: Remove rsp parameter from rcu_accelerate_cbs_unlocked()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (15 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 16/52] rcu: Remove rsp parameter from rcu_accelerate_cbs() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 18/52] rcu: Remove rsp parameter from rcu_advance_cbs() Paul E. McKenney
                   ` (35 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_accelerate_cbs_unlocked().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 2c1d457e8cc2..442adb8eedc1 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1703,15 +1703,14 @@ static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
  * that a new grace-period request be made, invokes rcu_accelerate_cbs()
  * while holding the leaf rcu_node structure's ->lock.
  */
-static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
-					struct rcu_node *rnp,
+static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
 					struct rcu_data *rdp)
 {
 	unsigned long c;
 	bool needwake;
 
 	lockdep_assert_irqs_disabled();
-	c = rcu_seq_snap(&rsp->gp_seq);
+	c = rcu_seq_snap(&rcu_state.gp_seq);
 	if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
 		/* Old request still live, so mark recent callbacks. */
 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
@@ -2758,7 +2757,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	    rcu_segcblist_is_enabled(&rdp->cblist)) {
 		local_irq_save(flags);
 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
-			rcu_accelerate_cbs_unlocked(rsp, rnp, rdp);
+			rcu_accelerate_cbs_unlocked(rnp, rdp);
 		local_irq_restore(flags);
 	}
 
@@ -2845,7 +2844,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 
 		/* Start a new grace period if one not already started. */
 		if (!rcu_gp_in_progress()) {
-			rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
+			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
 		} else {
 			/* Give the grace period a kick. */
 			rdp->blimit = LONG_MAX;
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 18/52] rcu: Remove rsp parameter from rcu_advance_cbs()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (16 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 17/52] rcu: Remove rsp parameter from rcu_accelerate_cbs_unlocked() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 19/52] rcu: Remove rsp parameter from __note_gp_changes() Paul E. McKenney
                   ` (34 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_advance_cbs().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 442adb8eedc1..dae8c1ad6a9e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1733,8 +1733,7 @@ static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
  *
  * The caller must hold rnp->lock with interrupts disabled.
  */
-static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
-			    struct rcu_data *rdp)
+static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -1772,7 +1771,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 	/* Handle the ends of any preceding grace periods first. */
 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
 	    unlikely(READ_ONCE(rdp->gpwrap))) {
-		ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
+		ret = rcu_advance_cbs(rnp, rdp); /* Advance callbacks. */
 		trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
 	} else {
 		ret = rcu_accelerate_cbs(rnp, rdp); /* Recent callbacks. */
@@ -3662,8 +3661,8 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 	}
 	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
 	/* Leverage recent GPs and set GP for new callbacks. */
-	needwake = rcu_advance_cbs(rsp, rnp_root, rdp) ||
-		   rcu_advance_cbs(rsp, rnp_root, my_rdp);
+	needwake = rcu_advance_cbs(rnp_root, rdp) ||
+		   rcu_advance_cbs(rnp_root, my_rdp);
 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 19/52] rcu: Remove rsp parameter from __note_gp_changes()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (17 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 18/52] rcu: Remove rsp parameter from rcu_advance_cbs() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 20/52] rcu: Remove rsp parameter from note_gp_changes() Paul E. McKenney
                   ` (33 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
__note_gp_changes().
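
The __maybe_unused on the new rsp local below deserves a note: it is
presumably there so that configurations in which every remaining use
of rsp compiles away do not emit unused-variable warnings.  A minimal
stand-alone sketch of the idiom, with the attribute spelled out by
hand rather than taken from the kernel headers:

#define __maybe_unused __attribute__((unused))	/* sketch only */

static void sketch(void)
{
        int __maybe_unused nuses = 0;	/* no -Wunused-variable warning,
                                         * even when the use below is
                                         * compiled out */
#ifdef SKETCH_DEBUG
        nuses++;
#endif
}

int main(void)
{
        sketch();
        return 0;
}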

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index dae8c1ad6a9e..4dd9d68af702 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1757,11 +1757,11 @@ static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
  * structure corresponding to the current CPU, and must have irqs disabled.
  * Returns true if the grace-period kthread needs to be awakened.
  */
-static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
-			      struct rcu_data *rdp)
+static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	bool ret;
 	bool need_gp;
+	struct rcu_state __maybe_unused *rsp = &rcu_state;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -1814,7 +1814,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 		local_irq_restore(flags);
 		return;
 	}
-	needwake = __note_gp_changes(rsp, rnp, rdp);
+	needwake = __note_gp_changes(rnp, rdp);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake();
@@ -1939,7 +1939,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		rnp->qsmask = rnp->qsmaskinit;
 		WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
 		if (rnp == rdp->mynode)
-			(void)__note_gp_changes(rsp, rnp, rdp);
+			(void)__note_gp_changes(rnp, rdp);
 		rcu_preempt_boost_start_gp(rnp);
 		trace_rcu_grace_period_init(rsp->name, rnp->gp_seq,
 					    rnp->level, rnp->grplo,
@@ -2050,7 +2050,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
 		rdp = this_cpu_ptr(&rcu_data);
 		if (rnp == rdp->mynode)
-			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
+			needgp = __note_gp_changes(rnp, rdp) || needgp;
 		/* smp_mb() provided by prior unlock-lock pair. */
 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
 		sq = rcu_nocb_gp_get(rnp);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 20/52] rcu: Remove rsp parameter from note_gp_changes()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (18 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 19/52] rcu: Remove rsp parameter from __note_gp_changes() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 21/52] rcu: Remove rsp parameter from rcu_gp_slow() Paul E. McKenney
                   ` (32 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
note_gp_changes().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        | 6 +++---
 kernel/rcu/tree_plugin.h | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4dd9d68af702..752cd23860ed 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1800,7 +1800,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
 	return ret;
 }
 
-static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
+static void note_gp_changes(struct rcu_data *rdp)
 {
 	unsigned long flags;
 	bool needwake;
@@ -2374,7 +2374,7 @@ static void
 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	/* Check for grace-period ends and beginnings. */
-	note_gp_changes(rsp, rdp);
+	note_gp_changes(rdp);
 
 	/*
 	 * Does this CPU still need to do its part for current grace period?
@@ -2839,7 +2839,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 		     rdp->qlen_last_fqs_check + qhimark)) {
 
 		/* Are we ignoring a completed grace period? */
-		note_gp_changes(rsp, rdp);
+		note_gp_changes(rdp);
 
 		/* Start a new grace period if one not already started. */
 		if (!rcu_gp_in_progress()) {
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0c59c3987c60..82f10a6bf266 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1586,7 +1586,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
 					  rcu_seq_current(&rnp->gp_seq)) ||
 		     unlikely(READ_ONCE(rdp->gpwrap))) &&
 		    rcu_segcblist_pend_cbs(&rdp->cblist))
-			note_gp_changes(rsp, rdp);
+			note_gp_changes(rdp);
 
 		if (rcu_segcblist_ready_cbs(&rdp->cblist))
 			cbs_ready = true;
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 21/52] rcu: Remove rsp parameter from rcu_gp_slow()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (19 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 20/52] rcu: Remove rsp parameter from note_gp_changes() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 22/52] rcu: Remove rsp parameter from rcu_gp_kthread() and friends Paul E. McKenney
                   ` (31 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_gp_slow().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 752cd23860ed..751c28ddf5b1 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1820,10 +1820,10 @@ static void note_gp_changes(struct rcu_data *rdp)
 		rcu_gp_kthread_wake();
 }
 
-static void rcu_gp_slow(struct rcu_state *rsp, int delay)
+static void rcu_gp_slow(int delay)
 {
 	if (delay > 0 &&
-	    !(rcu_seq_ctr(rsp->gp_seq) %
+	    !(rcu_seq_ctr(rcu_state.gp_seq) %
 	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
 		schedule_timeout_uninterruptible(delay);
 }
@@ -1916,7 +1916,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		raw_spin_unlock_irq_rcu_node(rnp);
 		spin_unlock(&rsp->ofl_lock);
 	}
-	rcu_gp_slow(rsp, gp_preinit_delay); /* Races with CPU hotplug. */
+	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
 
 	/*
 	 * Set the quiescent-state-needed bits in all the rcu_node
@@ -1932,7 +1932,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	 */
 	rsp->gp_state = RCU_GP_INIT;
 	rcu_for_each_node_breadth_first(rsp, rnp) {
-		rcu_gp_slow(rsp, gp_init_delay);
+		rcu_gp_slow(gp_init_delay);
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rdp = this_cpu_ptr(&rcu_data);
 		rcu_preempt_check_blocked_tasks(rsp, rnp);
@@ -2058,7 +2058,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		rcu_nocb_gp_cleanup(sq);
 		cond_resched_tasks_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
-		rcu_gp_slow(rsp, gp_cleanup_delay);
+		rcu_gp_slow(gp_cleanup_delay);
 	}
 	rnp = rcu_get_root();
 	raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 22/52] rcu: Remove rsp parameter from rcu_gp_kthread() and friends
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (20 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 21/52] rcu: Remove rsp parameter from rcu_gp_slow() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 23/52] rcu: Remove rsp parameter from rcu_check_quiescent_state() Paul E. McKenney
                   ` (30 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_gp_init(), rcu_gp_fqs_check_wake(), rcu_gp_fqs(), rcu_gp_cleanup(),
and rcu_gp_kthread().
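
For readers outside the kernel, the kthread_create() hunk at the end
is the classic singleton-argument cleanup: once the thread body reads
the one global structure directly, its start argument no longer
carries anything.  A rough user-space analogue using POSIX threads
(illustrative only; pthread_create() stands in for kthread_create()):

#include <pthread.h>
#include <stdio.h>

struct state { int gp_seq; };
static struct state the_state;		/* sole instance */

static void *gp_thread(void *unused)
{
        (void)unused;			/* argument no longer carries state */
        the_state.gp_seq++;		/* operate on the singleton directly */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, gp_thread, NULL);	/* was: &the_state */
        pthread_join(t, NULL);
        printf("gp_seq = %d\n", the_state.gp_seq);
        return 0;
}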

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 751c28ddf5b1..b2d79570c70f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1831,13 +1831,14 @@ static void rcu_gp_slow(int delay)
 /*
  * Initialize a new grace period.  Return false if no grace period required.
  */
-static bool rcu_gp_init(struct rcu_state *rsp)
+static bool rcu_gp_init(void)
 {
 	unsigned long flags;
 	unsigned long oldmask;
 	unsigned long mask;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root();
+	struct rcu_state *rsp = &rcu_state;
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
 	raw_spin_lock_irq_rcu_node(rnp);
@@ -1962,12 +1963,12 @@ static bool rcu_gp_init(struct rcu_state *rsp)
  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
  * time.
  */
-static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
+static bool rcu_gp_fqs_check_wake(int *gfp)
 {
 	struct rcu_node *rnp = rcu_get_root();
 
 	/* Someone like call_rcu() requested a force-quiescent-state scan. */
-	*gfp = READ_ONCE(rsp->gp_flags);
+	*gfp = READ_ONCE(rcu_state.gp_flags);
 	if (*gfp & RCU_GP_FLAG_FQS)
 		return true;
 
@@ -1981,9 +1982,10 @@ static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
 /*
  * Do one round of quiescent-state forcing.
  */
-static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
+static void rcu_gp_fqs(bool first_time)
 {
 	struct rcu_node *rnp = rcu_get_root();
+	struct rcu_state *rsp = &rcu_state;
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
 	rsp->n_force_qs++;
@@ -2006,13 +2008,14 @@ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
 /*
  * Clean up after the old grace period.
  */
-static void rcu_gp_cleanup(struct rcu_state *rsp)
+static void rcu_gp_cleanup(void)
 {
 	unsigned long gp_duration;
 	bool needgp = false;
 	unsigned long new_gp_seq;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root();
+	struct rcu_state *rsp = &rcu_state;
 	struct swait_queue_head *sq;
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
@@ -2089,13 +2092,13 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 /*
  * Body of kthread that handles grace periods.
  */
-static int __noreturn rcu_gp_kthread(void *arg)
+static int __noreturn rcu_gp_kthread(void *unused)
 {
 	bool first_gp_fqs;
 	int gf;
 	unsigned long j;
 	int ret;
-	struct rcu_state *rsp = arg;
+	struct rcu_state *rsp = &rcu_state;
 	struct rcu_node *rnp = rcu_get_root();
 
 	rcu_bind_gp_kthread();
@@ -2111,7 +2114,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 						     RCU_GP_FLAG_INIT);
 			rsp->gp_state = RCU_GP_DONE_GPS;
 			/* Locking provides needed memory barrier. */
-			if (rcu_gp_init(rsp))
+			if (rcu_gp_init())
 				break;
 			cond_resched_tasks_rcu_qs();
 			WRITE_ONCE(rsp->gp_activity, jiffies);
@@ -2136,7 +2139,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					       TPS("fqswait"));
 			rsp->gp_state = RCU_GP_WAIT_FQS;
 			ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
-					rcu_gp_fqs_check_wake(rsp, &gf), j);
+					rcu_gp_fqs_check_wake(&gf), j);
 			rsp->gp_state = RCU_GP_DOING_FQS;
 			/* Locking provides needed memory barriers. */
 			/* If grace period done, leave loop. */
@@ -2149,7 +2152,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 				trace_rcu_grace_period(rsp->name,
 						       READ_ONCE(rsp->gp_seq),
 						       TPS("fqsstart"));
-				rcu_gp_fqs(rsp, first_gp_fqs);
+				rcu_gp_fqs(first_gp_fqs);
 				first_gp_fqs = false;
 				trace_rcu_grace_period(rsp->name,
 						       READ_ONCE(rsp->gp_seq),
@@ -2177,7 +2180,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 
 		/* Handle grace-period end. */
 		rsp->gp_state = RCU_GP_CLEANUP;
-		rcu_gp_cleanup(rsp);
+		rcu_gp_cleanup();
 		rsp->gp_state = RCU_GP_CLEANED;
 	}
 }
@@ -3743,7 +3746,7 @@ static int __init rcu_spawn_gp_kthread(void)
 
 	rcu_scheduler_fully_active = 1;
 	for_each_rcu_flavor(rsp) {
-		t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
+		t = kthread_create(rcu_gp_kthread, NULL, "%s", rsp->name);
 		BUG_ON(IS_ERR(t));
 		rnp = rcu_get_root();
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 23/52] rcu: Remove rsp parameter from rcu_check_quiescent_state()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (21 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 22/52] rcu: Remove rsp parameter from rcu_gp_kthread() and friends Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 24/52] rcu: Remove rsp parameter from CPU hotplug functions Paul E. McKenney
                   ` (29 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_check_quiescent_state().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b2d79570c70f..c7e58f6ac666 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2374,7 +2374,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
  * quiescent state for this grace period, and record that fact if so.
  */
 static void
-rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
+rcu_check_quiescent_state(struct rcu_data *rdp)
 {
 	/* Check for grace-period ends and beginnings. */
 	note_gp_changes(rdp);
@@ -2752,7 +2752,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 		resched_cpu(rdp->cpu); /* Provoke future context switch. */
 
 	/* Update RCU state based on any recent quiescent states. */
-	rcu_check_quiescent_state(rsp, rdp);
+	rcu_check_quiescent_state(rdp);
 
 	/* No grace period and unregistered callbacks? */
 	if (!rcu_gp_in_progress() &&
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 24/52] rcu: Remove rsp parameter from CPU hotplug functions
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (22 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 23/52] rcu: Remove rsp parameter from rcu_check_quiescent_state() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 25/52] rcu: Remove rsp parameter from rcu_do_batch() Paul E. McKenney
                   ` (28 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_cleanup_dying_cpu() and rcu_cleanup_dead_cpu().  And, as long as
we are in the neighborhood, it inlines them into rcutree_dying_cpu() and
rcutree_dead_cpu(), respectively.  This also eliminates a pair of
for_each_rcu_flavor() loops.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 43 +++++++++++--------------------------------
 1 file changed, 11 insertions(+), 32 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c7e58f6ac666..c38c9dcb98fe 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2401,20 +2401,22 @@ rcu_check_quiescent_state(struct rcu_data *rdp)
 }
 
 /*
- * Trace the fact that this CPU is going offline.
+ * Near the end of the offline process.  Trace the fact that this CPU
+ * is going offline.
  */
-static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
+int rcutree_dying_cpu(unsigned int cpu)
 {
 	RCU_TRACE(bool blkd;)
 	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);)
 	RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
 
 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-		return;
+		return 0;
 
 	RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
-	trace_rcu_grace_period(rsp->name, rnp->gp_seq,
+	trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
 			       blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
+	return 0;
 }
 
 /*
@@ -2468,16 +2470,19 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
  * There can only be one CPU hotplug operation at a time, so no need for
  * explicit locking.
  */
-static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
+int rcutree_dead_cpu(unsigned int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-		return;
+		return 0;
 
 	/* Adjust any no-longer-needed kthreads. */
 	rcu_boost_kthread_setaffinity(rnp, -1);
+	/* Do any needed no-CB deferred wakeups from this CPU. */
+	do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
+	return 0;
 }
 
 /*
@@ -3513,32 +3518,6 @@ int rcutree_offline_cpu(unsigned int cpu)
 	return 0;
 }
 
-/*
- * Near the end of the offline process.  We do only tracing here.
- */
-int rcutree_dying_cpu(unsigned int cpu)
-{
-	struct rcu_state *rsp;
-
-	for_each_rcu_flavor(rsp)
-		rcu_cleanup_dying_cpu(rsp);
-	return 0;
-}
-
-/*
- * The outgoing CPU is gone and we are running elsewhere.
- */
-int rcutree_dead_cpu(unsigned int cpu)
-{
-	struct rcu_state *rsp;
-
-	for_each_rcu_flavor(rsp) {
-		rcu_cleanup_dead_cpu(cpu, rsp);
-		do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
-	}
-	return 0;
-}
-
 static DEFINE_PER_CPU(int, rcu_cpu_started);
 
 /*
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 25/52] rcu: Remove rsp parameter from rcu_do_batch()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (23 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 24/52] rcu: Remove rsp parameter from CPU hotplug functions Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 26/52] rcu: Remove rsp parameter from force-quiescent-state functions Paul E. McKenney
                   ` (27 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_do_batch().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        | 5 +++--
 kernel/rcu/tree_plugin.h | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c38c9dcb98fe..18161321633e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2489,12 +2489,13 @@ int rcutree_dead_cpu(unsigned int cpu)
  * Invoke any RCU callbacks that have made it to the end of their grace
  * period.  Throttle as specified by rdp->blimit.
  */
-static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *rhp;
 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 	long bl, count;
+	struct rcu_state *rsp = &rcu_state;
 
 	/* If no callbacks are ready, just return. */
 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
@@ -2807,7 +2808,7 @@ static void invoke_rcu_callbacks(struct rcu_data *rdp)
 	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
 		return;
 	if (likely(!rsp->boost)) {
-		rcu_do_batch(rsp, rdp);
+		rcu_do_batch(rdp);
 		return;
 	}
 	invoke_rcu_callbacks_kthread();
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 82f10a6bf266..c678c76a754e 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1324,7 +1324,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 
 static void rcu_kthread_do_work(void)
 {
-	rcu_do_batch(&rcu_state, this_cpu_ptr(&rcu_data));
+	rcu_do_batch(this_cpu_ptr(&rcu_data));
 }
 
 static void rcu_cpu_kthread_setup(unsigned int cpu)
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 26/52] rcu: Remove rsp parameter from force-quiescent-state functions
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (24 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 25/52] rcu: Remove rsp parameter from rcu_do_batch() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 27/52] rcu: Remove rsp parameter from rcu_check_gp_start_stall() Paul E. McKenney
                   ` (26 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
force_qs_rnp() and force_quiescent_state().
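
Note that force_qs_rnp() keeps its function-pointer parameter: the
caller still selects which per-CPU check to apply, as the rcu_gp_fqs()
hunk below shows (dyntick_save_progress_counter() on the first pass,
rcu_implicit_dynticks_qs() on later ones).  A stripped-down sketch of
that shape, with invented names:

#include <stdio.h>

struct cpu_data { int cpu; int idle; };

typedef int (*cpu_check_t)(struct cpu_data *cdp);

static int check_idle(struct cpu_data *cdp)
{
        return cdp->idle;		/* stand-in for the real checks */
}

static void scan_cpus(cpu_check_t f, struct cpu_data *cdps, int n)
{
        int i;

        for (i = 0; i < n; i++)
                if (f(&cdps[i]))
                        printf("cpu %d: quiescent\n", cdps[i].cpu);
}

int main(void)
{
        struct cpu_data cdps[] = { { 0, 1 }, { 1, 0 } };

        scan_cpus(check_idle, cdps, 2);
        return 0;
}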

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 18161321633e..4e6bb2bb0874 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -479,8 +479,8 @@ module_param(rcu_kick_kthreads, bool, 0644);
 static ulong jiffies_till_sched_qs = HZ / 10;
 module_param(jiffies_till_sched_qs, ulong, 0444);
 
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
-static void force_quiescent_state(struct rcu_state *rsp);
+static void force_qs_rnp(int (*f)(struct rcu_data *rsp));
+static void force_quiescent_state(void);
 static int rcu_pending(void);
 
 /*
@@ -538,7 +538,7 @@ EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
  */
 void rcu_force_quiescent_state(void)
 {
-	force_quiescent_state(&rcu_state);
+	force_quiescent_state();
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
@@ -547,7 +547,7 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
  */
 void rcu_bh_force_quiescent_state(void)
 {
-	force_quiescent_state(&rcu_state);
+	force_quiescent_state();
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 
@@ -1383,7 +1383,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
 
 	panic_on_rcu_stall();
 
-	force_quiescent_state(rsp);  /* Kick them all. */
+	force_quiescent_state();  /* Kick them all. */
 }
 
 static void print_cpu_stall(void)
@@ -1991,10 +1991,10 @@ static void rcu_gp_fqs(bool first_time)
 	rsp->n_force_qs++;
 	if (first_time) {
 		/* Collect dyntick-idle snapshots. */
-		force_qs_rnp(rsp, dyntick_save_progress_counter);
+		force_qs_rnp(dyntick_save_progress_counter);
 	} else {
 		/* Handle dyntick-idle and offline CPUs. */
-		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
+		force_qs_rnp(rcu_implicit_dynticks_qs);
 	}
 	/* Clear flag to prevent immediate re-entry. */
 	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
@@ -2599,12 +2599,13 @@ void rcu_check_callbacks(int user)
  *
  * The caller must have suppressed start of new grace periods.
  */
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
+static void force_qs_rnp(int (*f)(struct rcu_data *rsp))
 {
 	int cpu;
 	unsigned long flags;
 	unsigned long mask;
 	struct rcu_node *rnp;
+	struct rcu_state *rsp = &rcu_state;
 
 	rcu_for_each_leaf_node(rsp, rnp) {
 		cond_resched_tasks_rcu_qs();
@@ -2646,12 +2647,13 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
  * Force quiescent states on reluctant CPUs, and also detect which
  * CPUs are in dyntick-idle mode.
  */
-static void force_quiescent_state(struct rcu_state *rsp)
+static void force_quiescent_state(void)
 {
 	unsigned long flags;
 	bool ret;
 	struct rcu_node *rnp;
 	struct rcu_node *rnp_old = NULL;
+	struct rcu_state *rsp = &rcu_state;
 
 	/* Funnel through hierarchy to reduce memory contention. */
 	rnp = __this_cpu_read(rcu_data.mynode);
@@ -2858,7 +2860,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 			rdp->blimit = LONG_MAX;
 			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
-				force_quiescent_state(rsp);
+				force_quiescent_state();
 			rdp->n_force_qs_snap = rsp->n_force_qs;
 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
 		}
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 27/52] rcu: Remove rsp parameter from rcu_check_gp_start_stall()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (25 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 26/52] rcu: Remove rsp parameter from force-quiescent-state functions Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 28/52] rcu: Remove rsp parameter from __rcu_process_callbacks() Paul E. McKenney
                   ` (25 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_check_gp_start_stall().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4e6bb2bb0874..b77e41cbf07c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2685,13 +2685,13 @@ static void force_quiescent_state(void)
  * RCU to come out of its idle mode.
  */
 static void
-rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
-			 struct rcu_data *rdp)
+rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
 	unsigned long flags;
 	unsigned long j;
 	struct rcu_node *rnp_root = rcu_get_root();
+	struct rcu_state *rsp = &rcu_state;
 	static atomic_t warned = ATOMIC_INIT(0);
 
 	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
@@ -2771,7 +2771,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 		local_irq_restore(flags);
 	}
 
-	rcu_check_gp_start_stall(rsp, rnp, rdp);
+	rcu_check_gp_start_stall(rnp, rdp);
 
 	/* If there are callbacks ready, invoke them. */
 	if (rcu_segcblist_ready_cbs(&rdp->cblist))
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0
@ 2018-08-29 22:38 Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 01/52] rcu: Remove rsp parameter from rcu_report_qs_rnp() Paul E. McKenney
                   ` (52 more replies)
  0 siblings, 53 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel

Hello!

This series does RCU-consolidation cleanups that get rid of pointers to
the sole remaining rcu_state structure:

1-40:	Remove the "rsp" parameter from numerous functions, given that
	the corresponding argument will always be &rcu_state.

41.	Remove rcu_data structure's ->rsp field, now that it always
	contains a pointer to rcu_state.

42.	Remove non-flavor-traversal rsp local variable from tree_plugin.h.

43.	Remove the for_each_rcu_flavor() flavor-traversal macro, given
	that there is now only ever one flavor to traverse.

44.	Simplify rcutorture_get_gp_data() based on there now being only
	one rcu_state structure.

45.	Restructure rcu_check_gp_kthread_starvation() based on there
	now being only one rcu_state structure.

46.	Restructure RCU CPU stall warnings based on there now being only
	one rcu_state structure.

47.	Restructure grace-period management code based on there now being
	only one rcu_state structure.

48.	Restructure callback registration/invocation code based on there
	now being only one rcu_state structure.

49.	Restructure quiescent-state and grace-period-nonstart code based
	on there now being only one rcu_state structure.

50.	Restructure rcu_barrier() based on there now being only one
	rcu_state structure.

51.	Restructure initialization code based on there now being only
	one rcu_state structure.

52.	Fix typo in force_qs_rnp()'s parameter's parameter, which was
	located by searching for "rsp".

							Thanx, Paul

------------------------------------------------------------------------

 Documentation/RCU/Design/Data-Structures/Data-Structures.html |   23 
 kernel/rcu/rcu.h                                              |   28 
 kernel/rcu/srcutree.c                                         |    4 
 kernel/rcu/tree.c                                             | 1261 ++++------
 kernel/rcu/tree.h                                             |   29 
 kernel/rcu/tree_exp.h                                         |  209 -
 kernel/rcu/tree_plugin.h                                      |  203 -
 7 files changed, 784 insertions(+), 973 deletions(-)
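
The transformation that patches 1-40 repeat can be sketched in
miniature as follows (toy user-space code, not taken from the series):

#include <stdio.h>

struct state { const char *name; };
static struct state the_state = { .name = "rcu" };	/* sole instance */

/* Before: the state pointer is threaded through every call. */
static void report_before(struct state *sp)
{
        puts(sp->name);
}

/* After: helpers reference the singleton directly. */
static void report_after(void)
{
        puts(the_state.name);
}

int main(void)
{
        report_before(&the_state);	/* the argument is always &the_state... */
        report_after();			/* ...so drop it */
        return 0;
}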


^ permalink raw reply	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 28/52] rcu: Remove rsp parameter from __rcu_process_callbacks()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (26 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 27/52] rcu: Remove rsp parameter from rcu_check_gp_start_stall() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 29/52] rcu: Remove rsp parameter from __call_rcu() and friend Paul E. McKenney
                   ` (24 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
__rcu_process_callbacks(), and also inlines it into rcu_process_callbacks(),
removing the for_each_rcu_flavor() loop while in the neighborhood.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 26 +++++++-------------------
 1 file changed, 7 insertions(+), 19 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b77e41cbf07c..715ca506b5cd 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2740,17 +2740,19 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
 }
 
 /*
- * This does the RCU core processing work for the specified rcu_state
- * and rcu_data structures.  This may be called only from the CPU to
- * whom the rdp belongs.
+ * This does the RCU core processing work for the specified rcu_data
+ * structures.  This may be called only from the CPU to whom the rdp
+ * belongs.
  */
-static void
-__rcu_process_callbacks(struct rcu_state *rsp)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 {
 	unsigned long flags;
 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rdp->mynode;
 
+	if (cpu_is_offline(smp_processor_id()))
+		return;
+	trace_rcu_utilization(TPS("Start RCU core"));
 	WARN_ON_ONCE(!rdp->beenonline);
 
 	/* Report any deferred quiescent states if preemption enabled. */
@@ -2779,20 +2781,6 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 
 	/* Do any needed deferred wakeups of rcuo kthreads. */
 	do_nocb_deferred_wakeup(rdp);
-}
-
-/*
- * Do RCU core processing for the current CPU.
- */
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
-{
-	struct rcu_state *rsp;
-
-	if (cpu_is_offline(smp_processor_id()))
-		return;
-	trace_rcu_utilization(TPS("Start RCU core"));
-	for_each_rcu_flavor(rsp)
-		__rcu_process_callbacks(rsp);
 	trace_rcu_utilization(TPS("End RCU core"));
 }
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 29/52] rcu: Remove rsp parameter from __call_rcu() and friend
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (27 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 28/52] rcu: Remove rsp parameter from __rcu_process_callbacks() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 30/52] rcu: Remove rsp parameter from __rcu_pending() Paul E. McKenney
                   ` (23 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
__call_rcu_core() and __call_rcu().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 715ca506b5cd..4af227e826a6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2813,8 +2813,8 @@ static void invoke_rcu_core(void)
 /*
  * Handle any core-RCU processing required by a call_rcu() invocation.
  */
-static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
-			    struct rcu_head *head, unsigned long flags)
+static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
+			    unsigned long flags)
 {
 	/*
 	 * If called from an extended quiescent state, invoke the RCU
@@ -2846,10 +2846,10 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 		} else {
 			/* Give the grace period a kick. */
 			rdp->blimit = LONG_MAX;
-			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+			if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
 				force_quiescent_state();
-			rdp->n_force_qs_snap = rsp->n_force_qs;
+			rdp->n_force_qs_snap = rcu_state.n_force_qs;
 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
 		}
 	}
@@ -2869,11 +2869,11 @@ static void rcu_leak_callback(struct rcu_head *rhp)
  * is expected to specify a CPU.
  */
 static void
-__call_rcu(struct rcu_head *head, rcu_callback_t func,
-	   struct rcu_state *rsp, int cpu, bool lazy)
+__call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
 {
 	unsigned long flags;
 	struct rcu_data *rdp;
+	struct rcu_state __maybe_unused *rsp = &rcu_state;
 
 	/* Misaligned rcu_head! */
 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
@@ -2931,7 +2931,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
 				   rcu_segcblist_n_cbs(&rdp->cblist));
 
 	/* Go handle any RCU core processing required. */
-	__call_rcu_core(rsp, rdp, head, flags);
+	__call_rcu_core(rdp, head, flags);
 	local_irq_restore(flags);
 }
 
@@ -2972,7 +2972,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
  */
 void call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
-	__call_rcu(head, func, &rcu_state, -1, 0);
+	__call_rcu(head, func, -1, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
@@ -2999,7 +2999,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
 void kfree_call_rcu(struct rcu_head *head,
 		    rcu_callback_t func)
 {
-	__call_rcu(head, func, &rcu_state, -1, 1);
+	__call_rcu(head, func, -1, 1);
 }
 EXPORT_SYMBOL_GPL(kfree_call_rcu);
 
@@ -3271,7 +3271,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 				smp_mb__before_atomic();
 				atomic_inc(&rsp->barrier_cpu_count);
 				__call_rcu(&rdp->barrier_head,
-					   rcu_barrier_callback, rsp, cpu, 0);
+					   rcu_barrier_callback, cpu, 0);
 			}
 		} else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
 			_rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 30/52] rcu: Remove rsp parameter from __rcu_pending()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (28 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 29/52] rcu: Remove rsp parameter from __call_rcu() and friend Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 31/52] rcu: Remove rsp parameter from _rcu_barrier() and friends Paul E. McKenney
                   ` (22 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the Linux
kernel, so there is no need to pass it as a parameter to RCU's functions.
This commit therefore removes the rsp parameter from __rcu_pending(),
and also inlines it into rcu_pending(), removing the for_each_rcu_flavor()
loop while in the neighborhood.
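
The for_each_rcu_flavor() removal is mechanical once only one flavor
exists: a loop over a one-element set reduces to a direct reference.
Sketched with made-up names:

struct flavor { int pending; };
static struct flavor only_flavor;

/* Before: iterate over all flavors (now a one-element list). */
#define for_each_flavor(f) \
        for ((f) = &only_flavor; (f); (f) = (void *)0)

static int pending_before(void)
{
        struct flavor *f;

        for_each_flavor(f)
                if (f->pending)
                        return 1;
        return 0;
}

/* After: the loop disappears; check the singleton directly. */
static int pending_after(void)
{
        return only_flavor.pending;
}

int main(void)
{
        only_flavor.pending = 1;
        return pending_before() != pending_after();	/* 0 on agreement */
}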

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 34 ++++++++++------------------------
 1 file changed, 10 insertions(+), 24 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4af227e826a6..e347a6b2984c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2996,8 +2996,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
  * callbacks in the list of pending callbacks. Until then, this
  * function may only be called from __kfree_rcu().
  */
-void kfree_call_rcu(struct rcu_head *head,
-		    rcu_callback_t func)
+void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
 	__call_rcu(head, func, -1, 1);
 }
@@ -3079,21 +3078,23 @@ void cond_synchronize_sched(unsigned long oldstate)
 EXPORT_SYMBOL_GPL(cond_synchronize_sched);
 
 /*
- * Check to see if there is any immediate RCU-related work to be done
- * by the current CPU, for the specified type of RCU, returning 1 if so.
- * The checks are in order of increasing expense: checks that can be
- * carried out against CPU-local state are performed first.  However,
- * we must check for CPU stalls first, else we might not get a chance.
+ * Check to see if there is any immediate RCU-related work to be done by
+ * the current CPU, for the specified type of RCU, returning 1 if so and
+ * zero otherwise.  The checks are in order of increasing expense: checks
+ * that can be carried out against CPU-local state are performed first.
+ * However, we must check for CPU stalls first, else we might not get
+ * a chance.
  */
-static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
+static int rcu_pending(void)
 {
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Check for CPU stalls, if enabled. */
 	check_cpu_stall(rdp);
 
 	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
-	if (rcu_nohz_full_cpu(rsp))
+	if (rcu_nohz_full_cpu(&rcu_state))
 		return 0;
 
 	/* Is the RCU core waiting for a quiescent state from this CPU? */
@@ -3123,21 +3124,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	return 0;
 }
 
-/*
- * Check to see if there is any immediate RCU-related work to be done
- * by the current CPU, returning 1 if so.  This function is part of the
- * RCU implementation; it is -not- an exported member of the RCU API.
- */
-static int rcu_pending(void)
-{
-	struct rcu_state *rsp;
-
-	for_each_rcu_flavor(rsp)
-		if (__rcu_pending(rsp, this_cpu_ptr(&rcu_data)))
-			return 1;
-	return 0;
-}
-
 /*
  * Return true if the specified CPU has any callback.  If all_lazy is
  * non-NULL, store an indication of whether all callbacks are lazy.
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 31/52] rcu: Remove rsp parameter from _rcu_barrier() and friends
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (29 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 30/52] rcu: Remove rsp parameter from __rcu_pending() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 32/52] rcu: Remove rsp parameter from rcu_boot_init_percpu_data() " Paul E. McKenney
                   ` (21 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
_rcu_barrier_trace() and _rcu_barrier().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 41 +++++++++++++++++++----------------------
 1 file changed, 19 insertions(+), 22 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e347a6b2984c..8028936dc95d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3155,11 +3155,10 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy)
  * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
  * the compiler is expected to optimize this away.
  */
-static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
-			       int cpu, unsigned long done)
+static void _rcu_barrier_trace(const char *s, int cpu, unsigned long done)
 {
-	trace_rcu_barrier(rsp->name, s, cpu,
-			  atomic_read(&rsp->barrier_cpu_count), done);
+	trace_rcu_barrier(rcu_state.name, s, cpu,
+			  atomic_read(&rcu_state.barrier_cpu_count), done);
 }
 
 /*
@@ -3172,11 +3171,10 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
 	struct rcu_state *rsp = rdp->rsp;
 
 	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
-		_rcu_barrier_trace(rsp, TPS("LastCB"), -1,
-				   rsp->barrier_sequence);
+		_rcu_barrier_trace(TPS("LastCB"), -1, rsp->barrier_sequence);
 		complete(&rsp->barrier_completion);
 	} else {
-		_rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
+		_rcu_barrier_trace(TPS("CB"), -1, rsp->barrier_sequence);
 	}
 }
 
@@ -3188,15 +3186,14 @@ static void rcu_barrier_func(void *type)
 	struct rcu_state *rsp = type;
 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
 
-	_rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
+	_rcu_barrier_trace(TPS("IRQ"), -1, rsp->barrier_sequence);
 	rdp->barrier_head.func = rcu_barrier_callback;
 	debug_rcu_head_queue(&rdp->barrier_head);
 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
 		atomic_inc(&rsp->barrier_cpu_count);
 	} else {
 		debug_rcu_head_unqueue(&rdp->barrier_head);
-		_rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
-				   rsp->barrier_sequence);
+		_rcu_barrier_trace(TPS("IRQNQ"), -1, rsp->barrier_sequence);
 	}
 }
 
@@ -3204,21 +3201,21 @@ static void rcu_barrier_func(void *type)
  * Orchestrate the specified type of RCU barrier, waiting for all
  * RCU callbacks of the specified type to complete.
  */
-static void _rcu_barrier(struct rcu_state *rsp)
+static void _rcu_barrier(void)
 {
 	int cpu;
 	struct rcu_data *rdp;
+	struct rcu_state *rsp = &rcu_state;
 	unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
 
-	_rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
+	_rcu_barrier_trace(TPS("Begin"), -1, s);
 
 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
 	mutex_lock(&rsp->barrier_mutex);
 
 	/* Did someone else do our work for us? */
 	if (rcu_seq_done(&rsp->barrier_sequence, s)) {
-		_rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
-				   rsp->barrier_sequence);
+		_rcu_barrier_trace(TPS("EarlyExit"), -1, rsp->barrier_sequence);
 		smp_mb(); /* caller's subsequent code after above check. */
 		mutex_unlock(&rsp->barrier_mutex);
 		return;
@@ -3226,7 +3223,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 	/* Mark the start of the barrier operation. */
 	rcu_seq_start(&rsp->barrier_sequence);
-	_rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
+	_rcu_barrier_trace(TPS("Inc1"), -1, rsp->barrier_sequence);
 
 	/*
 	 * Initialize the count to one rather than to zero in order to
@@ -3249,10 +3246,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
 		rdp = per_cpu_ptr(&rcu_data, cpu);
 		if (rcu_is_nocb_cpu(cpu)) {
 			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
-				_rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
+				_rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
 						   rsp->barrier_sequence);
 			} else {
-				_rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
+				_rcu_barrier_trace(TPS("OnlineNoCB"), cpu,
 						   rsp->barrier_sequence);
 				smp_mb__before_atomic();
 				atomic_inc(&rsp->barrier_cpu_count);
@@ -3260,11 +3257,11 @@ static void _rcu_barrier(struct rcu_state *rsp)
 					   rcu_barrier_callback, cpu, 0);
 			}
 		} else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
-			_rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
+			_rcu_barrier_trace(TPS("OnlineQ"), cpu,
 					   rsp->barrier_sequence);
 			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
 		} else {
-			_rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
+			_rcu_barrier_trace(TPS("OnlineNQ"), cpu,
 					   rsp->barrier_sequence);
 		}
 	}
@@ -3281,7 +3278,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	wait_for_completion(&rsp->barrier_completion);
 
 	/* Mark the end of the barrier operation. */
-	_rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
+	_rcu_barrier_trace(TPS("Inc2"), -1, rsp->barrier_sequence);
 	rcu_seq_end(&rsp->barrier_sequence);
 
 	/* Other rcu_barrier() invocations can now safely proceed. */
@@ -3293,7 +3290,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
  */
 void rcu_barrier_bh(void)
 {
-	_rcu_barrier(&rcu_state);
+	_rcu_barrier();
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
 
@@ -3307,7 +3304,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  */
 void rcu_barrier(void)
 {
-	_rcu_barrier(&rcu_state);
+	_rcu_barrier();
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread
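
The hunks above leave _rcu_barrier()'s counting discipline intact: the
completion count starts at one rather than zero, each entrained callback
adds one, and the initial reference is dropped only after every CPU has
been visited, so the count cannot hit zero while callbacks are still
being queued.  A minimal userspace sketch of that discipline, using C11
atomics and made-up names (nothing below is kernel API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int barrier_cpu_count;
static int barrier_done;

/* Stand-in for rcu_barrier_callback(): last decrement signals done. */
static void barrier_callback(void)
{
	if (atomic_fetch_sub(&barrier_cpu_count, 1) == 1)
		barrier_done = 1;
}

int main(void)
{
	int cpu, ncpus = 4;

	/*
	 * Start at one, not zero: this initial reference keeps the count
	 * positive while callbacks are still being queued on other CPUs.
	 */
	atomic_store(&barrier_cpu_count, 1);

	for (cpu = 0; cpu < ncpus; cpu++) {
		atomic_fetch_add(&barrier_cpu_count, 1); /* queue one CB */
		barrier_callback();	/* ...which eventually fires */
	}

	/* Drop the initial reference; only now can the count reach zero. */
	barrier_callback();

	printf("barrier %s\n", barrier_done ? "complete" : "still pending");
	return 0;
}

The same shape shows up in many reference-counted teardown paths: the
extra initial reference turns "last decrement wins" into a race-free
completion signal.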

* [PATCH tip/core/rcu 32/52] rcu: Remove rsp parameter from rcu_boot_init_percpu_data() and friends
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (30 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 31/52] rcu: Remove rsp parameter from _rcu_barrier() and friends Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 33/52] rcu: Remove rsp parameter from rcu_init_one() " Paul E. McKenney
                   ` (20 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of
the Linux kernel, so there is no need to pass it as a parameter
to RCU's functions.  This commit therefore removes the rsp
parameter from rcu_boot_init_percpu_data(), rcu_init_percpu_data(),
rcu_cleanup_dying_idle_cpu(), and rcu_migrate_callbacks().  While in
the neighborhood, inline the last three into rcutree_prepare_cpu(),
rcu_report_dead(), and rcutree_migrate_callbacks(), respectively.
This also gets rid of the for_each_rcu_flavor() calls that were in
those three functions.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 102 ++++++++++++++++------------------------------
 1 file changed, 35 insertions(+), 67 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8028936dc95d..3591633efc33 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3351,7 +3351,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
  * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
 static void __init
-rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
+rcu_boot_init_percpu_data(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
@@ -3360,23 +3360,25 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
-	rdp->rcu_ofl_gp_seq = rsp->gp_seq;
+	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
-	rdp->rcu_onl_gp_seq = rsp->gp_seq;
+	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
 	rdp->cpu = cpu;
-	rdp->rsp = rsp;
+	rdp->rsp = &rcu_state;
 	rcu_boot_init_nocb_percpu_data(rdp);
 }
 
 /*
- * Initialize a CPU's per-CPU RCU data.  Note that only one online or
+ * Invoked early in the CPU-online process, when pretty much all services
+ * are available.  The incoming CPU is not present.
+ *
+ * Initializes a CPU's per-CPU RCU data.  Note that only one online or
  * offline event can be happening at a given time.  Note also that we can
  * accept some slop in the rsp->gp_seq access due to the fact that this
  * CPU cannot possibly have any RCU callbacks in flight yet.
  */
-static void
-rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
+int rcutree_prepare_cpu(unsigned int cpu)
 {
 	unsigned long flags;
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
@@ -3385,7 +3387,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	/* Set up local state, ensuring consistent view of global state. */
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rdp->qlen_last_fqs_check = 0;
-	rdp->n_force_qs_snap = rsp->n_force_qs;
+	rdp->n_force_qs_snap = rcu_state.n_force_qs;
 	rdp->blimit = blimit;
 	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
 	    !init_nocb_callback_list(rdp))
@@ -3409,21 +3411,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->core_needs_qs = false;
 	rdp->rcu_iw_pending = false;
 	rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
-	trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuonl"));
+	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-}
-
-/*
- * Invoked early in the CPU-online process, when pretty much all
- * services are available.  The incoming CPU is not present.
- */
-int rcutree_prepare_cpu(unsigned int cpu)
-{
-	struct rcu_state *rsp;
-
-	for_each_rcu_flavor(rsp)
-		rcu_init_percpu_data(cpu, rsp);
-
 	rcu_prepare_kthreads(cpu);
 	rcu_spawn_all_nocb_kthreads(cpu);
 
@@ -3547,23 +3536,32 @@ void rcu_cpu_starting(unsigned int cpu)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
- * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function.  We now remove it from the rcu_node tree's ->qsmaskinitnext
- * bit masks.
+ * The outgoing CPU has no further need of RCU, so remove it from
+ * the rcu_node tree's ->qsmaskinitnext bit masks.
+ *
+ * Note that this function is special in that it is invoked directly
+ * from the outgoing CPU rather than from the cpuhp_step mechanism.
+ * This is because this function must be invoked at a precise location.
  */
-static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+void rcu_report_dead(unsigned int cpu)
 {
 	unsigned long flags;
 	unsigned long mask;
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
+	/* QS for any half-done expedited RCU-sched GP. */
+	preempt_disable();
+	rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data));
+	preempt_enable();
+	rcu_preempt_deferred_qs(current);
+
 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
 	mask = rdp->grpmask;
-	spin_lock(&rsp->ofl_lock);
+	spin_lock(&rcu_state.ofl_lock);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
-	rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
-	rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
+	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
+	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
@@ -3571,34 +3569,17 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 	}
 	rnp->qsmaskinitnext &= ~mask;
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-	spin_unlock(&rsp->ofl_lock);
-}
-
-/*
- * The outgoing function has no further need of RCU, so remove it from
- * the list of CPUs that RCU must track.
- *
- * Note that this function is special in that it is invoked directly
- * from the outgoing CPU rather than from the cpuhp_step mechanism.
- * This is because this function must be invoked at a precise location.
- */
-void rcu_report_dead(unsigned int cpu)
-{
-	struct rcu_state *rsp;
-
-	/* QS for any half-done expedited RCU-sched GP. */
-	preempt_disable();
-	rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data));
-	preempt_enable();
-	rcu_preempt_deferred_qs(current);
-	for_each_rcu_flavor(rsp)
-		rcu_cleanup_dying_idle_cpu(cpu, rsp);
+	spin_unlock(&rcu_state.ofl_lock);
 
 	per_cpu(rcu_cpu_started, cpu) = 0;
 }
 
-/* Migrate the dead CPU's callbacks to the current CPU. */
-static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
+/*
+ * The outgoing CPU has just passed through the dying-idle state, and we
+ * are being invoked from the CPU that was IPIed to continue the offline
+ * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
+ */
+void rcutree_migrate_callbacks(int cpu)
 {
 	unsigned long flags;
 	struct rcu_data *my_rdp;
@@ -3631,19 +3612,6 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
 		  rcu_segcblist_first_cb(&rdp->cblist));
 }
-
-/*
- * The outgoing CPU has just passed through the dying-idle state,
- * and we are being invoked from the CPU that was IPIed to continue the
- * offline operation.  We need to migrate the outgoing CPU's callbacks.
- */
-void rcutree_migrate_callbacks(int cpu)
-{
-	struct rcu_state *rsp;
-
-	for_each_rcu_flavor(rsp)
-		rcu_migrate_callbacks(cpu, rsp);
-}
 #endif
 
 /*
@@ -3813,7 +3781,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		while (i > rnp->grphi)
 			rnp++;
 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
-		rcu_boot_init_percpu_data(i, rsp);
+		rcu_boot_init_percpu_data(i);
 	}
 	list_add(&rsp->flavors, &rcu_struct_flavors);
 }
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread
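
The mechanical core of this patch is collapsing for_each_rcu_flavor()
loops into straight-line code: once only one rcu_state can exist,
iterating a one-element flavor list buys nothing.  A compressed
before/after sketch of that shape, with hypothetical structures standing
in for rcu_state and the flavor list:

#include <stdio.h>

struct state { const char *name; };

/* Old shape: per-CPU setup loops over a list of flavor states. */
static struct state flavors[] = { { "rcu_sched" }, { "rcu_bh" } };

static void init_percpu_data_old(int cpu, struct state *sp)
{
	printf("cpu %d: init for %s\n", cpu, sp->name);
}

/* New shape: one global state, so the parameter and the loop go away. */
static struct state rcu_state_sketch = { "rcu" };

static void init_percpu_data_new(int cpu)
{
	printf("cpu %d: init for %s\n", cpu, rcu_state_sketch.name);
}

int main(void)
{
	unsigned long i;

	for (i = 0; i < sizeof(flavors) / sizeof(flavors[0]); i++)
		init_percpu_data_old(0, &flavors[i]);	/* old shape */
	init_percpu_data_new(0);			/* new shape */
	return 0;
}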

* [PATCH tip/core/rcu 33/52] rcu: Remove rsp parameter from rcu_init_one() and friends
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (31 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 32/52] rcu: Remove rsp parameter from rcu_boot_init_percpu_data() " Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 34/52] rcu: Remove rsp parameter from rcu_print_detail_task_stall() Paul E. McKenney
                   ` (19 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_init_one() and rcu_dump_rcu_node_tree().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3591633efc33..6183c2889a9f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3707,7 +3707,7 @@ void rcu_scheduler_starting(void)
 /*
  * Helper function for rcu_init() that initializes one rcu_state structure.
  */
-static void __init rcu_init_one(struct rcu_state *rsp)
+static void __init rcu_init_one(void)
 {
 	static const char * const buf[] = RCU_NODE_NAME_INIT;
 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
@@ -3719,6 +3719,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 	int i;
 	int j;
 	struct rcu_node *rnp;
+	struct rcu_state *rsp = &rcu_state;
 
 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
 
@@ -3869,14 +3870,14 @@ static void __init rcu_init_geometry(void)
  * Dump out the structure of the rcu_node combining tree associated
  * with the rcu_state structure referenced by rsp.
  */
-static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
+static void __init rcu_dump_rcu_node_tree(void)
 {
 	int level = 0;
 	struct rcu_node *rnp;
 
 	pr_info("rcu_node tree layout dump\n");
 	pr_info(" ");
-	rcu_for_each_node_breadth_first(rsp, rnp) {
+	rcu_for_each_node_breadth_first(&rcu_state, rnp) {
 		if (rnp->level != level) {
 			pr_cont("\n");
 			pr_info(" ");
@@ -3898,9 +3899,9 @@ void __init rcu_init(void)
 
 	rcu_bootup_announce();
 	rcu_init_geometry();
-	rcu_init_one(&rcu_state);
+	rcu_init_one();
 	if (dump_tree)
-		rcu_dump_rcu_node_tree(&rcu_state);
+		rcu_dump_rcu_node_tree();
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
 	/*
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread
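
rcu_dump_rcu_node_tree() can walk the combining tree with a plain
pointer sweep because the rcu_node structures live in a single ->node[]
array laid out in breadth-first order, each node recording its ->level;
the line breaks in the dump come from watching that level change.  A
sketch of the same walk over hypothetical static data:

#include <stdio.h>

/* The combining tree lives in one array, already in breadth-first order. */
struct node { int level, grplo, grphi; };

static struct node tree[] = {
	{ 0, 0, 7 },			/* root: covers CPUs 0-7 */
	{ 1, 0, 3 }, { 1, 4, 7 },	/* two leaf nodes */
};

int main(void)
{
	int level = 0;
	unsigned long i;

	printf("node tree layout dump\n ");
	for (i = 0; i < sizeof(tree) / sizeof(tree[0]); i++) {
		if (tree[i].level != level) {
			printf("\n ");	/* one output line per tree level */
			level = tree[i].level;
		}
		printf("%d:%d ", tree[i].grplo, tree[i].grphi);
	}
	printf("\n");
	return 0;
}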

* [PATCH tip/core/rcu 34/52] rcu: Remove rsp parameter from rcu_print_detail_task_stall()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (32 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 33/52] rcu: Remove rsp parameter from rcu_init_one() " Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 35/52] rcu: Remove rsp parameter from dump_blkd_tasks() and friend Paul E. McKenney
                   ` (18 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_print_detail_task_stall().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        | 2 +-
 kernel/rcu/tree.h        | 2 +-
 kernel/rcu/tree_plugin.h | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 6183c2889a9f..e484ee95a192 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1359,7 +1359,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
 		rcu_dump_cpu_stacks();
 
 		/* Complain about tasks blocking the grace period. */
-		rcu_print_detail_task_stall(rsp);
+		rcu_print_detail_task_stall();
 	} else {
 		if (rcu_seq_current(&rsp->gp_seq) != gp_seq) {
 			pr_err("INFO: Stall ended before state dump start\n");
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index d60304f1ef56..00d268cb4d04 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -452,7 +452,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void rcu_print_detail_task_stall(struct rcu_state *rsp);
+static void rcu_print_detail_task_stall(void);
 static int rcu_print_task_stall(struct rcu_node *rnp);
 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
 static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index c678c76a754e..1d8148b0d4e5 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -683,12 +683,12 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
  * Dump detailed information for all tasks blocking the current RCU
  * grace period.
  */
-static void rcu_print_detail_task_stall(struct rcu_state *rsp)
+static void rcu_print_detail_task_stall(void)
 {
 	struct rcu_node *rnp = rcu_get_root();
 
 	rcu_print_detail_task_stall_rnp(rnp);
-	rcu_for_each_leaf_node(rsp, rnp)
+	rcu_for_each_leaf_node(&rcu_state, rnp)
 		rcu_print_detail_task_stall_rnp(rnp);
 }
 
@@ -1005,7 +1005,7 @@ static void rcu_preempt_deferred_qs(struct task_struct *t) { }
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static void rcu_print_detail_task_stall(struct rcu_state *rsp)
+static void rcu_print_detail_task_stall(void)
 {
 }
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread
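
Note the two definitions of rcu_print_detail_task_stall() in
tree_plugin.h: a real one for preemptible RCU and an empty stub
otherwise, both now sharing the parameter-free prototype so that call
sites need no #ifdefs.  A sketch of that convention, with a plain macro
standing in for the Kconfig option:

#include <stdio.h>

/* Stand-in for CONFIG_PREEMPT_RCU; flip to 0 to get the empty stub. */
#define SKETCH_PREEMPT_RCU 1

#if SKETCH_PREEMPT_RCU
static void print_detail_task_stall(void)
{
	printf("dumping tasks blocking the grace period\n");
}
#else
/* No preemptible readers exist, so there is never anything to print. */
static void print_detail_task_stall(void)
{
}
#endif

int main(void)
{
	/* Call sites are identical in both configurations. */
	print_detail_task_stall();
	return 0;
}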

* [PATCH tip/core/rcu 35/52] rcu: Remove rsp parameter from dump_blkd_tasks() and friend
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (33 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 34/52] rcu: Remove rsp parameter from rcu_print_detail_task_stall() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 36/52] rcu: Remove rsp parameter from rcu_spawn_one_boost_kthread() Paul E. McKenney
                   ` (17 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
dump_blkd_tasks() and rcu_preempt_blocked_readers_cgp().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        |  4 ++--
 kernel/rcu/tree.h        |  6 ++----
 kernel/rcu/tree_plugin.h | 12 +++++-------
 3 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e484ee95a192..aa81958edec8 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1936,7 +1936,7 @@ static bool rcu_gp_init(void)
 		rcu_gp_slow(gp_init_delay);
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rdp = this_cpu_ptr(&rcu_data);
-		rcu_preempt_check_blocked_tasks(rsp, rnp);
+		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
 		WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
 		if (rnp == rdp->mynode)
@@ -2048,7 +2048,7 @@ static void rcu_gp_cleanup(void)
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
-			dump_blkd_tasks(rsp, rnp, 10);
+			dump_blkd_tasks(rnp, 10);
 		WARN_ON_ONCE(rnp->qsmask);
 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
 		rdp = this_cpu_ptr(&rcu_data);
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 00d268cb4d04..ccdee6bd3919 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -455,12 +455,10 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static void rcu_print_detail_task_stall(void);
 static int rcu_print_task_stall(struct rcu_node *rnp);
 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
-static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
-					    struct rcu_node *rnp);
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 static void rcu_flavor_check_callbacks(int user);
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
-static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
-			    int ncheck);
+static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static void invoke_rcu_callbacks_kthread(void);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 1d8148b0d4e5..9a3d30121815 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -756,14 +756,13 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * Also, if there are blocked tasks on the list, they automatically
  * block the newly created grace period, so set up ->gp_tasks accordingly.
  */
-static void
-rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
 	struct task_struct *t;
 
 	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
 	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
-		dump_blkd_tasks(rsp, rnp, 10);
+		dump_blkd_tasks(rnp, 10);
 	if (rcu_preempt_has_tasks(rnp) &&
 	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
 		rnp->gp_tasks = rnp->blkd_tasks.next;
@@ -884,7 +883,7 @@ void exit_rcu(void)
  * specified number of elements.
  */
 static void
-dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
 {
 	int cpu;
 	int i;
@@ -1033,8 +1032,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * so there is no need to check for blocked tasks.  So check only for
  * bogus qsmask values.
  */
-static void
-rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
 	WARN_ON_ONCE(rnp->qsmask);
 }
@@ -1095,7 +1093,7 @@ void exit_rcu(void)
  * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
  */
 static void
-dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
 {
 	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
 }
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread
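
The ncheck argument that survives the signature change bounds how many
list elements dump_blkd_tasks() will examine, so a corrupted or very
long ->blkd_tasks list cannot wedge a path that is already printing
diagnostics.  A sketch of that bounded traversal over a hypothetical
task list:

#include <stdio.h>

struct task { int id; struct task *next; };

/*
 * Walk at most ncheck entries, the way dump_blkd_tasks() does, so a
 * corrupt or huge list cannot stall an already-failing caller.
 */
static void dump_tasks(struct task *head, int ncheck)
{
	struct task *t;
	int i = 0;

	for (t = head; t && i < ncheck; t = t->next, i++)
		printf(" task %d", t->id);
	printf("\n");
}

int main(void)
{
	struct task c = { 3, NULL };
	struct task b = { 2, &c };
	struct task a = { 1, &b };

	dump_tasks(&a, 10);
	return 0;
}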

* [PATCH tip/core/rcu 36/52] rcu: Remove rsp parameter from rcu_spawn_one_boost_kthread()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (34 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 35/52] rcu: Remove rsp parameter from dump_blkd_tasks() and friend Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 37/52] rcu: Remove rsp parameter from print_cpu_stall_info() Paul E. McKenney
                   ` (16 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_spawn_one_boost_kthread().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.h        |  4 ----
 kernel/rcu/tree_plugin.h | 13 ++++++-------
 2 files changed, 6 insertions(+), 11 deletions(-)

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index ccdee6bd3919..dc1c337f6da9 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -463,10 +463,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static void invoke_rcu_callbacks_kthread(void);
 static bool rcu_is_callbacks_kthread(void);
-#ifdef CONFIG_RCU_BOOST
-static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-						 struct rcu_node *rnp);
-#endif /* #ifdef CONFIG_RCU_BOOST */
 static void __init rcu_spawn_boost_kthreads(void);
 static void rcu_prepare_kthreads(int cpu);
 static void rcu_cleanup_after_idle(void);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 9a3d30121815..9a6dea5fab86 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1290,21 +1290,20 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * already exist.  We only create this kthread for preemptible RCU.
  * Returns zero if all is well, a negated errno otherwise.
  */
-static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-				       struct rcu_node *rnp)
+static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 {
-	int rnp_index = rnp - &rsp->node[0];
+	int rnp_index = rnp - rcu_get_root();
 	unsigned long flags;
 	struct sched_param sp;
 	struct task_struct *t;
 
-	if (&rcu_state != rsp)
+	if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
 		return 0;
 
 	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
 		return 0;
 
-	rsp->boost = 1;
+	rcu_state.boost = 1;
 	if (rnp->boost_kthread_task != NULL)
 		return 0;
 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1430,7 +1429,7 @@ static void __init rcu_spawn_boost_kthreads(void)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
 	rcu_for_each_leaf_node(&rcu_state, rnp)
-		(void)rcu_spawn_one_boost_kthread(&rcu_state, rnp);
+		(void)rcu_spawn_one_boost_kthread(rnp);
 }
 
 static void rcu_prepare_kthreads(int cpu)
@@ -1440,7 +1439,7 @@ static void rcu_prepare_kthreads(int cpu)
 
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
 	if (rcu_scheduler_fully_active)
-		(void)rcu_spawn_one_boost_kthread(&rcu_state, rnp);
+		(void)rcu_spawn_one_boost_kthread(rnp);
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread
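
Two details of this conversion are worth unpacking.  The expression
rnp - rcu_get_root() works because rcu_get_root() returns the address of
element zero of the rcu_node array, so plain pointer subtraction yields
the node's index; and the runtime check &rcu_state != rsp becomes the
compile-time !IS_ENABLED(CONFIG_PREEMPT_RCU), letting the compiler drop
the function body entirely in non-preemptible builds.  A sketch of the
pointer-difference idiom over hypothetical data:

#include <stddef.h>
#include <stdio.h>

struct node { int level; };

static struct node tree[8];	/* stand-in for the rcu_node array */

static struct node *get_root(void)
{
	return &tree[0];	/* the root is always element zero */
}

int main(void)
{
	struct node *rnp = &tree[5];

	/*
	 * Pointer subtraction scales by sizeof(struct node), so the
	 * result is the element index, as in rnp - rcu_get_root().
	 */
	ptrdiff_t rnp_index = rnp - get_root();

	printf("rnp_index = %td\n", rnp_index);	/* prints 5 */
	return 0;
}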

* [PATCH tip/core/rcu 37/52] rcu: Remove rsp parameter from print_cpu_stall_info()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (35 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 36/52] rcu: Remove rsp parameter from rcu_spawn_one_boost_kthread() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 38/52] rcu: Remove rsp parameter from no-CBs CPU functions Paul E. McKenney
                   ` (15 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
print_cpu_stall_info().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        | 4 ++--
 kernel/rcu/tree.h        | 2 +-
 kernel/rcu/tree_plugin.h | 6 +++---
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index aa81958edec8..1b63a28f148d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1341,7 +1341,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
 		if (rnp->qsmask != 0) {
 			for_each_leaf_node_possible_cpu(rnp, cpu)
 				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
-					print_cpu_stall_info(rsp, cpu);
+					print_cpu_stall_info(cpu);
 					ndetected++;
 				}
 		}
@@ -1408,7 +1408,7 @@ static void print_cpu_stall(void)
 	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
 	print_cpu_stall_info_begin();
 	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
-	print_cpu_stall_info(rsp, smp_processor_id());
+	print_cpu_stall_info(smp_processor_id());
 	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
 	print_cpu_stall_info_end();
 	for_each_possible_cpu(cpu)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index dc1c337f6da9..2bf57de9f78a 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -472,7 +472,7 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
 static void rcu_preempt_deferred_qs(struct task_struct *t);
 static void print_cpu_stall_info_begin(void);
-static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
+static void print_cpu_stall_info(int cpu);
 static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 9a6dea5fab86..08ff162e02b3 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1771,7 +1771,7 @@ static void print_cpu_stall_info_begin(void)
  *
  * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
  */
-static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
+static void print_cpu_stall_info(int cpu)
 {
 	unsigned long delta;
 	char fast_no_hz[72];
@@ -1786,7 +1786,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 	 */
 	touch_nmi_watchdog();
 
-	ticks_value = rcu_seq_ctr(rsp->gp_seq - rdp->gp_seq);
+	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
 	if (ticks_value) {
 		ticks_title = "GPs behind";
 	} else {
@@ -1807,7 +1807,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 	       rcu_dynticks_snap(rdtp) & 0xfff,
 	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
-	       READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
+	       READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
 	       fast_no_hz);
 }
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread
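
The line ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq)
relies on the rcu_seq encoding, in which the low-order bits of a
grace-period sequence number carry state and the remaining bits count
grace periods, so shifting the difference yields how many full grace
periods the CPU is behind.  A sketch assuming the kernel's two state
bits (RCU_SEQ_CTR_SHIFT == 2):

#include <stdio.h>

/*
 * Two low-order bits of a gp_seq value carry state; the rest is a
 * grace-period counter.
 */
#define SEQ_CTR_SHIFT	2
#define seq_ctr(s)	((s) >> SEQ_CTR_SHIFT)

int main(void)
{
	unsigned long gp_seq = 5UL << SEQ_CTR_SHIFT;	 /* global: GP #5 */
	unsigned long rdp_gp_seq = 2UL << SEQ_CTR_SHIFT; /* CPU saw GP #2 */

	/* How many full grace periods is this CPU behind?  Prints 3. */
	printf("GPs behind: %lu\n", seq_ctr(gp_seq - rdp_gp_seq));
	return 0;
}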

* [PATCH tip/core/rcu 38/52] rcu: Remove rsp parameter from no-CBs CPU functions
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (36 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 37/52] rcu: Remove rsp parameter from print_cpu_stall_info() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 39/52] rcu: Remove rsp parameter from expedited grace-period functions Paul E. McKenney
                   ` (14 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to
RCU's functions.  This commit therefore removes the rsp parameter
from rcu_nocb_cpu_needs_barrier(), rcu_spawn_one_nocb_kthread(),
rcu_organize_nocb_kthreads(), and rcu_nohz_full_cpu().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        |  4 ++--
 kernel/rcu/tree.h        |  6 +++---
 kernel/rcu/tree_plugin.h | 18 +++++++++---------
 3 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 1b63a28f148d..6d8e4a4d8019 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3094,7 +3094,7 @@ static int rcu_pending(void)
 	check_cpu_stall(rdp);
 
 	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
-	if (rcu_nohz_full_cpu(&rcu_state))
+	if (rcu_nohz_full_cpu())
 		return 0;
 
 	/* Is the RCU core waiting for a quiescent state from this CPU? */
@@ -3245,7 +3245,7 @@ static void _rcu_barrier(void)
 			continue;
 		rdp = per_cpu_ptr(&rcu_data, cpu);
 		if (rcu_is_nocb_cpu(cpu)) {
-			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
+			if (!rcu_nocb_cpu_needs_barrier(cpu)) {
 				_rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
 						   rsp->barrier_sequence);
 			} else {
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 2bf57de9f78a..7c6033d71e9d 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -476,7 +476,7 @@ static void print_cpu_stall_info(int cpu);
 static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
+static bool rcu_nocb_cpu_needs_barrier(int cpu);
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
 static void rcu_init_one_nocb(struct rcu_node *rnp);
@@ -491,11 +491,11 @@ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_all_nocb_kthreads(int cpu);
 static void __init rcu_spawn_nocb_kthreads(void);
 #ifdef CONFIG_RCU_NOCB_CPU
-static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
+static void __init rcu_organize_nocb_kthreads(void);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 static bool init_nocb_callback_list(struct rcu_data *rdp);
 static void rcu_bind_gp_kthread(void);
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
+static bool rcu_nohz_full_cpu(void);
 static void rcu_dynticks_task_enter(void);
 static void rcu_dynticks_task_exit(void);
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 08ff162e02b3..69705ec13527 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1960,7 +1960,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
  * Does the specified CPU need an RCU callback for the specified flavor
  * of rcu_barrier()?
  */
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+static bool rcu_nocb_cpu_needs_barrier(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	unsigned long ret;
@@ -2424,7 +2424,7 @@ void __init rcu_init_nohz(void)
 	for_each_rcu_flavor(rsp) {
 		for_each_cpu(cpu, rcu_nocb_mask)
 			init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu));
-		rcu_organize_nocb_kthreads(rsp);
+		rcu_organize_nocb_kthreads();
 	}
 }
 
@@ -2444,7 +2444,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
  * brought online out of order, this can require re-organizing the
  * leader-follower relationships.
  */
-static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
+static void rcu_spawn_one_nocb_kthread(int cpu)
 {
 	struct rcu_data *rdp;
 	struct rcu_data *rdp_last;
@@ -2481,7 +2481,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
 
 	/* Spawn the kthread for this CPU and RCU flavor. */
 	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
-			"rcuo%c/%d", rsp->abbr, cpu);
+			"rcuo%c/%d", rcu_state.abbr, cpu);
 	BUG_ON(IS_ERR(t));
 	WRITE_ONCE(rdp_spawn->nocb_kthread, t);
 }
@@ -2496,7 +2496,7 @@ static void rcu_spawn_all_nocb_kthreads(int cpu)
 
 	if (rcu_scheduler_fully_active)
 		for_each_rcu_flavor(rsp)
-			rcu_spawn_one_nocb_kthread(rsp, cpu);
+			rcu_spawn_one_nocb_kthread(cpu);
 }
 
 /*
@@ -2520,7 +2520,7 @@ module_param(rcu_nocb_leader_stride, int, 0444);
 /*
  * Initialize leader-follower relationships for all no-CBs CPUs.
  */
-static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
+static void __init rcu_organize_nocb_kthreads(void)
 {
 	int cpu;
 	int ls = rcu_nocb_leader_stride;
@@ -2579,7 +2579,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+static bool rcu_nocb_cpu_needs_barrier(int cpu)
 {
 	WARN_ON_ONCE(1); /* Should be dead code. */
 	return false;
@@ -2648,12 +2648,12 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
  * This code relies on the fact that all NO_HZ_FULL CPUs are also
  * CONFIG_RCU_NOCB_CPU CPUs.
  */
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
+static bool rcu_nohz_full_cpu(void)
 {
 #ifdef CONFIG_NO_HZ_FULL
 	if (tick_nohz_full_cpu(smp_processor_id()) &&
 	    (!rcu_gp_in_progress() ||
-	     ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
+	     ULONG_CMP_LT(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
 		return true;
 #endif /* #ifdef CONFIG_NO_HZ_FULL */
 	return false;
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread
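
rcu_organize_nocb_kthreads() groups the no-CBs CPUs into leader/follower
sets using rcu_nocb_leader_stride, which defaults to roughly the square
root of nr_cpu_ids so that neither the number of leaders nor the number
of followers per leader grows linearly with CPU count.  A toy sketch of
that grouping, approximating the kernel's DIV_ROUND_UP() arithmetic with
a simple modulus and assuming a fully populated nocb mask:

#include <stdio.h>

/* Integer square root by linear search; the kernel uses int_sqrt(). */
static int isqrt(int x)
{
	int r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	int nr_cpus = 16;
	int stride = isqrt(nr_cpus);	/* default leader stride: 4 */
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (cpu % stride == 0)
			printf("cpu %2d: leader\n", cpu);
		else
			printf("cpu %2d: follower of %d\n",
			       cpu, cpu - cpu % stride);
	}
	return 0;
}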

* [PATCH tip/core/rcu 39/52] rcu: Remove rsp parameter from expedited grace-period functions
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (37 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 38/52] rcu: Remove rsp parameter from no-CBs CPU functions Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 40/52] rcu: Remove rsp parameter from rcu_node tree accessor macros Paul E. McKenney
                   ` (13 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to
RCU's functions.  This commit therefore removes the rsp parameter
from the code in kernel/rcu/tree_exp.h, and removes all of the
rsp local variables while in the area.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        |   4 +-
 kernel/rcu/tree.h        |   1 -
 kernel/rcu/tree_exp.h    | 185 ++++++++++++++++++---------------------
 kernel/rcu/tree_plugin.h |  13 ++-
 4 files changed, 94 insertions(+), 109 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 6d8e4a4d8019..c3031b566851 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -139,7 +139,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_data *rdp);
-static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp);
+static void rcu_report_exp_rdp(struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);
 
 /* rcuc/rcub kthread realtime priority */
@@ -3552,7 +3552,7 @@ void rcu_report_dead(unsigned int cpu)
 
 	/* QS for any half-done expedited RCU-sched GP. */
 	preempt_disable();
-	rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data));
+	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
 	preempt_enable();
 	rcu_preempt_deferred_qs(current);
 
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 7c6033d71e9d..b21d79bdab23 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -61,7 +61,6 @@ struct rcu_dynticks {
 /* Communicate arguments to a workqueue handler. */
 struct rcu_exp_work {
 	smp_call_func_t rew_func;
-	struct rcu_state *rew_rsp;
 	unsigned long rew_s;
 	struct work_struct rew_work;
 };
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 0bcbb03c9702..b6f7bc34ac49 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -25,39 +25,39 @@
 /*
  * Record the start of an expedited grace period.
  */
-static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
+static void rcu_exp_gp_seq_start(void)
 {
-	rcu_seq_start(&rsp->expedited_sequence);
+	rcu_seq_start(&rcu_state.expedited_sequence);
 }
 
 /*
  * Return the value that the expedited-grace-period counter will have
  * at the end of the current grace period.
  */
-static __maybe_unused unsigned long rcu_exp_gp_seq_endval(struct rcu_state *rsp)
+static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
 {
-	return rcu_seq_endval(&rsp->expedited_sequence);
+	return rcu_seq_endval(&rcu_state.expedited_sequence);
 }
 
 /*
  * Record the end of an expedited grace period.
  */
-static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
+static void rcu_exp_gp_seq_end(void)
 {
-	rcu_seq_end(&rsp->expedited_sequence);
+	rcu_seq_end(&rcu_state.expedited_sequence);
 	smp_mb(); /* Ensure that consecutive grace periods serialize. */
 }
 
 /*
  * Take a snapshot of the expedited-grace-period counter.
  */
-static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
+static unsigned long rcu_exp_gp_seq_snap(void)
 {
 	unsigned long s;
 
 	smp_mb(); /* Caller's modifications seen first by other CPUs. */
-	s = rcu_seq_snap(&rsp->expedited_sequence);
-	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
+	s = rcu_seq_snap(&rcu_state.expedited_sequence);
+	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
 	return s;
 }
 
@@ -66,9 +66,9 @@ static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
  * if a full expedited grace period has elapsed since that snapshot
  * was taken.
  */
-static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
+static bool rcu_exp_gp_seq_done(unsigned long s)
 {
-	return rcu_seq_done(&rsp->expedited_sequence, s);
+	return rcu_seq_done(&rcu_state.expedited_sequence, s);
 }
 
 /*
@@ -78,26 +78,26 @@ static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
  * ever been online.  This means that this function normally takes its
  * no-work-to-do fastpath.
  */
-static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
+static void sync_exp_reset_tree_hotplug(void)
 {
 	bool done;
 	unsigned long flags;
 	unsigned long mask;
 	unsigned long oldmask;
-	int ncpus = smp_load_acquire(&rsp->ncpus); /* Order against locking. */
+	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
 	struct rcu_node *rnp;
 	struct rcu_node *rnp_up;
 
 	/* If no new CPUs onlined since last time, nothing to do. */
-	if (likely(ncpus == rsp->ncpus_snap))
+	if (likely(ncpus == rcu_state.ncpus_snap))
 		return;
-	rsp->ncpus_snap = ncpus;
+	rcu_state.ncpus_snap = ncpus;
 
 	/*
 	 * Each pass through the following loop propagates newly onlined
 	 * CPUs for the current rcu_node structure up the rcu_node tree.
 	 */
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(&rcu_state, rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmaskinit == rnp->expmaskinitnext) {
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -135,13 +135,13 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
  * Reset the ->expmask values in the rcu_node tree in preparation for
  * a new expedited grace period.
  */
-static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
+static void __maybe_unused sync_exp_reset_tree(void)
 {
 	unsigned long flags;
 	struct rcu_node *rnp;
 
-	sync_exp_reset_tree_hotplug(rsp);
-	rcu_for_each_node_breadth_first(rsp, rnp) {
+	sync_exp_reset_tree_hotplug();
+	rcu_for_each_node_breadth_first(&rcu_state, rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		WARN_ON_ONCE(rnp->expmask);
 		rnp->expmask = rnp->expmaskinit;
@@ -194,7 +194,7 @@ static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
  *
  * Caller must hold the specified rcu_node structure's ->lock.
  */
-static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+static void __rcu_report_exp_rnp(struct rcu_node *rnp,
 				 bool wake, unsigned long flags)
 	__releases(rnp->lock)
 {
@@ -212,7 +212,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
-				swake_up_one(&rsp->expedited_wq);
+				swake_up_one(&rcu_state.expedited_wq);
 			}
 			break;
 		}
@@ -229,20 +229,19 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
  * Report expedited quiescent state for specified node.  This is a
  * lock-acquisition wrapper function for __rcu_report_exp_rnp().
  */
-static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
-					      struct rcu_node *rnp, bool wake)
+static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
 {
 	unsigned long flags;
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
+	__rcu_report_exp_rnp(rnp, wake, flags);
 }
 
 /*
  * Report expedited quiescent state for multiple CPUs, all covered by the
  * specified leaf rcu_node structure.
  */
-static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
+static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
 				    unsigned long mask, bool wake)
 {
 	unsigned long flags;
@@ -253,23 +252,23 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 		return;
 	}
 	rnp->expmask &= ~mask;
-	__rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
+	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
 }
 
 /*
  * Report expedited quiescent state for specified rcu_data (CPU).
  */
-static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp)
+static void rcu_report_exp_rdp(struct rcu_data *rdp)
 {
 	WRITE_ONCE(rdp->deferred_qs, false);
-	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, true);
+	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
 }
 
 /* Common code for work-done checking. */
-static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
+static bool sync_exp_work_done(unsigned long s)
 {
-	if (rcu_exp_gp_seq_done(rsp, s)) {
-		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
+	if (rcu_exp_gp_seq_done(s)) {
+		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
 		/* Ensure test happens before caller kfree(). */
 		smp_mb__before_atomic(); /* ^^^ */
 		return true;
@@ -284,7 +283,7 @@ static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
  * with the mutex held, indicating that the caller must actually do the
  * expedited grace period.
  */
-static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
+static bool exp_funnel_lock(unsigned long s)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
 	struct rcu_node *rnp = rdp->mynode;
@@ -294,18 +293,18 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
 	    (rnp == rnp_root ||
 	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
-	    mutex_trylock(&rsp->exp_mutex))
+	    mutex_trylock(&rcu_state.exp_mutex))
 		goto fastpath;
 
 	/*
 	 * Each pass through the following loop works its way up
 	 * the rcu_node tree, returning if others have done the work or
-	 * otherwise falls through to acquire rsp->exp_mutex.  The mapping
+	 * otherwise falls through to acquire ->exp_mutex.  The mapping
 	 * from CPU to rcu_node structure can be inexact, as it is just
 	 * promoting locality and is not strictly needed for correctness.
 	 */
 	for (; rnp != NULL; rnp = rnp->parent) {
-		if (sync_exp_work_done(rsp, s))
+		if (sync_exp_work_done(s))
 			return true;
 
 		/* Work not done, either wait here or go up. */
@@ -314,26 +313,26 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 
 			/* Someone else doing GP, so wait for them. */
 			spin_unlock(&rnp->exp_lock);
-			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
+			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
 						  rnp->grplo, rnp->grphi,
 						  TPS("wait"));
 			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
-				   sync_exp_work_done(rsp, s));
+				   sync_exp_work_done(s));
 			return true;
 		}
 		rnp->exp_seq_rq = s; /* Followers can wait on us. */
 		spin_unlock(&rnp->exp_lock);
-		trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
-					  rnp->grphi, TPS("nxtlvl"));
+		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
+					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
 	}
-	mutex_lock(&rsp->exp_mutex);
+	mutex_lock(&rcu_state.exp_mutex);
 fastpath:
-	if (sync_exp_work_done(rsp, s)) {
-		mutex_unlock(&rsp->exp_mutex);
+	if (sync_exp_work_done(s)) {
+		mutex_unlock(&rcu_state.exp_mutex);
 		return true;
 	}
-	rcu_exp_gp_seq_start(rsp);
-	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
+	rcu_exp_gp_seq_start();
+	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
 	return false;
 }
 
@@ -352,7 +351,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 	struct rcu_exp_work *rewp =
 		container_of(wp, struct rcu_exp_work, rew_work);
 	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
-	struct rcu_state *rsp = rewp->rew_rsp;
 
 	func = rewp->rew_func;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -400,7 +398,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 			mask_ofl_test |= mask;
 			continue;
 		}
-		ret = smp_call_function_single(cpu, func, rsp, 0);
+		ret = smp_call_function_single(cpu, func, NULL, 0);
 		if (!ret) {
 			mask_ofl_ipi &= ~mask;
 			continue;
@@ -411,7 +409,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 		    (rnp->expmask & mask)) {
 			/* Online, so delay for a bit and try again. */
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-			trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("selectofl"));
+			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
 			schedule_timeout_uninterruptible(1);
 			goto retry_ipi;
 		}
@@ -423,33 +421,31 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 	/* Report quiescent states for those that went offline. */
 	mask_ofl_test |= mask_ofl_ipi;
 	if (mask_ofl_test)
-		rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
+		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
 }
 
 /*
  * Select the nodes that the upcoming expedited grace period needs
  * to wait for.
  */
-static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
-				     smp_call_func_t func)
+static void sync_rcu_exp_select_cpus(smp_call_func_t func)
 {
 	int cpu;
 	struct rcu_node *rnp;
 
-	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
-	sync_exp_reset_tree(rsp);
-	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select"));
+	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
+	sync_exp_reset_tree();
+	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
 
 	/* Schedule work for each leaf rcu_node structure. */
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(&rcu_state, rnp) {
 		rnp->exp_need_flush = false;
 		if (!READ_ONCE(rnp->expmask))
 			continue; /* Avoid early boot non-existent wq. */
 		rnp->rew.rew_func = func;
-		rnp->rew.rew_rsp = rsp;
 		if (!READ_ONCE(rcu_par_gp_wq) ||
 		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
-		    rcu_is_last_leaf_node(rsp, rnp)) {
+		    rcu_is_last_leaf_node(&rcu_state, rnp)) {
 			/* No workqueues yet or last leaf, do direct call. */
 			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
 			continue;
@@ -466,12 +462,12 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 	}
 
 	/* Wait for workqueue jobs (if any) to complete. */
-	rcu_for_each_leaf_node(rsp, rnp)
+	rcu_for_each_leaf_node(&rcu_state, rnp)
 		if (rnp->exp_need_flush)
 			flush_work(&rnp->rew.rew_work);
 }
 
-static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
+static void synchronize_sched_expedited_wait(void)
 {
 	int cpu;
 	unsigned long jiffies_stall;
@@ -482,13 +478,13 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 	struct rcu_node *rnp_root = rcu_get_root();
 	int ret;
 
-	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("startwait"));
+	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
 	jiffies_stall = rcu_jiffies_till_stall_check();
 	jiffies_start = jiffies;
 
 	for (;;) {
 		ret = swait_event_timeout_exclusive(
-				rsp->expedited_wq,
+				rcu_state.expedited_wq,
 				sync_rcu_preempt_exp_done_unlocked(rnp_root),
 				jiffies_stall);
 		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
@@ -498,9 +494,9 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 			continue;
 		panic_on_rcu_stall();
 		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
-		       rsp->name);
+		       rcu_state.name);
 		ndetected = 0;
-		rcu_for_each_leaf_node(rsp, rnp) {
+		rcu_for_each_leaf_node(&rcu_state, rnp) {
 			ndetected += rcu_print_task_exp_stall(rnp);
 			for_each_leaf_node_possible_cpu(rnp, cpu) {
 				struct rcu_data *rdp;
@@ -517,11 +513,11 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 			}
 		}
 		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
-			jiffies - jiffies_start, rsp->expedited_sequence,
+			jiffies - jiffies_start, rcu_state.expedited_sequence,
 			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
 		if (ndetected) {
 			pr_err("blocking rcu_node structures:");
-			rcu_for_each_node_breadth_first(rsp, rnp) {
+			rcu_for_each_node_breadth_first(&rcu_state, rnp) {
 				if (rnp == rnp_root)
 					continue; /* printed unconditionally */
 				if (sync_rcu_preempt_exp_done_unlocked(rnp))
@@ -533,7 +529,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 			}
 			pr_cont("\n");
 		}
-		rcu_for_each_leaf_node(rsp, rnp) {
+		rcu_for_each_leaf_node(&rcu_state, rnp) {
 			for_each_leaf_node_possible_cpu(rnp, cpu) {
 				mask = leaf_node_cpu_bit(rnp, cpu);
 				if (!(rnp->expmask & mask))
@@ -551,21 +547,21 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
  * grace period.  Also update all the ->exp_seq_rq counters as needed
  * in order to avoid counter-wrap problems.
  */
-static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
+static void rcu_exp_wait_wake(unsigned long s)
 {
 	struct rcu_node *rnp;
 
-	synchronize_sched_expedited_wait(rsp);
-	rcu_exp_gp_seq_end(rsp);
-	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
+	synchronize_sched_expedited_wait();
+	rcu_exp_gp_seq_end();
+	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
 
 	/*
 	 * Switch over to wakeup mode, allowing the next GP, but -only- the
 	 * next GP, to proceed.
 	 */
-	mutex_lock(&rsp->exp_wake_mutex);
+	mutex_lock(&rcu_state.exp_wake_mutex);
 
-	rcu_for_each_node_breadth_first(rsp, rnp) {
+	rcu_for_each_node_breadth_first(&rcu_state, rnp) {
 		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
 			spin_lock(&rnp->exp_lock);
 			/* Recheck, avoid hang in case someone just arrived. */
@@ -574,24 +570,23 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 			spin_unlock(&rnp->exp_lock);
 		}
 		smp_mb(); /* All above changes before wakeup. */
-		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
+		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
 	}
-	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
-	mutex_unlock(&rsp->exp_wake_mutex);
+	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
+	mutex_unlock(&rcu_state.exp_wake_mutex);
 }
 
 /*
  * Common code to drive an expedited grace period forward, used by
  * workqueues and mid-boot-time tasks.
  */
-static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
-				  smp_call_func_t func, unsigned long s)
+static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s)
 {
 	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp, func);
+	sync_rcu_exp_select_cpus(func);
 
 	/* Wait and clean up, including waking everyone. */
-	rcu_exp_wait_wake(rsp, s);
+	rcu_exp_wait_wake(s);
 }
 
 /*
@@ -602,15 +597,14 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
 	struct rcu_exp_work *rewp;
 
 	rewp = container_of(wp, struct rcu_exp_work, rew_work);
-	rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
+	rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s);
 }
 
 /*
  * Given an rcu_state pointer and a smp_call_function() handler, kick
  * off the specified flavor of expedited grace period.
  */
-static void _synchronize_rcu_expedited(struct rcu_state *rsp,
-				       smp_call_func_t func)
+static void _synchronize_rcu_expedited(smp_call_func_t func)
 {
 	struct rcu_data *rdp;
 	struct rcu_exp_work rew;
@@ -624,18 +618,17 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
 	}
 
 	/* Take a snapshot of the sequence number.  */
-	s = rcu_exp_gp_seq_snap(rsp);
-	if (exp_funnel_lock(rsp, s))
+	s = rcu_exp_gp_seq_snap();
+	if (exp_funnel_lock(s))
 		return;  /* Someone else did our work for us. */
 
 	/* Ensure that load happens before action based on it. */
 	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
 		/* Direct call during scheduler init and early_initcalls(). */
-		rcu_exp_sel_wait_wake(rsp, func, s);
+		rcu_exp_sel_wait_wake(func, s);
 	} else {
 		/* Marshall arguments & schedule the expedited grace period. */
 		rew.rew_func = func;
-		rew.rew_rsp = rsp;
 		rew.rew_s = s;
 		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
 		queue_work(rcu_gp_wq, &rew.rew_work);
@@ -645,11 +638,11 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
 	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
 	rnp = rcu_get_root();
 	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
-		   sync_exp_work_done(rsp, s));
+		   sync_exp_work_done(s));
 	smp_mb(); /* Workqueue actions happen before return. */
 
 	/* Let the next expedited grace period start. */
-	mutex_unlock(&rsp->exp_mutex);
+	mutex_unlock(&rcu_state.exp_mutex);
 }
 
 #ifdef CONFIG_PREEMPT_RCU
@@ -661,10 +654,9 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
  * ->expmask fields in the rcu_node tree.  Otherwise, immediately
  * report the quiescent state.
  */
-static void sync_rcu_exp_handler(void *info)
+static void sync_rcu_exp_handler(void *unused)
 {
 	unsigned long flags;
-	struct rcu_state *rsp = info;
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rdp->mynode;
 	struct task_struct *t = current;
@@ -677,7 +669,7 @@ static void sync_rcu_exp_handler(void *info)
 	if (!t->rcu_read_lock_nesting) {
 		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
 		    rcu_dynticks_curr_cpu_in_eqs()) {
-			rcu_report_exp_rdp(rsp, rdp);
+			rcu_report_exp_rdp(rdp);
 		} else {
 			rdp->deferred_qs = true;
 			resched_cpu(rdp->cpu);
@@ -756,8 +748,6 @@ static void sync_sched_exp_online_cleanup(int cpu)
  */
 void synchronize_rcu_expedited(void)
 {
-	struct rcu_state *rsp = &rcu_state;
-
 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
 			 lock_is_held(&rcu_lock_map) ||
 			 lock_is_held(&rcu_sched_lock_map),
@@ -765,7 +755,7 @@ void synchronize_rcu_expedited(void)
 
 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 		return;
-	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
+	_synchronize_rcu_expedited(sync_rcu_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
@@ -783,7 +773,7 @@ static void sync_sched_exp_handler(void *unused)
 	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
 		return;
 	if (rcu_is_cpu_rrupt_from_idle()) {
-		rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data));
+		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
 		return;
 	}
 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
@@ -798,13 +788,12 @@ static void sync_sched_exp_online_cleanup(int cpu)
 	struct rcu_data *rdp;
 	int ret;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp = &rcu_state;
 
 	rdp = per_cpu_ptr(&rcu_data, cpu);
 	rnp = rdp->mynode;
 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
 		return;
-	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
+	ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0);
 	WARN_ON_ONCE(ret);
 }
 
@@ -831,8 +820,6 @@ static int rcu_blocking_is_gp(void)
 /* PREEMPT=n implementation of synchronize_rcu_expedited(). */
 void synchronize_rcu_expedited(void)
 {
-	struct rcu_state *rsp = &rcu_state;
-
 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
 			 lock_is_held(&rcu_lock_map) ||
 			 lock_is_held(&rcu_sched_lock_map),
@@ -842,7 +829,7 @@ void synchronize_rcu_expedited(void)
 	if (rcu_blocking_is_gp())
 		return;
 
-	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
+	_synchronize_rcu_expedited(sync_sched_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 69705ec13527..e6ec25e47d00 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -123,8 +123,7 @@ static void __init rcu_bootup_announce_oddness(void)
 
 #ifdef CONFIG_PREEMPT_RCU
 
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-			       bool wake);
+static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
 static void rcu_read_unlock_special(struct task_struct *t);
 
 /*
@@ -281,7 +280,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 	 * still in a quiescent state in any case.)
 	 */
 	if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
-		rcu_report_exp_rdp(rdp->rsp, rdp);
+		rcu_report_exp_rdp(rdp);
 	else
 		WARN_ON_ONCE(rdp->deferred_qs);
 }
@@ -381,7 +380,7 @@ void rcu_note_context_switch(bool preempt)
 	 */
 	rcu_qs();
 	if (rdp->deferred_qs)
-		rcu_report_exp_rdp(&rcu_state, rdp);
+		rcu_report_exp_rdp(rdp);
 	trace_rcu_utilization(TPS("End context switch"));
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
@@ -509,7 +508,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 	 * blocked-tasks list below.
 	 */
 	if (rdp->deferred_qs) {
-		rcu_report_exp_rdp(&rcu_state, rdp);
+		rcu_report_exp_rdp(rdp);
 		if (!t->rcu_read_unlock_special.s) {
 			local_irq_restore(flags);
 			return;
@@ -580,7 +579,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 		 * then we need to report up the rcu_node hierarchy.
 		 */
 		if (!empty_exp && empty_exp_now)
-			rcu_report_exp_rnp(&rcu_state, rnp, true);
+			rcu_report_exp_rnp(rnp, true);
 	} else {
 		local_irq_restore(flags);
 	}
@@ -947,7 +946,7 @@ static void rcu_qs(void)
 	if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
 		return;
 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, false);
-	rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data));
+	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
 }
 
 /*
-- 
2.17.1
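
For reference, the marshalling idiom in _synchronize_rcu_expedited() above
(pack the arguments into an on-stack structure, queue it, then wait for the
sequence number to complete) can be sketched in userspace roughly as
follows, with pthreads standing in for the kernel workqueue and
wait_event(), and with simplified struct fields that are not the kernel's:

#include <pthread.h>
#include <stdio.h>

struct rcu_exp_work {                   /* cf. rew_func/rew_s above */
        void (*rew_func)(void *);
        unsigned long rew_s;            /* snapshotted sequence number */
};

static void gp_body(void *unused)
{
        (void)unused;
        puts("expedited grace period body runs");
}

static void *wait_rcu_exp_gp(void *wp)  /* kernel uses container_of() here */
{
        struct rcu_exp_work *rewp = wp;

        rewp->rew_func(NULL);
        printf("sequence %lu complete\n", rewp->rew_s);
        return NULL;
}

int main(void)
{
        struct rcu_exp_work rew = { .rew_func = gp_body, .rew_s = 4 };
        pthread_t worker;

        pthread_create(&worker, NULL, wait_rcu_exp_gp, &rew); /* queue_work() */
        pthread_join(&worker, NULL);    /* stands in for wait_event() */
        return 0;
}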



* [PATCH tip/core/rcu 40/52] rcu: Remove rsp parameter from rcu_node tree accessor macros
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (38 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 39/52] rcu: Remove rsp parameter from expedited grace-period functions Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 41/52] rcu: Remove rcu_data structure's ->rsp field Paul E. McKenney
                   ` (12 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

There now is only one rcu_state structure in a given build of the Linux
kernel, so there is no need to pass it as a parameter to the accessor
macros for RCU's rcu_node tree.  This commit therefore removes the rsp
parameter from those macros in kernel/rcu/rcu.h, and removes some
now-unused rsp local variables while in the area.
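
A compilable toy version of the change, with an invented four-node layout
(the real macros live in kernel/rcu/rcu.h and appear in the diff below):

#include <stdio.h>

#define NUM_RCU_NODES 4

struct rcu_node { int grplo; };
struct rcu_state { struct rcu_node node[NUM_RCU_NODES]; };

/* The one and only rcu_state, so the macro can name it directly
 * instead of taking an rsp argument. */
static struct rcu_state rcu_state;

#define rcu_for_each_node_breadth_first(rnp) \
        for ((rnp) = &rcu_state.node[0]; \
             (rnp) < &rcu_state.node[NUM_RCU_NODES]; (rnp)++)

int main(void)
{
        struct rcu_node *rnp;
        int i = 0;

        rcu_for_each_node_breadth_first(rnp)
                rnp->grplo = i++;
        rcu_for_each_node_breadth_first(rnp)
                printf("visited node, grplo=%d\n", rnp->grplo);
        return 0;
}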

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 .../Data-Structures/Data-Structures.html      | 23 +++++----------
 kernel/rcu/rcu.h                              | 28 ++++++++-----------
 kernel/rcu/srcutree.c                         |  4 +--
 kernel/rcu/tree.c                             | 19 ++++++-------
 kernel/rcu/tree_exp.h                         | 18 ++++++------
 kernel/rcu/tree_plugin.h                      |  4 +--
 6 files changed, 40 insertions(+), 56 deletions(-)

diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index f5120a00f511..772c26a3865a 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -1372,8 +1372,7 @@ that is, if the CPU is currently idle.
 Accessor Functions</a></h3>
 
 <p>The following listing shows the
-<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first</tt>,
-<tt>rcu_for_each_nonleaf_node_breadth_first()</tt>, and
+<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first</tt> and
 <tt>rcu_for_each_leaf_node()</tt> function and macros:
 
 <pre>
@@ -1386,13 +1385,9 @@ Accessor Functions</a></h3>
   7   for ((rnp) = &amp;(rsp)-&gt;node[0]; \
   8        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
   9
- 10 #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
- 11   for ((rnp) = &amp;(rsp)-&gt;node[0]; \
- 12        (rnp) &lt; (rsp)-&gt;level[NUM_RCU_LVLS - 1]; (rnp)++)
- 13
- 14 #define rcu_for_each_leaf_node(rsp, rnp) \
- 15   for ((rnp) = (rsp)-&gt;level[NUM_RCU_LVLS - 1]; \
- 16        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
+ 10 #define rcu_for_each_leaf_node(rsp, rnp) \
+ 11   for ((rnp) = (rsp)-&gt;level[NUM_RCU_LVLS - 1]; \
+ 12        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
 </pre>
 
 <p>The <tt>rcu_get_root()</tt> simply returns a pointer to the
@@ -1405,10 +1400,7 @@ macro takes advantage of the layout of the <tt>rcu_node</tt>
 structures in the <tt>rcu_state</tt> structure's
 <tt>-&gt;node[]</tt> array, performing a breadth-first traversal by
 simply traversing the array in order.
-The <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> macro operates
-similarly, but traverses only the first part of the array, thus excluding
-the leaf <tt>rcu_node</tt> structures.
-Finally, the <tt>rcu_for_each_leaf_node()</tt> macro traverses only
+Similarly, the <tt>rcu_for_each_leaf_node()</tt> macro traverses only
 the last part of the array, thus traversing only the leaf
 <tt>rcu_node</tt> structures.
 
@@ -1416,15 +1408,14 @@ the last part of the array, thus traversing only the leaf
 <tr><th>&nbsp;</th></tr>
 <tr><th align="left">Quick Quiz:</th></tr>
 <tr><td>
-	What do <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> and
+	What does
 	<tt>rcu_for_each_leaf_node()</tt> do if the <tt>rcu_node</tt> tree
 	contains only a single node?
 </td></tr>
 <tr><th align="left">Answer:</th></tr>
 <tr><td bgcolor="#ffffff"><font color="ffffff">
 	In the single-node case,
-	<tt>rcu_for_each_nonleaf_node_breadth_first()</tt> is a no-op
-	and <tt>rcu_for_each_leaf_node()</tt> traverses the single node.
+	<tt>rcu_for_each_leaf_node()</tt> traverses the single node.
 </font></td></tr>
 <tr><td>&nbsp;</td></tr>
 </table>
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 4d04683c31b2..2bb77fddc11f 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -329,29 +329,23 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
 }
 
 /* Returns first leaf rcu_node of the specified RCU flavor. */
-#define rcu_first_leaf_node(rsp) ((rsp)->level[rcu_num_lvls - 1])
+#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])
 
 /* Is this rcu_node a leaf? */
 #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
 
 /* Is this rcu_node the last leaf? */
-#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])
+#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])
 
 /*
- * Do a full breadth-first scan of the rcu_node structures for the
+ * Do a full breadth-first scan of the {s,}rcu_node structures for the
  * specified rcu_state structure.
  */
-#define rcu_for_each_node_breadth_first(rsp, rnp) \
-	for ((rnp) = &(rsp)->node[0]; \
-	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
-
-/*
- * Do a breadth-first scan of the non-leaf rcu_node structures for the
- * specified rcu_state structure.  Note that if there is a singleton
- * rcu_node tree with but one rcu_node structure, this loop is a no-op.
- */
-#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
-	for ((rnp) = &(rsp)->node[0]; !rcu_is_leaf_node(rsp, rnp); (rnp)++)
+#define srcu_for_each_node_breadth_first(sp, rnp) \
+	for ((rnp) = &(sp)->node[0]; \
+	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
+#define rcu_for_each_node_breadth_first(rnp) \
+	srcu_for_each_node_breadth_first(&rcu_state, rnp)
 
 /*
  * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
@@ -359,9 +353,9 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
  * one rcu_node structure, this loop -will- visit the rcu_node structure.
  * It is still a leaf node, even if it is also the root node.
  */
-#define rcu_for_each_leaf_node(rsp, rnp) \
-	for ((rnp) = rcu_first_leaf_node(rsp); \
-	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
+#define rcu_for_each_leaf_node(rnp) \
+	for ((rnp) = rcu_first_leaf_node(); \
+	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)
 
 /*
  * Iterate over all possible CPUs in a leaf RCU node.
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 6c9866a854b1..2042080cd38b 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -105,7 +105,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 	rcu_init_levelspread(levelspread, num_rcu_lvl);
 
 	/* Each pass through this loop initializes one srcu_node structure. */
-	rcu_for_each_node_breadth_first(sp, snp) {
+	srcu_for_each_node_breadth_first(sp, snp) {
 		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
 		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
 			     ARRAY_SIZE(snp->srcu_data_have_cbs));
@@ -561,7 +561,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 
 	/* Initiate callback invocation as needed. */
 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
-	rcu_for_each_node_breadth_first(sp, snp) {
+	srcu_for_each_node_breadth_first(sp, snp) {
 		spin_lock_irq_rcu_node(snp);
 		cbs = false;
 		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c3031b566851..35b705c1da40 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -573,7 +573,7 @@ void show_rcu_gp_kthreads(void)
 	for_each_rcu_flavor(rsp) {
 		pr_info("%s: wait state: %d ->state: %#lx\n",
 			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
-		rcu_for_each_node_breadth_first(rsp, rnp) {
+		rcu_for_each_node_breadth_first(rnp) {
 			if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
 				continue;
 			pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
@@ -1275,7 +1275,7 @@ static void rcu_dump_cpu_stacks(void)
 	unsigned long flags;
 	struct rcu_node *rnp;
 
-	rcu_for_each_leaf_node(&rcu_state, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		for_each_leaf_node_possible_cpu(rnp, cpu)
 			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
@@ -1335,7 +1335,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
 	 */
 	pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
 	print_cpu_stall_info_begin();
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		ndetected += rcu_print_task_stall(rnp);
 		if (rnp->qsmask != 0) {
@@ -1872,7 +1872,7 @@ static bool rcu_gp_init(void)
 	 * will handle subsequent offline CPUs.
 	 */
 	rsp->gp_state = RCU_GP_ONOFF;
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		spin_lock(&rsp->ofl_lock);
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
@@ -1932,7 +1932,7 @@ static bool rcu_gp_init(void)
 	 * process finishes, because this kthread handles both.
 	 */
 	rsp->gp_state = RCU_GP_INIT;
-	rcu_for_each_node_breadth_first(rsp, rnp) {
+	rcu_for_each_node_breadth_first(rnp) {
 		rcu_gp_slow(gp_init_delay);
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rdp = this_cpu_ptr(&rcu_data);
@@ -2045,7 +2045,7 @@ static void rcu_gp_cleanup(void)
 	 */
 	new_gp_seq = rsp->gp_seq;
 	rcu_seq_end(&new_gp_seq);
-	rcu_for_each_node_breadth_first(rsp, rnp) {
+	rcu_for_each_node_breadth_first(rnp) {
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
 			dump_blkd_tasks(rnp, 10);
@@ -2605,9 +2605,8 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rsp))
 	unsigned long flags;
 	unsigned long mask;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp = &rcu_state;
 
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		cond_resched_tasks_rcu_qs();
 		mask = 0;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -3777,7 +3776,7 @@ static void __init rcu_init_one(void)
 
 	init_swait_queue_head(&rsp->gp_wq);
 	init_swait_queue_head(&rsp->expedited_wq);
-	rnp = rcu_first_leaf_node(rsp);
+	rnp = rcu_first_leaf_node();
 	for_each_possible_cpu(i) {
 		while (i > rnp->grphi)
 			rnp++;
@@ -3877,7 +3876,7 @@ static void __init rcu_dump_rcu_node_tree(void)
 
 	pr_info("rcu_node tree layout dump\n");
 	pr_info(" ");
-	rcu_for_each_node_breadth_first(&rcu_state, rnp) {
+	rcu_for_each_node_breadth_first(rnp) {
 		if (rnp->level != level) {
 			pr_cont("\n");
 			pr_info(" ");
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index b6f7bc34ac49..060bdb45cd95 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -97,7 +97,7 @@ static void sync_exp_reset_tree_hotplug(void)
 	 * Each pass through the following loop propagates newly onlined
 	 * CPUs for the current rcu_node structure up the rcu_node tree.
 	 */
-	rcu_for_each_leaf_node(&rcu_state, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmaskinit == rnp->expmaskinitnext) {
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -141,7 +141,7 @@ static void __maybe_unused sync_exp_reset_tree(void)
 	struct rcu_node *rnp;
 
 	sync_exp_reset_tree_hotplug();
-	rcu_for_each_node_breadth_first(&rcu_state, rnp) {
+	rcu_for_each_node_breadth_first(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		WARN_ON_ONCE(rnp->expmask);
 		rnp->expmask = rnp->expmaskinit;
@@ -438,14 +438,14 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
 	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
 
 	/* Schedule work for each leaf rcu_node structure. */
-	rcu_for_each_leaf_node(&rcu_state, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		rnp->exp_need_flush = false;
 		if (!READ_ONCE(rnp->expmask))
 			continue; /* Avoid early boot non-existent wq. */
 		rnp->rew.rew_func = func;
 		if (!READ_ONCE(rcu_par_gp_wq) ||
 		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
-		    rcu_is_last_leaf_node(&rcu_state, rnp)) {
+		    rcu_is_last_leaf_node(rnp)) {
 			/* No workqueues yet or last leaf, do direct call. */
 			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
 			continue;
@@ -462,7 +462,7 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
 	}
 
 	/* Wait for workqueue jobs (if any) to complete. */
-	rcu_for_each_leaf_node(&rcu_state, rnp)
+	rcu_for_each_leaf_node(rnp)
 		if (rnp->exp_need_flush)
 			flush_work(&rnp->rew.rew_work);
 }
@@ -496,7 +496,7 @@ static void synchronize_sched_expedited_wait(void)
 		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
 		       rcu_state.name);
 		ndetected = 0;
-		rcu_for_each_leaf_node(&rcu_state, rnp) {
+		rcu_for_each_leaf_node(rnp) {
 			ndetected += rcu_print_task_exp_stall(rnp);
 			for_each_leaf_node_possible_cpu(rnp, cpu) {
 				struct rcu_data *rdp;
@@ -517,7 +517,7 @@ static void synchronize_sched_expedited_wait(void)
 			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
 		if (ndetected) {
 			pr_err("blocking rcu_node structures:");
-			rcu_for_each_node_breadth_first(&rcu_state, rnp) {
+			rcu_for_each_node_breadth_first(rnp) {
 				if (rnp == rnp_root)
 					continue; /* printed unconditionally */
 				if (sync_rcu_preempt_exp_done_unlocked(rnp))
@@ -529,7 +529,7 @@ static void synchronize_sched_expedited_wait(void)
 			}
 			pr_cont("\n");
 		}
-		rcu_for_each_leaf_node(&rcu_state, rnp) {
+		rcu_for_each_leaf_node(rnp) {
 			for_each_leaf_node_possible_cpu(rnp, cpu) {
 				mask = leaf_node_cpu_bit(rnp, cpu);
 				if (!(rnp->expmask & mask))
@@ -561,7 +561,7 @@ static void rcu_exp_wait_wake(unsigned long s)
 	 */
 	mutex_lock(&rcu_state.exp_wake_mutex);
 
-	rcu_for_each_node_breadth_first(&rcu_state, rnp) {
+	rcu_for_each_node_breadth_first(rnp) {
 		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
 			spin_lock(&rnp->exp_lock);
 			/* Recheck, avoid hang in case someone just arrived. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index e6ec25e47d00..b60d3df92ff5 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -687,7 +687,7 @@ static void rcu_print_detail_task_stall(void)
 	struct rcu_node *rnp = rcu_get_root();
 
 	rcu_print_detail_task_stall_rnp(rnp);
-	rcu_for_each_leaf_node(&rcu_state, rnp)
+	rcu_for_each_leaf_node(rnp)
 		rcu_print_detail_task_stall_rnp(rnp);
 }
 
@@ -1427,7 +1427,7 @@ static void __init rcu_spawn_boost_kthreads(void)
 	for_each_possible_cpu(cpu)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
-	rcu_for_each_leaf_node(&rcu_state, rnp)
+	rcu_for_each_leaf_node(rnp)
 		(void)rcu_spawn_one_boost_kthread(rnp);
 }
 
-- 
2.17.1



* [PATCH tip/core/rcu 41/52] rcu: Remove rcu_data structure's ->rsp field
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (39 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 40/52] rcu: Remove rsp parameter from rcu_node tree accessor macros Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 42/52] rcu: Remove last non-flavor-traversal rsp local variable from tree_plugin.h Paul E. McKenney
                   ` (11 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

Now that there is only one rcu_state structure, there is no need for the
rcu_data structure to record which rcu_state it corresponds to.  This
commit therefore removes the rcu_data structure's ->rsp field, replacing
all remaining uses of it with &rcu_state.
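
The most instructive hunk below is rcu_barrier_callback(), which used
container_of() to find its rcu_data only to chase the ->rsp pointer.
A userspace sketch of why that back-pointer becomes dead weight once the
state is a known global (fields and values here are invented for
illustration):

#include <stdio.h>

struct rcu_head { struct rcu_head *next; };

struct rcu_state { int barrier_cpu_count; };
static struct rcu_state rcu_state = { .barrier_cpu_count = 2 };

struct rcu_data {
        struct rcu_head barrier_head;
        /* struct rcu_state *rsp;  <-- the removed back-pointer */
};

static void rcu_barrier_callback(struct rcu_head *rhp)
{
        (void)rhp;      /* no longer needed to locate the rcu_state */
        /* Before: container_of(rhp, struct rcu_data, barrier_head)->rsp.
         * After: just name the global. */
        if (--rcu_state.barrier_cpu_count == 0)
                puts("last callback: wake up rcu_barrier()");
}

int main(void)
{
        struct rcu_data rdp[2];

        rcu_barrier_callback(&rdp[0].barrier_head);
        rcu_barrier_callback(&rdp[1].barrier_head);
        return 0;
}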

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        | 28 +++++++++++++--------------
 kernel/rcu/tree.h        |  1 -
 kernel/rcu/tree_plugin.h | 42 ++++++++++++++++++++--------------------
 3 files changed, 34 insertions(+), 37 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 35b705c1da40..bc52f8c16faf 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1069,7 +1069,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
 	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
 	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
-		trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
+		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
 		rcu_gpnum_ovf(rdp->mynode, rdp);
 		return 1;
 	}
@@ -1119,7 +1119,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * of the current RCU grace period.
 	 */
 	if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
-		trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
+		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
 		rdp->dynticks_fqs++;
 		rcu_gpnum_ovf(rnp, rdp);
 		return 1;
@@ -1133,20 +1133,20 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 */
 	jtsq = jiffies_till_sched_qs;
 	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
-	if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
+	if (time_after(jiffies, rcu_state.gp_start + jtsq) &&
 	    READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
 	    rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) {
-		trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
+		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
 		rcu_gpnum_ovf(rnp, rdp);
 		return 1;
-	} else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
+	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
 		/* Load rcu_qs_ctr before store to rcu_urgent_qs. */
 		smp_store_release(ruqp, true);
 	}
 
 	/* If waiting too long on an offline CPU, complain. */
 	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
-	    time_after(jiffies, rdp->rsp->gp_start + HZ)) {
+	    time_after(jiffies, rcu_state.gp_start + HZ)) {
 		bool onl;
 		struct rcu_node *rnp1;
 
@@ -1184,12 +1184,12 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 */
 	rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
 	if (!READ_ONCE(*rnhqp) &&
-	    (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
-	     time_after(jiffies, rdp->rsp->jiffies_resched))) {
+	    (time_after(jiffies, rcu_state.gp_start + jtsq) ||
+	     time_after(jiffies, rcu_state.jiffies_resched))) {
 		WRITE_ONCE(*rnhqp, true);
 		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
 		smp_store_release(ruqp, true);
-		rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */
+		rcu_state.jiffies_resched += jtsq; /* Re-enable beating. */
 	}
 
 	/*
@@ -1198,7 +1198,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * see if the CPU is getting hammered with interrupts, but only
 	 * once per grace period, just to keep the IPIs down to a dull roar.
 	 */
-	if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
+	if (jiffies - rcu_state.gp_start > rcu_jiffies_till_stall_check() / 2) {
 		resched_cpu(rdp->cpu);
 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
@@ -1525,7 +1525,7 @@ void rcu_cpu_stall_reset(void)
 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 			      unsigned long gp_seq_req, const char *s)
 {
-	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req,
+	trace_rcu_future_grace_period(rcu_state.name, rnp->gp_seq, gp_seq_req,
 				      rnp->level, rnp->grplo, rnp->grphi, s);
 }
 
@@ -1549,7 +1549,7 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
 			      unsigned long gp_seq_req)
 {
 	bool ret = false;
-	struct rcu_state *rsp = rdp->rsp;
+	struct rcu_state *rsp = &rcu_state;
 	struct rcu_node *rnp;
 
 	/*
@@ -3166,8 +3166,7 @@ static void _rcu_barrier_trace(const char *s, int cpu, unsigned long done)
  */
 static void rcu_barrier_callback(struct rcu_head *rhp)
 {
-	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
-	struct rcu_state *rsp = rdp->rsp;
+	struct rcu_state *rsp = &rcu_state;
 
 	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
 		_rcu_barrier_trace(TPS("LastCB"), -1, rsp->barrier_sequence);
@@ -3364,7 +3363,6 @@ rcu_boot_init_percpu_data(int cpu)
 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
 	rdp->cpu = cpu;
-	rdp->rsp = &rcu_state;
 	rcu_boot_init_nocb_percpu_data(rdp);
 }
 
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index b21d79bdab23..6f1b1a3fc23d 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -265,7 +265,6 @@ struct rcu_data {
 	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */
 
 	int cpu;
-	struct rcu_state *rsp;
 };
 
 /* Values for nocb_defer_wakeup field in struct rcu_data. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index b60d3df92ff5..5423f9e58494 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -350,7 +350,7 @@ void rcu_note_context_switch(bool preempt)
 		 */
 		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
 		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
-		trace_rcu_preempt_task(rdp->rsp->name,
+		trace_rcu_preempt_task(rcu_state.name,
 				       t->pid,
 				       (rnp->qsmask & rdp->grpmask)
 				       ? rnp->gp_seq
@@ -1951,7 +1951,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
 	if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
 		mod_timer(&rdp->nocb_timer, jiffies + 1);
 	WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
-	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, reason);
+	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
 	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 }
 
@@ -2030,7 +2030,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 	/* If we are not being polled and there is a kthread, awaken it ... */
 	t = READ_ONCE(rdp->nocb_kthread);
 	if (rcu_nocb_poll || !t) {
-		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 				    TPS("WakeNotPoll"));
 		return;
 	}
@@ -2039,7 +2039,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 		if (!irqs_disabled_flags(flags)) {
 			/* ... if queue was empty ... */
 			wake_nocb_leader(rdp, false);
-			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 					    TPS("WakeEmpty"));
 		} else {
 			wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE,
@@ -2050,7 +2050,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 		/* ... or if many callbacks queued. */
 		if (!irqs_disabled_flags(flags)) {
 			wake_nocb_leader(rdp, true);
-			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 					    TPS("WakeOvf"));
 		} else {
 			wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE_FORCE,
@@ -2058,7 +2058,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 		}
 		rdp->qlen_last_fqs_check = LONG_MAX / 2;
 	} else {
-		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
+		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
 	}
 	return;
 }
@@ -2080,12 +2080,12 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 		return false;
 	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
-		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
+		trace_rcu_kfree_callback(rcu_state.name, rhp,
 					 (unsigned long)rhp->func,
 					 -atomic_long_read(&rdp->nocb_q_count_lazy),
 					 -atomic_long_read(&rdp->nocb_q_count));
 	else
-		trace_rcu_callback(rdp->rsp->name, rhp,
+		trace_rcu_callback(rcu_state.name, rhp,
 				   -atomic_long_read(&rdp->nocb_q_count_lazy),
 				   -atomic_long_read(&rdp->nocb_q_count));
 
@@ -2135,7 +2135,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	struct rcu_node *rnp = rdp->mynode;
 
 	local_irq_save(flags);
-	c = rcu_seq_snap(&rdp->rsp->gp_seq);
+	c = rcu_seq_snap(&rcu_state.gp_seq);
 	if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
 		local_irq_restore(flags);
 	} else {
@@ -2180,7 +2180,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
 
 	/* Wait for callbacks to appear. */
 	if (!rcu_nocb_poll) {
-		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
+		trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Sleep"));
 		swait_event_interruptible_exclusive(my_rdp->nocb_wq,
 				!READ_ONCE(my_rdp->nocb_leader_sleep));
 		raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
@@ -2190,7 +2190,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
 		raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
 	} else if (firsttime) {
 		firsttime = false; /* Don't drown trace log with "Poll"! */
-		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Poll"));
+		trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Poll"));
 	}
 
 	/*
@@ -2217,7 +2217,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
 		if (rcu_nocb_poll) {
 			schedule_timeout_interruptible(1);
 		} else {
-			trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
+			trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu,
 					    TPS("WokeEmpty"));
 		}
 		goto wait_again;
@@ -2262,7 +2262,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
 static void nocb_follower_wait(struct rcu_data *rdp)
 {
 	for (;;) {
-		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
+		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FollowerSleep"));
 		swait_event_interruptible_exclusive(rdp->nocb_wq,
 					 READ_ONCE(rdp->nocb_follower_head));
 		if (smp_load_acquire(&rdp->nocb_follower_head)) {
@@ -2270,7 +2270,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
 			return;
 		}
 		WARN_ON(signal_pending(current));
-		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeEmpty"));
+		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
 	}
 }
 
@@ -2305,10 +2305,10 @@ static int rcu_nocb_kthread(void *arg)
 		rdp->nocb_follower_tail = &rdp->nocb_follower_head;
 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 		BUG_ON(!list);
-		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeNonEmpty"));
+		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty"));
 
 		/* Each pass through the following loop invokes a callback. */
-		trace_rcu_batch_start(rdp->rsp->name,
+		trace_rcu_batch_start(rcu_state.name,
 				      atomic_long_read(&rdp->nocb_q_count_lazy),
 				      atomic_long_read(&rdp->nocb_q_count), -1);
 		c = cl = 0;
@@ -2316,23 +2316,23 @@ static int rcu_nocb_kthread(void *arg)
 			next = list->next;
 			/* Wait for enqueuing to complete, if needed. */
 			while (next == NULL && &list->next != tail) {
-				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 						    TPS("WaitQueue"));
 				schedule_timeout_interruptible(1);
-				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 						    TPS("WokeQueue"));
 				next = list->next;
 			}
 			debug_rcu_head_unqueue(list);
 			local_bh_disable();
-			if (__rcu_reclaim(rdp->rsp->name, list))
+			if (__rcu_reclaim(rcu_state.name, list))
 				cl++;
 			c++;
 			local_bh_enable();
 			cond_resched_tasks_rcu_qs();
 			list = next;
 		}
-		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
+		trace_rcu_batch_end(rcu_state.name, c, !!list, 0, 0, 1);
 		smp_mb__before_atomic();  /* _add after CB invocation. */
 		atomic_long_add(-c, &rdp->nocb_q_count);
 		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
@@ -2360,7 +2360,7 @@ static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
 	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
 	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
 	__wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
-	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
+	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
 }
 
 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
-- 
2.17.1



* [PATCH tip/core/rcu 42/52] rcu: Remove last non-flavor-traversal rsp local variable from tree_plugin.h
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (40 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 41/52] rcu: Remove rcu_data structure's ->rsp field Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 43/52] rcu: Remove for_each_rcu_flavor() flavor-traversal macro Paul E. McKenney
                   ` (10 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

This commit removes the last non-flavor-traversal rsp local variable from
kernel/rcu/tree_plugin.h in favor of &rcu_state.  The flavor-traversal
locals will be removed with the removal of flavor traversal.
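
One of the lines this patch rewrites is the
time_after(jiffies, rcu_state.gp_start + HZ) test in the hunk below.  For
reference, a standalone demo of that wrap-safe comparison, built on the
same signed-subtraction idea as the kernel's time_after() (the values are
invented):

#include <stdio.h>

/* Same idea as the kernel's time_after(a, b): true if a is after b,
 * correct across counter wrap while the two stay within LONG_MAX. */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long gp_start = (unsigned long)-10;    /* about to wrap */
        unsigned long jiffies = 5;                      /* wrapped past 0 */

        printf("%d\n", time_after(jiffies, gp_start + 20)); /* 0: too soon */
        printf("%d\n", time_after(jiffies, gp_start + 10)); /* 1: elapsed */
        return 0;
}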

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree_plugin.h | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 5423f9e58494..59d66ee26310 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -782,7 +782,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
  */
 static void rcu_flavor_check_callbacks(int user)
 {
-	struct rcu_state *rsp = &rcu_state;
 	struct task_struct *t = current;
 
 	if (user || rcu_is_cpu_rrupt_from_idle()) {
@@ -806,7 +805,7 @@ static void rcu_flavor_check_callbacks(int user)
 	    __this_cpu_read(rcu_data.core_needs_qs) &&
 	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
 	    !t->rcu_read_unlock_special.b.need_qs &&
-	    time_after(jiffies, rsp->gp_start + HZ))
+	    time_after(jiffies, rcu_state.gp_start + HZ))
 		t->rcu_read_unlock_special.b.need_qs = true;
 }
 
@@ -1761,12 +1760,11 @@ static void print_cpu_stall_info_begin(void)
 /*
  * Print out diagnostic information for the specified stalled CPU.
  *
- * If the specified CPU is aware of the current RCU grace period
- * (flavor specified by rsp), then print the number of scheduling
- * clock interrupts the CPU has taken during the time that it has
- * been aware.  Otherwise, print the number of RCU grace periods
- * that this CPU is ignorant of, for example, "1" if the CPU was
- * aware of the previous grace period.
+ * If the specified CPU is aware of the current RCU grace period, then
+ * print the number of scheduling clock interrupts the CPU has taken
+ * during the time that it has been aware.  Otherwise, print the number
+ * of RCU grace periods that this CPU is ignorant of, for example, "1"
+ * if the CPU was aware of the previous grace period.
  *
  * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
  */
-- 
2.17.1



* [PATCH tip/core/rcu 43/52] rcu: Remove for_each_rcu_flavor() flavor-traversal macro
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (41 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 42/52] rcu: Remove last non-flavor-traversal rsp local variable from tree_plugin.h Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 44/52] rcu: Simplify rcutorture_get_gp_data() Paul E. McKenney
                   ` (9 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

Now that there is only ever a single flavor of RCU in a given kernel
build, there isn't a whole lot of point in having a flavor-traversal
macro.  This commit therefore removes it and converts its call sites to
straight-line code, inlining trivial functions as appropriate.
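
To see concretely what is being deleted, here is a freestanding
approximation of a list-based for_each_rcu_flavor(), using a toy
list_for_each_entry() rather than the kernel's <linux/list.h> (GNU C, for
typeof).  With a single flavor on the list, the loop body runs exactly
once, so straight-line code is equivalent:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_for_each_entry(pos, head, member)                          \
        for ((pos) = list_entry((head)->next, typeof(*(pos)), member);  \
             &(pos)->member != (head);                                  \
             (pos) = list_entry((pos)->member.next, typeof(*(pos)), member))

struct rcu_state {
        const char *name;
        struct list_head flavors;
};

static struct list_head rcu_struct_flavors;
static struct rcu_state rcu_state = { .name = "rcu" };

#define for_each_rcu_flavor(rsp) \
        list_for_each_entry((rsp), &rcu_struct_flavors, flavors)

int main(void)
{
        struct rcu_state *rsp;

        /* Hand-rolled list_add() of the one and only flavor. */
        rcu_struct_flavors.next = rcu_struct_flavors.prev = &rcu_state.flavors;
        rcu_state.flavors.next = rcu_state.flavors.prev = &rcu_struct_flavors;

        for_each_rcu_flavor(rsp)                /* iterates once... */
                printf("flavor: %s\n", rsp->name);
        printf("flavor: %s\n", rcu_state.name); /* ...same as straight-line */
        return 0;
}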

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c        | 172 ++++++++++++++++-----------------------
 kernel/rcu/tree.h        |   7 --
 kernel/rcu/tree_plugin.h |  59 +++++---------
 3 files changed, 92 insertions(+), 146 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index bc52f8c16faf..cec025cc1f87 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -85,8 +85,6 @@ struct rcu_state rcu_state = {
 	.ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
 };
 
-LIST_HEAD(rcu_struct_flavors);
-
 /* Dump rcu_node combining tree at boot to verify correct setup. */
 static bool dump_tree;
 module_param(dump_tree, bool, 0444);
@@ -568,31 +566,28 @@ void show_rcu_gp_kthreads(void)
 	int cpu;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp;
 
-	for_each_rcu_flavor(rsp) {
-		pr_info("%s: wait state: %d ->state: %#lx\n",
-			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
-		rcu_for_each_node_breadth_first(rnp) {
-			if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
-				continue;
-			pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
-				rnp->grplo, rnp->grphi, rnp->gp_seq,
-				rnp->gp_seq_needed);
-			if (!rcu_is_leaf_node(rnp))
+	pr_info("%s: wait state: %d ->state: %#lx\n", rcu_state.name,
+		rcu_state.gp_state, rcu_state.gp_kthread->state);
+	rcu_for_each_node_breadth_first(rnp) {
+		if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
+			continue;
+		pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
+			rnp->grplo, rnp->grphi, rnp->gp_seq,
+			rnp->gp_seq_needed);
+		if (!rcu_is_leaf_node(rnp))
+			continue;
+		for_each_leaf_node_possible_cpu(rnp, cpu) {
+			rdp = per_cpu_ptr(&rcu_data, cpu);
+			if (rdp->gpwrap ||
+			    ULONG_CMP_GE(rcu_state.gp_seq,
+					 rdp->gp_seq_needed))
 				continue;
-			for_each_leaf_node_possible_cpu(rnp, cpu) {
-				rdp = per_cpu_ptr(&rcu_data, cpu);
-				if (rdp->gpwrap ||
-				    ULONG_CMP_GE(rsp->gp_seq,
-						 rdp->gp_seq_needed))
-					continue;
-				pr_info("\tcpu %d ->gp_seq_needed %lu\n",
-					cpu, rdp->gp_seq_needed);
-			}
+			pr_info("\tcpu %d ->gp_seq_needed %lu\n",
+				cpu, rdp->gp_seq_needed);
 		}
-		/* sched_show_task(rsp->gp_kthread); */
 	}
+	/* sched_show_task(rcu_state.gp_kthread); */
 }
 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
 
@@ -638,7 +633,6 @@ static struct rcu_node *rcu_get_root(void)
  */
 static void rcu_eqs_enter(bool user)
 {
-	struct rcu_state *rsp;
 	struct rcu_data *rdp;
 	struct rcu_dynticks *rdtp;
 
@@ -655,10 +649,8 @@ static void rcu_eqs_enter(bool user)
 	lockdep_assert_irqs_disabled();
 	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
-	for_each_rcu_flavor(rsp) {
-		rdp = this_cpu_ptr(&rcu_data);
-		do_nocb_deferred_wakeup(rdp);
-	}
+	rdp = this_cpu_ptr(&rcu_data);
+	do_nocb_deferred_wakeup(rdp);
 	rcu_prepare_for_idle();
 	rcu_preempt_deferred_qs(current);
 	WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
@@ -1023,21 +1015,17 @@ bool rcu_lockdep_current_cpu_online(void)
 {
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp;
+	bool ret = false;
 
 	if (in_nmi() || !rcu_scheduler_fully_active)
 		return true;
 	preempt_disable();
-	for_each_rcu_flavor(rsp) {
-		rdp = this_cpu_ptr(&rcu_data);
-		rnp = rdp->mynode;
-		if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) {
-			preempt_enable();
-			return true;
-		}
-	}
+	rdp = this_cpu_ptr(&rcu_data);
+	rnp = rdp->mynode;
+	if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
+		ret = true;
 	preempt_enable();
-	return false;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
 
@@ -1515,10 +1503,7 @@ static void check_cpu_stall(struct rcu_data *rdp)
  */
 void rcu_cpu_stall_reset(void)
 {
-	struct rcu_state *rsp;
-
-	for_each_rcu_flavor(rsp)
-		WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
+	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
 }
 
 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
@@ -3133,17 +3118,12 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy)
 	bool al = true;
 	bool hc = false;
 	struct rcu_data *rdp;
-	struct rcu_state *rsp;
 
-	for_each_rcu_flavor(rsp) {
-		rdp = this_cpu_ptr(&rcu_data);
-		if (rcu_segcblist_empty(&rdp->cblist))
-			continue;
+	rdp = this_cpu_ptr(&rcu_data);
+	if (!rcu_segcblist_empty(&rdp->cblist)) {
 		hc = true;
-		if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
+		if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist))
 			al = false;
-			break;
-		}
 	}
 	if (all_lazy)
 		*all_lazy = al;
@@ -3435,15 +3415,12 @@ int rcutree_online_cpu(unsigned int cpu)
 	unsigned long flags;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp;
 
-	for_each_rcu_flavor(rsp) {
-		rdp = per_cpu_ptr(&rcu_data, cpu);
-		rnp = rdp->mynode;
-		raw_spin_lock_irqsave_rcu_node(rnp, flags);
-		rnp->ffmask |= rdp->grpmask;
-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-	}
+	rdp = per_cpu_ptr(&rcu_data, cpu);
+	rnp = rdp->mynode;
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
+	rnp->ffmask |= rdp->grpmask;
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (IS_ENABLED(CONFIG_TREE_SRCU))
 		srcu_online_cpu(cpu);
 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
@@ -3462,15 +3439,12 @@ int rcutree_offline_cpu(unsigned int cpu)
 	unsigned long flags;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp;
 
-	for_each_rcu_flavor(rsp) {
-		rdp = per_cpu_ptr(&rcu_data, cpu);
-		rnp = rdp->mynode;
-		raw_spin_lock_irqsave_rcu_node(rnp, flags);
-		rnp->ffmask &= ~rdp->grpmask;
-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-	}
+	rdp = per_cpu_ptr(&rcu_data, cpu);
+	rnp = rdp->mynode;
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
+	rnp->ffmask &= ~rdp->grpmask;
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	rcutree_affinity_setting(cpu, cpu);
 	if (IS_ENABLED(CONFIG_TREE_SRCU))
@@ -3499,34 +3473,32 @@ void rcu_cpu_starting(unsigned int cpu)
 	unsigned long oldmask;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp;
+	struct rcu_state *rsp = &rcu_state;
 
 	if (per_cpu(rcu_cpu_started, cpu))
 		return;
 
 	per_cpu(rcu_cpu_started, cpu) = 1;
 
-	for_each_rcu_flavor(rsp) {
-		rdp = per_cpu_ptr(&rcu_data, cpu);
-		rnp = rdp->mynode;
-		mask = rdp->grpmask;
-		raw_spin_lock_irqsave_rcu_node(rnp, flags);
-		rnp->qsmaskinitnext |= mask;
-		oldmask = rnp->expmaskinitnext;
-		rnp->expmaskinitnext |= mask;
-		oldmask ^= rnp->expmaskinitnext;
-		nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
-		/* Allow lockless access for expedited grace periods. */
-		smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
-		rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
-		rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
-		rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
-		if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
-			/* Report QS -after- changing ->qsmaskinitnext! */
-			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
-		} else {
-			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-		}
+	rdp = per_cpu_ptr(&rcu_data, cpu);
+	rnp = rdp->mynode;
+	mask = rdp->grpmask;
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
+	rnp->qsmaskinitnext |= mask;
+	oldmask = rnp->expmaskinitnext;
+	rnp->expmaskinitnext |= mask;
+	oldmask ^= rnp->expmaskinitnext;
+	nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
+	/* Allow lockless access for expedited grace periods. */
+	smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
+	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
+	rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
+	rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
+	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
+		/* Report QS -after- changing ->qsmaskinitnext! */
+		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
+	} else {
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
 }
@@ -3643,7 +3615,6 @@ static int __init rcu_spawn_gp_kthread(void)
 	unsigned long flags;
 	int kthread_prio_in = kthread_prio;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp;
 	struct sched_param sp;
 	struct task_struct *t;
 
@@ -3663,19 +3634,17 @@ static int __init rcu_spawn_gp_kthread(void)
 			 kthread_prio, kthread_prio_in);
 
 	rcu_scheduler_fully_active = 1;
-	for_each_rcu_flavor(rsp) {
-		t = kthread_create(rcu_gp_kthread, NULL, "%s", rsp->name);
-		BUG_ON(IS_ERR(t));
-		rnp = rcu_get_root();
-		raw_spin_lock_irqsave_rcu_node(rnp, flags);
-		rsp->gp_kthread = t;
-		if (kthread_prio) {
-			sp.sched_priority = kthread_prio;
-			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-		}
-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-		wake_up_process(t);
+	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
+	BUG_ON(IS_ERR(t));
+	rnp = rcu_get_root();
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
+	rcu_state.gp_kthread = t;
+	if (kthread_prio) {
+		sp.sched_priority = kthread_prio;
+		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	}
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+	wake_up_process(t);
 	rcu_spawn_nocb_kthreads();
 	rcu_spawn_boost_kthreads();
 	return 0;
@@ -3781,7 +3750,6 @@ static void __init rcu_init_one(void)
 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
 		rcu_boot_init_percpu_data(i);
 	}
-	list_add(&rsp->flavors, &rcu_struct_flavors);
 }
 
 /*
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 6f1b1a3fc23d..8abc15c42d84 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -360,7 +360,6 @@ struct rcu_state {
 						/*  jiffies. */
 	const char *name;			/* Name of structure. */
 	char abbr;				/* Abbreviated name. */
-	struct list_head flavors;		/* List of RCU flavors. */
 
 	spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
 						/* Synchronize offline with */
@@ -417,12 +416,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
 #define RCU_NAME rcu_name
 #endif /* #else #ifdef CONFIG_TRACING */
 
-extern struct list_head rcu_struct_flavors;
-
-/* Sequence through rcu_state structures for each RCU flavor. */
-#define for_each_rcu_flavor(rsp) \
-	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
-
 /*
  * RCU implementation internal declarations:
  */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 59d66ee26310..878a1d2cd465 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1561,31 +1561,28 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
 	struct rcu_data *rdp;
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	struct rcu_node *rnp;
-	struct rcu_state *rsp;
 
 	/* Exit early if we advanced recently. */
 	if (jiffies == rdtp->last_advance_all)
 		return false;
 	rdtp->last_advance_all = jiffies;
 
-	for_each_rcu_flavor(rsp) {
-		rdp = this_cpu_ptr(&rcu_data);
-		rnp = rdp->mynode;
+	rdp = this_cpu_ptr(&rcu_data);
+	rnp = rdp->mynode;
 
-		/*
-		 * Don't bother checking unless a grace period has
-		 * completed since we last checked and there are
-		 * callbacks not yet ready to invoke.
-		 */
-		if ((rcu_seq_completed_gp(rdp->gp_seq,
-					  rcu_seq_current(&rnp->gp_seq)) ||
-		     unlikely(READ_ONCE(rdp->gpwrap))) &&
-		    rcu_segcblist_pend_cbs(&rdp->cblist))
-			note_gp_changes(rdp);
-
-		if (rcu_segcblist_ready_cbs(&rdp->cblist))
-			cbs_ready = true;
-	}
+	/*
+	 * Don't bother checking unless a grace period has
+	 * completed since we last checked and there are
+	 * callbacks not yet ready to invoke.
+	 */
+	if ((rcu_seq_completed_gp(rdp->gp_seq,
+				  rcu_seq_current(&rnp->gp_seq)) ||
+	     unlikely(READ_ONCE(rdp->gpwrap))) &&
+	    rcu_segcblist_pend_cbs(&rdp->cblist))
+		note_gp_changes(rdp);
+
+	if (rcu_segcblist_ready_cbs(&rdp->cblist))
+		cbs_ready = true;
 	return cbs_ready;
 }
 
@@ -1648,7 +1645,6 @@ static void rcu_prepare_for_idle(void)
 	struct rcu_data *rdp;
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	struct rcu_node *rnp;
-	struct rcu_state *rsp;
 	int tne;
 
 	lockdep_assert_irqs_disabled();
@@ -1686,10 +1682,8 @@ static void rcu_prepare_for_idle(void)
 	if (rdtp->last_accelerate == jiffies)
 		return;
 	rdtp->last_accelerate = jiffies;
-	for_each_rcu_flavor(rsp) {
-		rdp = this_cpu_ptr(&rcu_data);
-		if (!rcu_segcblist_pend_cbs(&rdp->cblist))
-			continue;
+	rdp = this_cpu_ptr(&rcu_data);
+	if (rcu_segcblist_pend_cbs(&rdp->cblist)) {
 		rnp = rdp->mynode;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
 		needwake = rcu_accelerate_cbs(rnp, rdp);
@@ -1824,10 +1818,7 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp)
 /* Increment ->ticks_this_gp for all flavors of RCU. */
 static void increment_cpu_stall_ticks(void)
 {
-	struct rcu_state *rsp;
-
-	for_each_rcu_flavor(rsp)
-		raw_cpu_inc(rcu_data.ticks_this_gp);
+	raw_cpu_inc(rcu_data.ticks_this_gp);
 }
 
 #ifdef CONFIG_RCU_NOCB_CPU
@@ -2384,7 +2375,6 @@ void __init rcu_init_nohz(void)
 {
 	int cpu;
 	bool need_rcu_nocb_mask = false;
-	struct rcu_state *rsp;
 
 #if defined(CONFIG_NO_HZ_FULL)
 	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
@@ -2418,11 +2408,9 @@ void __init rcu_init_nohz(void)
 	if (rcu_nocb_poll)
 		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
 
-	for_each_rcu_flavor(rsp) {
-		for_each_cpu(cpu, rcu_nocb_mask)
-			init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu));
-		rcu_organize_nocb_kthreads();
-	}
+	for_each_cpu(cpu, rcu_nocb_mask)
+		init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu));
+	rcu_organize_nocb_kthreads();
 }
 
 /* Initialize per-rcu_data variables for no-CBs CPUs. */
@@ -2489,11 +2477,8 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
  */
 static void rcu_spawn_all_nocb_kthreads(int cpu)
 {
-	struct rcu_state *rsp;
-
 	if (rcu_scheduler_fully_active)
-		for_each_rcu_flavor(rsp)
-			rcu_spawn_one_nocb_kthread(cpu);
+		rcu_spawn_one_nocb_kthread(cpu);
 }
 
 /*
-- 
2.17.1



* [PATCH tip/core/rcu 44/52] rcu: Simplify rcutorture_get_gp_data()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (42 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 43/52] rcu: Remove for_each_rcu_flavor() flavor-traversal macro Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 45/52] rcu: Restructure rcu_check_gp_kthread_starvation() Paul E. McKenney
                   ` (8 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

This commit restructures rcutorture_get_gp_data() to take advantage of
the fact that there is only one flavor of RCU.
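
The result leans on C's case fall-through to give all three historical
flavors one shared body.  A minimal standalone rendering of that shape
(the enum and values are invented for the example):

#include <stdio.h>

enum rcutorture_type { RCU_FLAVOR, RCU_BH_FLAVOR, RCU_SCHED_FLAVOR, OTHER };

static struct { int gp_flags; unsigned long gp_seq; } rcu_state = { 1, 42 };

static void get_gp_data(enum rcutorture_type t, int *flags, unsigned long *seq)
{
        switch (t) {
        case RCU_FLAVOR:        /* all three cases fall through... */
        case RCU_BH_FLAVOR:
        case RCU_SCHED_FLAVOR:  /* ...to one body reading the global */
                *flags = rcu_state.gp_flags;
                *seq = rcu_state.gp_seq;
                break;
        default:
                break;
        }
}

int main(void)
{
        int flags = 0;
        unsigned long seq = 0;

        get_gp_data(RCU_BH_FLAVOR, &flags, &seq);
        printf("flags=%d gp_seq=%lu\n", flags, seq);
        return 0;
}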

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index cec025cc1f87..0eccfd02e0b3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -597,21 +597,16 @@ EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 			    unsigned long *gp_seq)
 {
-	struct rcu_state *rsp = NULL;
-
 	switch (test_type) {
 	case RCU_FLAVOR:
 	case RCU_BH_FLAVOR:
 	case RCU_SCHED_FLAVOR:
-		rsp = &rcu_state;
+		*flags = READ_ONCE(rcu_state.gp_flags);
+		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
 		break;
 	default:
 		break;
 	}
-	if (rsp == NULL)
-		return;
-	*flags = READ_ONCE(rsp->gp_flags);
-	*gp_seq = rcu_seq_current(&rsp->gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 
-- 
2.17.1



* [PATCH tip/core/rcu 45/52] rcu: Restructure rcu_check_gp_kthread_starvation()
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (43 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 44/52] rcu: Simplify rcutorture_get_gp_data() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 46/52] rcu: Eliminate stall-warning use of rsp Paul E. McKenney
                   ` (7 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

This commit removes the rsp and gpa local variables, repurposes the j
local variable to hold an elapsed time rather than a timestamp, and adds
a gpk (GP kthread) local to improve readability.
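
The shape of the cleanup in miniature, with invented values: compute the
age once, then test and print that single number instead of carrying both
a timestamp and the current time around.

#include <stdio.h>

#define HZ 250  /* assumed tick rate for the example */

int main(void)
{
        unsigned long jiffies = 10000;          /* "now" */
        unsigned long gp_activity = 9000;       /* last GP kthread activity */
        unsigned long j = jiffies - gp_activity; /* j now holds an age */

        if (j > 2 * HZ)
                printf("kthread starved for %ld jiffies!\n", (long)j);
        return 0;
}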

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 25 +++++++++++--------------
 1 file changed, 11 insertions(+), 14 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0eccfd02e0b3..488ac7a36835 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1224,24 +1224,21 @@ static const char *gp_state_getname(short gs)
  */
 static void rcu_check_gp_kthread_starvation(void)
 {
-	unsigned long gpa;
+	struct task_struct *gpk = rcu_state.gp_kthread;
 	unsigned long j;
-	struct rcu_state *rsp = &rcu_state;
 
-	j = jiffies;
-	gpa = READ_ONCE(rsp->gp_activity);
-	if (j - gpa > 2 * HZ) {
+	j = jiffies - READ_ONCE(rcu_state.gp_activity);
+	if (j > 2 * HZ) {
 		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
-		       rsp->name, j - gpa,
-		       (long)rcu_seq_current(&rsp->gp_seq),
-		       rsp->gp_flags,
-		       gp_state_getname(rsp->gp_state), rsp->gp_state,
-		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
-		       rsp->gp_kthread ? task_cpu(rsp->gp_kthread) : -1);
-		if (rsp->gp_kthread) {
+		       rcu_state.name, j,
+		       (long)rcu_seq_current(&rcu_state.gp_seq),
+		       rcu_state.gp_flags,
+		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
+		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
+		if (gpk) {
 			pr_err("RCU grace-period kthread stack dump:\n");
-			sched_show_task(rsp->gp_kthread);
-			wake_up_process(rsp->gp_kthread);
+			sched_show_task(gpk);
+			wake_up_process(gpk);
 		}
 	}
 }
-- 
2.17.1



* [PATCH tip/core/rcu 46/52] rcu: Eliminate stall-warning use of rsp
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (44 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 45/52] rcu: Restructure rcu_check_gp_kthread_starvation() Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 47/52] rcu: Eliminate grace-period management code " Paul E. McKenney
                   ` (6 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

Now that there is only one rcu_state structure, there is less point
in maintaining a pointer to it.  This commit therefore replaces rsp
with &rcu_state in print_other_cpu_stall(), print_cpu_stall(), and
check_cpu_stall().
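
The deadline tests in the stall-warning code below rely on
ULONG_CMP_GE() for wrap-tolerant comparisons.  A standalone demo in the
style of the definition in kernel/rcu/rcu.h:

#include <limits.h>
#include <stdio.h>

/* "a >= b" modulo wrap, valid while the two counters stay within
 * ULONG_MAX/2 of each other. */
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
        unsigned long deadline = ULONG_MAX - 5;   /* just before wrap */

        printf("%d\n", (int)ULONG_CMP_GE(deadline, deadline));      /* 1 */
        printf("%d\n", (int)ULONG_CMP_GE(3UL, deadline));    /* 1: wrapped */
        printf("%d\n", (int)ULONG_CMP_GE(deadline - 10, deadline)); /* 0 */
        return 0;
}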

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 80 +++++++++++++++++++++++------------------------
 1 file changed, 39 insertions(+), 41 deletions(-)
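
A recurring idiom in the stall-warning code is the wraparound-safe
comparison of jiffies counters via ULONG_CMP_GE() rather than a plain
">=".  A standalone sketch of why (the macro body matches the
definition in kernel/rcu/rcu.h; the values are a contrived example):

        #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

        /* Deadline recorded just before the jiffies counter wraps,
         * current time sampled just after the wrap:               */
        unsigned long js = ULONG_MAX - 1;       /* stall deadline */
        unsigned long j = 2;                    /* "now"          */

        /* "j >= js" is false, wrongly suppressing the stall check.
         * ULONG_CMP_GE(j, js) is true, because (j - js) wraps to a
         * small value, correctly treating j as later than js.     */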

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 488ac7a36835..ccef04bf1636 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1272,17 +1272,17 @@ static void rcu_dump_cpu_stacks(void)
 static void rcu_stall_kick_kthreads(void)
 {
 	unsigned long j;
-	struct rcu_state *rsp = &rcu_state;
 
 	if (!rcu_kick_kthreads)
 		return;
-	j = READ_ONCE(rsp->jiffies_kick_kthreads);
-	if (time_after(jiffies, j) && rsp->gp_kthread &&
-	    (rcu_gp_in_progress() || READ_ONCE(rsp->gp_flags))) {
-		WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
+	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
+	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
+	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
+		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
+			  rcu_state.name);
 		rcu_ftrace_dump(DUMP_ALL);
-		wake_up_process(rsp->gp_kthread);
-		WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
+		wake_up_process(rcu_state.gp_kthread);
+		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
 	}
 }
 
@@ -1300,7 +1300,6 @@ static void print_other_cpu_stall(unsigned long gp_seq)
 	unsigned long j;
 	int ndetected = 0;
 	struct rcu_node *rnp = rcu_get_root();
-	struct rcu_state *rsp = &rcu_state;
 	long totqlen = 0;
 
 	/* Kick and suppress, if so configured. */
@@ -1313,7 +1312,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
 	 * RCU CPU stall warnings.
 	 */
-	pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
+	pr_err("INFO: %s detected stalls on CPUs/tasks:", rcu_state.name);
 	print_cpu_stall_info_begin();
 	rcu_for_each_leaf_node(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -1333,21 +1332,21 @@ static void print_other_cpu_stall(unsigned long gp_seq)
 		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data,
 							    cpu)->cblist);
 	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
-	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
-	       (long)rcu_seq_current(&rsp->gp_seq), totqlen);
+	       smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
+	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
 	if (ndetected) {
 		rcu_dump_cpu_stacks();
 
 		/* Complain about tasks blocking the grace period. */
 		rcu_print_detail_task_stall();
 	} else {
-		if (rcu_seq_current(&rsp->gp_seq) != gp_seq) {
+		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
 			pr_err("INFO: Stall ended before state dump start\n");
 		} else {
 			j = jiffies;
-			gpa = READ_ONCE(rsp->gp_activity);
+			gpa = READ_ONCE(rcu_state.gp_activity);
 			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
-			       rsp->name, j - gpa, j, gpa,
+			       rcu_state.name, j - gpa, j, gpa,
 			       jiffies_till_next_fqs,
 			       rcu_get_root()->qsmask);
 			/* In this case, the current CPU might be at fault. */
@@ -1355,8 +1354,8 @@ static void print_other_cpu_stall(unsigned long gp_seq)
 		}
 	}
 	/* Rewrite if needed in case of slow consoles. */
-	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
-		WRITE_ONCE(rsp->jiffies_stall,
+	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
+		WRITE_ONCE(rcu_state.jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 
 	rcu_check_gp_kthread_starvation();
@@ -1372,7 +1371,6 @@ static void print_cpu_stall(void)
 	unsigned long flags;
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rcu_get_root();
-	struct rcu_state *rsp = &rcu_state;
 	long totqlen = 0;
 
 	/* Kick and suppress, if so configured. */
@@ -1385,7 +1383,7 @@ static void print_cpu_stall(void)
 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
 	 * RCU CPU stall warnings.
 	 */
-	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
+	pr_err("INFO: %s self-detected stall on CPU", rcu_state.name);
 	print_cpu_stall_info_begin();
 	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
 	print_cpu_stall_info(smp_processor_id());
@@ -1395,8 +1393,8 @@ static void print_cpu_stall(void)
 		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data,
 							    cpu)->cblist);
 	pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
-		jiffies - rsp->gp_start,
-		(long)rcu_seq_current(&rsp->gp_seq), totqlen);
+		jiffies - rcu_state.gp_start,
+		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
 
 	rcu_check_gp_kthread_starvation();
 
@@ -1404,8 +1402,8 @@ static void print_cpu_stall(void)
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	/* Rewrite if needed in case of slow consoles. */
-	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
-		WRITE_ONCE(rsp->jiffies_stall,
+	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
+		WRITE_ONCE(rcu_state.jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
@@ -1430,7 +1428,6 @@ static void check_cpu_stall(struct rcu_data *rdp)
 	unsigned long jn;
 	unsigned long js;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp = &rcu_state;
 
 	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
 	    !rcu_gp_in_progress())
@@ -1441,27 +1438,28 @@ static void check_cpu_stall(struct rcu_data *rdp)
 	/*
 	 * Lots of memory barriers to reject false positives.
 	 *
-	 * The idea is to pick up rsp->gp_seq, then rsp->jiffies_stall,
-	 * then rsp->gp_start, and finally another copy of rsp->gp_seq.
-	 * These values are updated in the opposite order with memory
-	 * barriers (or equivalent) during grace-period initialization
-	 * and cleanup.  Now, a false positive can occur if we get an new
-	 * value of rsp->gp_start and a old value of rsp->jiffies_stall.
-	 * But given the memory barriers, the only way that this can happen
-	 * is if one grace period ends and another starts between these
-	 * two fetches.  This is detected by comparing the second fetch
-	 * of rsp->gp_seq with the previous fetch from rsp->gp_seq.
+	 * The idea is to pick up rcu_state.gp_seq, then
+	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
+	 * another copy of rcu_state.gp_seq.  These values are updated in
+	 * the opposite order with memory barriers (or equivalent) during
+	 * grace-period initialization and cleanup.  Now, a false positive
+	 * can occur if we get a new value of rcu_state.gp_start and an old
+	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
+	 * the only way that this can happen is if one grace period ends
+	 * and another starts between these two fetches.  This is detected
+	 * by comparing the second fetch of rcu_state.gp_seq with the
+	 * previous fetch from rcu_state.gp_seq.
 	 *
-	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
-	 * and rsp->gp_start suffice to forestall false positives.
+	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
+	 * and rcu_state.gp_start suffice to forestall false positives.
 	 */
-	gs1 = READ_ONCE(rsp->gp_seq);
+	gs1 = READ_ONCE(rcu_state.gp_seq);
 	smp_rmb(); /* Pick up ->gp_seq first... */
-	js = READ_ONCE(rsp->jiffies_stall);
+	js = READ_ONCE(rcu_state.jiffies_stall);
 	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
-	gps = READ_ONCE(rsp->gp_start);
+	gps = READ_ONCE(rcu_state.gp_start);
 	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
-	gs2 = READ_ONCE(rsp->gp_seq);
+	gs2 = READ_ONCE(rcu_state.gp_seq);
 	if (gs1 != gs2 ||
 	    ULONG_CMP_LT(j, js) ||
 	    ULONG_CMP_GE(gps, js))
@@ -1470,14 +1468,14 @@ static void check_cpu_stall(struct rcu_data *rdp)
 	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
 	if (rcu_gp_in_progress() &&
 	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
-	    cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
+	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall();
 
 	} else if (rcu_gp_in_progress() &&
 		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
-		   cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
+		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
 
 		/* They had a few time units to dump stack, so complain. */
 		print_other_cpu_stall(gs2);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 47/52] rcu: Eliminate grace-period management code use of rsp
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (45 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 46/52] rcu: Eliminate stall-warning use of rsp Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 48/52] rcu: Eliminate callback-registration/invocation " Paul E. McKenney
                   ` (5 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

Now that there is only one rcu_state structure, there is less point
in maintaining a pointer to it.  This commit therefore replaces
rsp with &rcu_state in rcu_start_this_gp(), rcu_accelerate_cbs(),
__note_gp_changes(), rcu_gp_init(), rcu_gp_fqs(), rcu_gp_cleanup(),
rcu_gp_kthread(), and rcu_report_qs_rsp().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 168 ++++++++++++++++++++++------------------------
 1 file changed, 82 insertions(+), 86 deletions(-)
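
For orientation, the grace-period kthread and the CPUs requesting grace
periods hand off through rcu_state.gp_flags and rcu_state.gp_wq.  A
condensed sketch of the handshake as it reads after this patch
(abridged from the diff below):

        /* Requester (rcu_start_this_gp()), root rcu_node lock held: */
        WRITE_ONCE(rcu_state.gp_flags,
                   rcu_state.gp_flags | RCU_GP_FLAG_INIT);
        /* ...after which the caller invokes rcu_gp_kthread_wake(). */

        /* Kthread (rcu_gp_kthread()): sleep until the flag is set. */
        rcu_state.gp_state = RCU_GP_WAIT_GPS;
        swait_event_idle_exclusive(rcu_state.gp_wq,
                                   READ_ONCE(rcu_state.gp_flags) &
                                   RCU_GP_FLAG_INIT);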

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index ccef04bf1636..734cd95368e6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1524,7 +1524,6 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
 			      unsigned long gp_seq_req)
 {
 	bool ret = false;
-	struct rcu_state *rsp = &rcu_state;
 	struct rcu_node *rnp;
 
 	/*
@@ -1573,13 +1572,13 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
 		goto unlock_out;
 	}
 	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
-	WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
-	rsp->gp_req_activity = jiffies;
-	if (!rsp->gp_kthread) {
+	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
+	rcu_state.gp_req_activity = jiffies;
+	if (!rcu_state.gp_kthread) {
 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
 		goto unlock_out;
 	}
-	trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("newreq"));
+	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rcu_state.gp_seq), TPS("newreq"));
 	ret = true;  /* Caller must wake GP kthread. */
 unlock_out:
 	/* Push furthest requested GP to leaf node and rcu_data structure. */
@@ -1641,7 +1640,6 @@ static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	unsigned long gp_seq_req;
 	bool ret = false;
-	struct rcu_state *rsp = &rcu_state;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -1659,15 +1657,15 @@ static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
 	 * accelerating callback invocation to an earlier grace-period
 	 * number.
 	 */
-	gp_seq_req = rcu_seq_snap(&rsp->gp_seq);
+	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
 	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
 		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
 
 	/* Trace depending on how much we were able to accelerate. */
 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
-		trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccWaitCB"));
+		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB"));
 	else
-		trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccReadyCB"));
+		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB"));
 	return ret;
 }
 
@@ -1736,7 +1734,6 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	bool ret;
 	bool need_gp;
-	struct rcu_state __maybe_unused *rsp = &rcu_state;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -1747,7 +1744,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
 	    unlikely(READ_ONCE(rdp->gpwrap))) {
 		ret = rcu_advance_cbs(rnp, rdp); /* Advance callbacks. */
-		trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
+		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
 	} else {
 		ret = rcu_accelerate_cbs(rnp, rdp); /* Recent callbacks. */
 	}
@@ -1760,7 +1757,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
 		 * set up to detect a quiescent state, otherwise don't
 		 * go looking for one.
 		 */
-		trace_rcu_grace_period(rsp->name, rnp->gp_seq, TPS("cpustart"));
+		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
 		need_gp = !!(rnp->qsmask & rdp->grpmask);
 		rdp->cpu_no_qs.b.norm = need_gp;
 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
@@ -1813,16 +1810,15 @@ static bool rcu_gp_init(void)
 	unsigned long mask;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root();
-	struct rcu_state *rsp = &rcu_state;
 
-	WRITE_ONCE(rsp->gp_activity, jiffies);
+	WRITE_ONCE(rcu_state.gp_activity, jiffies);
 	raw_spin_lock_irq_rcu_node(rnp);
-	if (!READ_ONCE(rsp->gp_flags)) {
+	if (!READ_ONCE(rcu_state.gp_flags)) {
 		/* Spurious wakeup, tell caller to go back to sleep.  */
 		raw_spin_unlock_irq_rcu_node(rnp);
 		return false;
 	}
-	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
+	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
 
 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
 		/*
@@ -1836,8 +1832,8 @@ static bool rcu_gp_init(void)
 	/* Advance to a new grace period and initialize state. */
 	record_gp_stall_check_time();
 	/* Record GP times before starting GP, hence rcu_seq_start(). */
-	rcu_seq_start(&rsp->gp_seq);
-	trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start"));
+	rcu_seq_start(&rcu_state.gp_seq);
+	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
 	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
@@ -1846,15 +1842,15 @@ static bool rcu_gp_init(void)
 	 * for subsequent online CPUs, and that quiescent-state forcing
 	 * will handle subsequent offline CPUs.
 	 */
-	rsp->gp_state = RCU_GP_ONOFF;
+	rcu_state.gp_state = RCU_GP_ONOFF;
 	rcu_for_each_leaf_node(rnp) {
-		spin_lock(&rsp->ofl_lock);
+		spin_lock(&rcu_state.ofl_lock);
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
 		    !rnp->wait_blkd_tasks) {
 			/* Nothing to do on this leaf rcu_node structure. */
 			raw_spin_unlock_irq_rcu_node(rnp);
-			spin_unlock(&rsp->ofl_lock);
+			spin_unlock(&rcu_state.ofl_lock);
 			continue;
 		}
 
@@ -1890,34 +1886,34 @@ static bool rcu_gp_init(void)
 		}
 
 		raw_spin_unlock_irq_rcu_node(rnp);
-		spin_unlock(&rsp->ofl_lock);
+		spin_unlock(&rcu_state.ofl_lock);
 	}
 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
 
 	/*
 	 * Set the quiescent-state-needed bits in all the rcu_node
-	 * structures for all currently online CPUs in breadth-first order,
-	 * starting from the root rcu_node structure, relying on the layout
-	 * of the tree within the rsp->node[] array.  Note that other CPUs
-	 * will access only the leaves of the hierarchy, thus seeing that no
-	 * grace period is in progress, at least until the corresponding
-	 * leaf node has been initialized.
+	 * structures for all currently online CPUs in breadth-first
+	 * order, starting from the root rcu_node structure, relying on the
+	 * layout of the tree within the rcu_state.node[] array.  Note that
+	 * other CPUs will access only the leaves of the hierarchy, thus
+	 * seeing that no grace period is in progress, at least until the
+	 * corresponding leaf node has been initialized.
 	 *
 	 * The grace period cannot complete until the initialization
 	 * process finishes, because this kthread handles both.
 	 */
-	rsp->gp_state = RCU_GP_INIT;
+	rcu_state.gp_state = RCU_GP_INIT;
 	rcu_for_each_node_breadth_first(rnp) {
 		rcu_gp_slow(gp_init_delay);
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rdp = this_cpu_ptr(&rcu_data);
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
-		WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
+		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
 		if (rnp == rdp->mynode)
 			(void)__note_gp_changes(rnp, rdp);
 		rcu_preempt_boost_start_gp(rnp);
-		trace_rcu_grace_period_init(rsp->name, rnp->gp_seq,
+		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
 					    rnp->level, rnp->grplo,
 					    rnp->grphi, rnp->qsmask);
 		/* Quiescent states for tasks on any now-offline CPUs. */
@@ -1928,7 +1924,7 @@ static bool rcu_gp_init(void)
 		else
 			raw_spin_unlock_irq_rcu_node(rnp);
 		cond_resched_tasks_rcu_qs();
-		WRITE_ONCE(rsp->gp_activity, jiffies);
+		WRITE_ONCE(rcu_state.gp_activity, jiffies);
 	}
 
 	return true;
@@ -1960,10 +1956,9 @@ static bool rcu_gp_fqs_check_wake(int *gfp)
 static void rcu_gp_fqs(bool first_time)
 {
 	struct rcu_node *rnp = rcu_get_root();
-	struct rcu_state *rsp = &rcu_state;
 
-	WRITE_ONCE(rsp->gp_activity, jiffies);
-	rsp->n_force_qs++;
+	WRITE_ONCE(rcu_state.gp_activity, jiffies);
+	rcu_state.n_force_qs++;
 	if (first_time) {
 		/* Collect dyntick-idle snapshots. */
 		force_qs_rnp(dyntick_save_progress_counter);
@@ -1972,10 +1967,10 @@ static void rcu_gp_fqs(bool first_time)
 		force_qs_rnp(rcu_implicit_dynticks_qs);
 	}
 	/* Clear flag to prevent immediate re-entry. */
-	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
 		raw_spin_lock_irq_rcu_node(rnp);
-		WRITE_ONCE(rsp->gp_flags,
-			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
+		WRITE_ONCE(rcu_state.gp_flags,
+			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
 		raw_spin_unlock_irq_rcu_node(rnp);
 	}
 }
@@ -1990,14 +1985,13 @@ static void rcu_gp_cleanup(void)
 	unsigned long new_gp_seq;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root();
-	struct rcu_state *rsp = &rcu_state;
 	struct swait_queue_head *sq;
 
-	WRITE_ONCE(rsp->gp_activity, jiffies);
+	WRITE_ONCE(rcu_state.gp_activity, jiffies);
 	raw_spin_lock_irq_rcu_node(rnp);
-	gp_duration = jiffies - rsp->gp_start;
-	if (gp_duration > rsp->gp_max)
-		rsp->gp_max = gp_duration;
+	gp_duration = jiffies - rcu_state.gp_start;
+	if (gp_duration > rcu_state.gp_max)
+		rcu_state.gp_max = gp_duration;
 
 	/*
 	 * We know the grace period is complete, but to everyone else
@@ -2018,7 +2012,7 @@ static void rcu_gp_cleanup(void)
 	 * the rcu_node structures before the beginning of the next grace
 	 * period is recorded in any of the rcu_node structures.
 	 */
-	new_gp_seq = rsp->gp_seq;
+	new_gp_seq = rcu_state.gp_seq;
 	rcu_seq_end(&new_gp_seq);
 	rcu_for_each_node_breadth_first(rnp) {
 		raw_spin_lock_irq_rcu_node(rnp);
@@ -2035,16 +2029,16 @@ static void rcu_gp_cleanup(void)
 		raw_spin_unlock_irq_rcu_node(rnp);
 		rcu_nocb_gp_cleanup(sq);
 		cond_resched_tasks_rcu_qs();
-		WRITE_ONCE(rsp->gp_activity, jiffies);
+		WRITE_ONCE(rcu_state.gp_activity, jiffies);
 		rcu_gp_slow(gp_cleanup_delay);
 	}
 	rnp = rcu_get_root();
-	raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
+	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
 
 	/* Declare grace period done. */
-	rcu_seq_end(&rsp->gp_seq);
-	trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end"));
-	rsp->gp_state = RCU_GP_IDLE;
+	rcu_seq_end(&rcu_state.gp_seq);
+	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
+	rcu_state.gp_state = RCU_GP_IDLE;
 	/* Check for GP requests since above loop. */
 	rdp = this_cpu_ptr(&rcu_data);
 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
@@ -2054,12 +2048,14 @@ static void rcu_gp_cleanup(void)
 	}
 	/* Advance CBs to reduce false positives below. */
 	if (!rcu_accelerate_cbs(rnp, rdp) && needgp) {
-		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
-		rsp->gp_req_activity = jiffies;
-		trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq),
+		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
+		rcu_state.gp_req_activity = jiffies;
+		trace_rcu_grace_period(rcu_state.name,
+				       READ_ONCE(rcu_state.gp_seq),
 				       TPS("newreq"));
 	} else {
-		WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
+		WRITE_ONCE(rcu_state.gp_flags,
+			   rcu_state.gp_flags & RCU_GP_FLAG_INIT);
 	}
 	raw_spin_unlock_irq_rcu_node(rnp);
 }
@@ -2073,7 +2069,6 @@ static int __noreturn rcu_gp_kthread(void *unused)
 	int gf;
 	unsigned long j;
 	int ret;
-	struct rcu_state *rsp = &rcu_state;
 	struct rcu_node *rnp = rcu_get_root();
 
 	rcu_bind_gp_kthread();
@@ -2081,21 +2076,22 @@ static int __noreturn rcu_gp_kthread(void *unused)
 
 		/* Handle grace-period start. */
 		for (;;) {
-			trace_rcu_grace_period(rsp->name,
-					       READ_ONCE(rsp->gp_seq),
+			trace_rcu_grace_period(rcu_state.name,
+					       READ_ONCE(rcu_state.gp_seq),
 					       TPS("reqwait"));
-			rsp->gp_state = RCU_GP_WAIT_GPS;
-			swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
-						     RCU_GP_FLAG_INIT);
-			rsp->gp_state = RCU_GP_DONE_GPS;
+			rcu_state.gp_state = RCU_GP_WAIT_GPS;
+			swait_event_idle_exclusive(rcu_state.gp_wq,
+					 READ_ONCE(rcu_state.gp_flags) &
+					 RCU_GP_FLAG_INIT);
+			rcu_state.gp_state = RCU_GP_DONE_GPS;
 			/* Locking provides needed memory barrier. */
 			if (rcu_gp_init())
 				break;
 			cond_resched_tasks_rcu_qs();
-			WRITE_ONCE(rsp->gp_activity, jiffies);
+			WRITE_ONCE(rcu_state.gp_activity, jiffies);
 			WARN_ON(signal_pending(current));
-			trace_rcu_grace_period(rsp->name,
-					       READ_ONCE(rsp->gp_seq),
+			trace_rcu_grace_period(rcu_state.name,
+					       READ_ONCE(rcu_state.gp_seq),
 					       TPS("reqwaitsig"));
 		}
 
@@ -2105,58 +2101,59 @@ static int __noreturn rcu_gp_kthread(void *unused)
 		ret = 0;
 		for (;;) {
 			if (!ret) {
-				rsp->jiffies_force_qs = jiffies + j;
-				WRITE_ONCE(rsp->jiffies_kick_kthreads,
+				rcu_state.jiffies_force_qs = jiffies + j;
+				WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
 					   jiffies + 3 * j);
 			}
-			trace_rcu_grace_period(rsp->name,
-					       READ_ONCE(rsp->gp_seq),
+			trace_rcu_grace_period(rcu_state.name,
+					       READ_ONCE(rcu_state.gp_seq),
 					       TPS("fqswait"));
-			rsp->gp_state = RCU_GP_WAIT_FQS;
-			ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
+			rcu_state.gp_state = RCU_GP_WAIT_FQS;
+			ret = swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
 					rcu_gp_fqs_check_wake(&gf), j);
-			rsp->gp_state = RCU_GP_DOING_FQS;
+			rcu_state.gp_state = RCU_GP_DOING_FQS;
 			/* Locking provides needed memory barriers. */
 			/* If grace period done, leave loop. */
 			if (!READ_ONCE(rnp->qsmask) &&
 			    !rcu_preempt_blocked_readers_cgp(rnp))
 				break;
 			/* If time for quiescent-state forcing, do it. */
-			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
+			if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) ||
 			    (gf & RCU_GP_FLAG_FQS)) {
-				trace_rcu_grace_period(rsp->name,
-						       READ_ONCE(rsp->gp_seq),
+				trace_rcu_grace_period(rcu_state.name,
+						       READ_ONCE(rcu_state.gp_seq),
 						       TPS("fqsstart"));
 				rcu_gp_fqs(first_gp_fqs);
 				first_gp_fqs = false;
-				trace_rcu_grace_period(rsp->name,
-						       READ_ONCE(rsp->gp_seq),
+				trace_rcu_grace_period(rcu_state.name,
+						       READ_ONCE(rcu_state.gp_seq),
 						       TPS("fqsend"));
 				cond_resched_tasks_rcu_qs();
-				WRITE_ONCE(rsp->gp_activity, jiffies);
+				WRITE_ONCE(rcu_state.gp_activity, jiffies);
 				ret = 0; /* Force full wait till next FQS. */
 				j = jiffies_till_next_fqs;
 			} else {
 				/* Deal with stray signal. */
 				cond_resched_tasks_rcu_qs();
-				WRITE_ONCE(rsp->gp_activity, jiffies);
+				WRITE_ONCE(rcu_state.gp_activity, jiffies);
 				WARN_ON(signal_pending(current));
-				trace_rcu_grace_period(rsp->name,
-						       READ_ONCE(rsp->gp_seq),
+				trace_rcu_grace_period(rcu_state.name,
+						       READ_ONCE(rcu_state.gp_seq),
 						       TPS("fqswaitsig"));
 				ret = 1; /* Keep old FQS timing. */
 				j = jiffies;
-				if (time_after(jiffies, rsp->jiffies_force_qs))
+				if (time_after(jiffies,
+					       rcu_state.jiffies_force_qs))
 					j = 1;
 				else
-					j = rsp->jiffies_force_qs - j;
+					j = rcu_state.jiffies_force_qs - j;
 			}
 		}
 
 		/* Handle grace-period end. */
-		rsp->gp_state = RCU_GP_CLEANUP;
+		rcu_state.gp_state = RCU_GP_CLEANUP;
 		rcu_gp_cleanup();
-		rsp->gp_state = RCU_GP_CLEANED;
+		rcu_state.gp_state = RCU_GP_CLEANED;
 	}
 }
 
@@ -2172,11 +2169,10 @@ static int __noreturn rcu_gp_kthread(void *unused)
 static void rcu_report_qs_rsp(unsigned long flags)
 	__releases(rcu_get_root()->lock)
 {
-	struct rcu_state *rsp = &rcu_state;
-
 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
 	WARN_ON_ONCE(!rcu_gp_in_progress());
-	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
+	WRITE_ONCE(rcu_state.gp_flags,
+		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
 	rcu_gp_kthread_wake();
 }
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 48/52] rcu: Eliminate callback-registration/invocation use of rsp
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (46 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 47/52] rcu: Eliminate grace-period management code " Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 49/52] rcu: Eliminate quiescent-state and grace-period-nonstart " Paul E. McKenney
                   ` (4 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

Now that there is only one rcu_state structure, there is less point in
maintaining a pointer to it.  This commit therefore replaces rsp with
&rcu_state in rcu_do_batch(), invoke_rcu_callbacks(), and __call_rcu().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)
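
For orientation, rcu_do_batch() moves the ready callbacks onto an
on-stack rcu_cblist so that they can be invoked with interrupts
enabled.  A condensed sketch (abridged from the diff below; batch
limits, counting, and tracing omitted):

        struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
        struct rcu_head *rhp;
        unsigned long flags;

        local_irq_save(flags);
        rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
        local_irq_restore(flags);

        /* Invoke the extracted callbacks with interrupts enabled. */
        for (rhp = rcu_cblist_dequeue(&rcl); rhp;
             rhp = rcu_cblist_dequeue(&rcl))
                __rcu_reclaim(rcu_state.name, rhp);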

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 734cd95368e6..e6f0d8b6bde3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2466,14 +2466,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	struct rcu_head *rhp;
 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 	long bl, count;
-	struct rcu_state *rsp = &rcu_state;
 
 	/* If no callbacks are ready, just return. */
 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
-		trace_rcu_batch_start(rsp->name,
+		trace_rcu_batch_start(rcu_state.name,
 				      rcu_segcblist_n_lazy_cbs(&rdp->cblist),
 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
-		trace_rcu_batch_end(rsp->name, 0,
+		trace_rcu_batch_end(rcu_state.name, 0,
 				    !rcu_segcblist_empty(&rdp->cblist),
 				    need_resched(), is_idle_task(current),
 				    rcu_is_callbacks_kthread());
@@ -2488,7 +2487,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	local_irq_save(flags);
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
 	bl = rdp->blimit;
-	trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
+	trace_rcu_batch_start(rcu_state.name,
+			      rcu_segcblist_n_lazy_cbs(&rdp->cblist),
 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
 	local_irq_restore(flags);
@@ -2497,7 +2497,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	rhp = rcu_cblist_dequeue(&rcl);
 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
 		debug_rcu_head_unqueue(rhp);
-		if (__rcu_reclaim(rsp->name, rhp))
+		if (__rcu_reclaim(rcu_state.name, rhp))
 			rcu_cblist_dequeued_lazy(&rcl);
 		/*
 		 * Stop only if limit reached and CPU has something to do.
@@ -2511,7 +2511,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 
 	local_irq_save(flags);
 	count = -rcl.len;
-	trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
+	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
 			    is_idle_task(current), rcu_is_callbacks_kthread());
 
 	/* Update counts and requeue any remaining callbacks. */
@@ -2527,7 +2527,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
 		rdp->qlen_last_fqs_check = 0;
-		rdp->n_force_qs_snap = rsp->n_force_qs;
+		rdp->n_force_qs_snap = rcu_state.n_force_qs;
 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
 		rdp->qlen_last_fqs_check = count;
 
@@ -2763,11 +2763,9 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
  */
 static void invoke_rcu_callbacks(struct rcu_data *rdp)
 {
-	struct rcu_state *rsp = &rcu_state;
-
 	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
 		return;
-	if (likely(!rsp->boost)) {
+	if (likely(!rcu_state.boost)) {
 		rcu_do_batch(rdp);
 		return;
 	}
@@ -2843,7 +2841,6 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
 {
 	unsigned long flags;
 	struct rcu_data *rdp;
-	struct rcu_state __maybe_unused *rsp = &rcu_state;
 
 	/* Misaligned rcu_head! */
 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
@@ -2892,11 +2889,12 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
 		rcu_idle_count_callbacks_posted();
 
 	if (__is_kfree_rcu_offset((unsigned long)func))
-		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
+		trace_rcu_kfree_callback(rcu_state.name, head,
+					 (unsigned long)func,
 					 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
 					 rcu_segcblist_n_cbs(&rdp->cblist));
 	else
-		trace_rcu_callback(rsp->name, head,
+		trace_rcu_callback(rcu_state.name, head,
 				   rcu_segcblist_n_lazy_cbs(&rdp->cblist),
 				   rcu_segcblist_n_cbs(&rdp->cblist));
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 49/52] rcu: Eliminate quiescent-state and grace-period-nonstart use of rsp
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (47 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 48/52] rcu: Eliminate callback-registration/invocation " Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 50/52] rcu: Eliminate RCU-barrier " Paul E. McKenney
                   ` (3 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

Now that there is only one rcu_state structure, there is less point in
maintaining a pointer to it.  This commit therefore replaces rsp with
&rcu_state in rcu_report_qs_rnp(), force_quiescent_state(), and
rcu_check_gp_start_stall().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)
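
For orientation, force_quiescent_state() uses funnel locking: each CPU
trylocks its way from its leaf rcu_node structure toward the root, and
any CPU that fails a trylock (or sees the FQS flag already set) drops
out, leaving the work to whoever got there first.  A condensed sketch
(abridged from the diff below):

        struct rcu_node *rnp;
        struct rcu_node *rnp_old = NULL;
        bool ret;

        rnp = __this_cpu_read(rcu_data.mynode);
        for (; rnp != NULL; rnp = rnp->parent) {
                ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
                      !raw_spin_trylock(&rnp->fqslock);
                if (rnp_old != NULL)
                        raw_spin_unlock(&rnp_old->fqslock);
                if (ret)
                        return; /* Someone else is on it, drop out. */
                rnp_old = rnp;
        }
        /* Reached the root: set RCU_GP_FLAG_FQS and wake the kthread. */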

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e6f0d8b6bde3..962b75c9722a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2197,7 +2197,6 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
 {
 	unsigned long oldmask = 0;
 	struct rcu_node *rnp_c;
-	struct rcu_state __maybe_unused *rsp = &rcu_state;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -2216,7 +2215,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
 			     rcu_preempt_blocked_readers_cgp(rnp));
 		rnp->qsmask &= ~mask;
-		trace_rcu_quiescent_state_report(rsp->name, rnp->gp_seq,
+		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
 						 mask, rnp->qsmask, rnp->level,
 						 rnp->grplo, rnp->grphi,
 						 !!rnp->gp_tasks);
@@ -2623,12 +2622,11 @@ static void force_quiescent_state(void)
 	bool ret;
 	struct rcu_node *rnp;
 	struct rcu_node *rnp_old = NULL;
-	struct rcu_state *rsp = &rcu_state;
 
 	/* Funnel through hierarchy to reduce memory contention. */
 	rnp = __this_cpu_read(rcu_data.mynode);
 	for (; rnp != NULL; rnp = rnp->parent) {
-		ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
+		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
 		      !raw_spin_trylock(&rnp->fqslock);
 		if (rnp_old != NULL)
 			raw_spin_unlock(&rnp_old->fqslock);
@@ -2641,11 +2639,12 @@ static void force_quiescent_state(void)
 	/* Reached the root of the rcu_node tree, acquire lock. */
 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
 	raw_spin_unlock(&rnp_old->fqslock);
-	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 		return;  /* Someone beat us to it. */
 	}
-	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
+	WRITE_ONCE(rcu_state.gp_flags,
+		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
 	rcu_gp_kthread_wake();
 }
@@ -2661,15 +2660,14 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
 	unsigned long flags;
 	unsigned long j;
 	struct rcu_node *rnp_root = rcu_get_root();
-	struct rcu_state *rsp = &rcu_state;
 	static atomic_t warned = ATOMIC_INIT(0);
 
 	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
 	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
 		return;
 	j = jiffies; /* Expensive access, and in common case don't get here. */
-	if (time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
-	    time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
+	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
 	    atomic_read(&warned))
 		return;
 
@@ -2677,8 +2675,8 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
 	j = jiffies;
 	if (rcu_gp_in_progress() ||
 	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-	    time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
-	    time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
+	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
 	    atomic_read(&warned)) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
@@ -2690,19 +2688,19 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
 	j = jiffies;
 	if (rcu_gp_in_progress() ||
 	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-	    time_before(j, rsp->gp_req_activity + gpssdelay) ||
-	    time_before(j, rsp->gp_activity + gpssdelay) ||
+	    time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
+	    time_before(j, rcu_state.gp_activity + gpssdelay) ||
 	    atomic_xchg(&warned, 1)) {
 		raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
-		 __func__, (long)READ_ONCE(rsp->gp_seq),
+		 __func__, (long)READ_ONCE(rcu_state.gp_seq),
 		 (long)READ_ONCE(rnp_root->gp_seq_needed),
-		 j - rsp->gp_req_activity, j - rsp->gp_activity,
-		 rsp->gp_flags, rsp->gp_state, rsp->name,
-		 rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL);
+		 j - rcu_state.gp_req_activity, j - rcu_state.gp_activity,
+		 rcu_state.gp_flags, rcu_state.gp_state, rcu_state.name,
+		 rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL);
 	WARN_ON(1);
 	if (rnp_root != rnp)
 		raw_spin_unlock_rcu_node(rnp_root);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 50/52] rcu: Eliminate RCU-barrier use of rsp
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (48 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 49/52] rcu: Eliminate quiescent-state and grace-period-nonstart " Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 51/52] rcu: Eliminate initialization-time " Paul E. McKenney
                   ` (2 subsequent siblings)
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

Now that there is only one rcu_state structure, there is less point
in maintaining a pointer to it.  This commit therefore replaces rsp
with &rcu_state in rcu_barrier_callback(), rcu_barrier_func(), and
_rcu_barrier().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 65 +++++++++++++++++++++++------------------------
 1 file changed, 32 insertions(+), 33 deletions(-)
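
For orientation, _rcu_barrier() relies on the count-to-one completion
idiom: the counter starts at one so that it cannot hit zero before
every callback has been posted.  A minimal sketch (the enqueue loop is
collapsed into a hypothetical for_each_cpu_with_callbacks() helper):

        atomic_set(&rcu_state.barrier_cpu_count, 1); /* initial ref. */

        for_each_cpu_with_callbacks(cpu) {      /* hypothetical */
                atomic_inc(&rcu_state.barrier_cpu_count);
                /* ...post rcu_barrier_callback() on this CPU... */
        }

        /* Drop the initial reference; the last decrement completes. */
        if (atomic_dec_and_test(&rcu_state.barrier_cpu_count))
                complete(&rcu_state.barrier_completion);
        wait_for_completion(&rcu_state.barrier_completion);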

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 962b75c9722a..e573fb9f0ef8 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3128,32 +3128,31 @@ static void _rcu_barrier_trace(const char *s, int cpu, unsigned long done)
  */
 static void rcu_barrier_callback(struct rcu_head *rhp)
 {
-	struct rcu_state *rsp = &rcu_state;
-
-	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
-		_rcu_barrier_trace(TPS("LastCB"), -1, rsp->barrier_sequence);
-		complete(&rsp->barrier_completion);
+	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
+		_rcu_barrier_trace(TPS("LastCB"), -1,
+				   rcu_state.barrier_sequence);
+		complete(&rcu_state.barrier_completion);
 	} else {
-		_rcu_barrier_trace(TPS("CB"), -1, rsp->barrier_sequence);
+		_rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
 	}
 }
 
 /*
  * Called with preemption disabled, and from cross-cpu IRQ context.
  */
-static void rcu_barrier_func(void *type)
+static void rcu_barrier_func(void *unused)
 {
-	struct rcu_state *rsp = type;
 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
 
-	_rcu_barrier_trace(TPS("IRQ"), -1, rsp->barrier_sequence);
+	_rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
 	rdp->barrier_head.func = rcu_barrier_callback;
 	debug_rcu_head_queue(&rdp->barrier_head);
 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
-		atomic_inc(&rsp->barrier_cpu_count);
+		atomic_inc(&rcu_state.barrier_cpu_count);
 	} else {
 		debug_rcu_head_unqueue(&rdp->barrier_head);
-		_rcu_barrier_trace(TPS("IRQNQ"), -1, rsp->barrier_sequence);
+		_rcu_barrier_trace(TPS("IRQNQ"), -1,
+				   rcu_state.barrier_sequence);
 	}
 }
 
@@ -3165,25 +3164,25 @@ static void _rcu_barrier(void)
 {
 	int cpu;
 	struct rcu_data *rdp;
-	struct rcu_state *rsp = &rcu_state;
-	unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
+	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
 
 	_rcu_barrier_trace(TPS("Begin"), -1, s);
 
 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
-	mutex_lock(&rsp->barrier_mutex);
+	mutex_lock(&rcu_state.barrier_mutex);
 
 	/* Did someone else do our work for us? */
-	if (rcu_seq_done(&rsp->barrier_sequence, s)) {
-		_rcu_barrier_trace(TPS("EarlyExit"), -1, rsp->barrier_sequence);
+	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
+		_rcu_barrier_trace(TPS("EarlyExit"), -1,
+				   rcu_state.barrier_sequence);
 		smp_mb(); /* caller's subsequent code after above check. */
-		mutex_unlock(&rsp->barrier_mutex);
+		mutex_unlock(&rcu_state.barrier_mutex);
 		return;
 	}
 
 	/* Mark the start of the barrier operation. */
-	rcu_seq_start(&rsp->barrier_sequence);
-	_rcu_barrier_trace(TPS("Inc1"), -1, rsp->barrier_sequence);
+	rcu_seq_start(&rcu_state.barrier_sequence);
+	_rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
 
 	/*
 	 * Initialize the count to one rather than to zero in order to
@@ -3191,8 +3190,8 @@ static void _rcu_barrier(void)
 	 * (or preemption of this task).  Exclude CPU-hotplug operations
 	 * to ensure that no offline CPU has callbacks queued.
 	 */
-	init_completion(&rsp->barrier_completion);
-	atomic_set(&rsp->barrier_cpu_count, 1);
+	init_completion(&rcu_state.barrier_completion);
+	atomic_set(&rcu_state.barrier_cpu_count, 1);
 	get_online_cpus();
 
 	/*
@@ -3207,22 +3206,22 @@ static void _rcu_barrier(void)
 		if (rcu_is_nocb_cpu(cpu)) {
 			if (!rcu_nocb_cpu_needs_barrier(cpu)) {
 				_rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
-						   rsp->barrier_sequence);
+						   rcu_state.barrier_sequence);
 			} else {
 				_rcu_barrier_trace(TPS("OnlineNoCB"), cpu,
-						   rsp->barrier_sequence);
+						   rcu_state.barrier_sequence);
 				smp_mb__before_atomic();
-				atomic_inc(&rsp->barrier_cpu_count);
+				atomic_inc(&rcu_state.barrier_cpu_count);
 				__call_rcu(&rdp->barrier_head,
 					   rcu_barrier_callback, cpu, 0);
 			}
 		} else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
 			_rcu_barrier_trace(TPS("OnlineQ"), cpu,
-					   rsp->barrier_sequence);
-			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
+					   rcu_state.barrier_sequence);
+			smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
 		} else {
 			_rcu_barrier_trace(TPS("OnlineNQ"), cpu,
-					   rsp->barrier_sequence);
+					   rcu_state.barrier_sequence);
 		}
 	}
 	put_online_cpus();
@@ -3231,18 +3230,18 @@ static void _rcu_barrier(void)
 	 * Now that we have an rcu_barrier_callback() callback on each
 	 * CPU, and thus each counted, remove the initial count.
 	 */
-	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
-		complete(&rsp->barrier_completion);
+	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count))
+		complete(&rcu_state.barrier_completion);
 
 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
-	wait_for_completion(&rsp->barrier_completion);
+	wait_for_completion(&rcu_state.barrier_completion);
 
 	/* Mark the end of the barrier operation. */
-	_rcu_barrier_trace(TPS("Inc2"), -1, rsp->barrier_sequence);
-	rcu_seq_end(&rsp->barrier_sequence);
+	_rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
+	rcu_seq_end(&rcu_state.barrier_sequence);
 
 	/* Other rcu_barrier() invocations can now safely proceed. */
-	mutex_unlock(&rsp->barrier_mutex);
+	mutex_unlock(&rcu_state.barrier_mutex);
 }
 
 /**
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 51/52] rcu: Eliminate initialization-time use of rsp
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (49 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 50/52] rcu: Eliminate RCU-barrier " Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-29 22:38 ` [PATCH tip/core/rcu 52/52] rcu: Fix typo in force_qs_rnp()'s parameter's parameter Paul E. McKenney
  2018-08-30  2:00 ` [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Steven Rostedt
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

Now that there is only one rcu_state structure, there is less point in
maintaining a pointer to it.  This commit therefore replaces rsp with
&rcu_state in rcu_cpu_starting() and rcu_init_one().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)
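
For orientation, rcu_init_one() lays the entire rcu_node tree out in a
single array, with rcu_state.level[i] pointing at the first node of
level i.  A sketch for a hypothetical two-level, four-leaf geometry
(num_rcu_lvl[] = { 1, 4 }):

        /* node[0] is the root; node[1..4] are the four leaves. */
        rcu_state.level[0] = &rcu_state.node[0];
        rcu_state.level[1] = rcu_state.level[0] + num_rcu_lvl[0];

        /* Leaf j finds its parent one level up by integer division: */
        rnp->parent = rcu_state.level[0] + j / levelspread[0];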

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e573fb9f0ef8..9c82981d7822 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3454,7 +3454,6 @@ void rcu_cpu_starting(unsigned int cpu)
 	unsigned long oldmask;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp = &rcu_state;
 
 	if (per_cpu(rcu_cpu_started, cpu))
 		return;
@@ -3471,10 +3470,10 @@ void rcu_cpu_starting(unsigned int cpu)
 	oldmask ^= rnp->expmaskinitnext;
 	nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
 	/* Allow lockless access for expedited grace periods. */
-	smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
+	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */
 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
-	rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
-	rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
+	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
+	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
 	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
 		/* Report QS -after- changing ->qsmaskinitnext! */
 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
@@ -3666,7 +3665,6 @@ static void __init rcu_init_one(void)
 	int i;
 	int j;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp = &rcu_state;
 
 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
 
@@ -3677,14 +3675,15 @@ static void __init rcu_init_one(void)
 	/* Initialize the level-tracking arrays. */
 
 	for (i = 1; i < rcu_num_lvls; i++)
-		rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
+		rcu_state.level[i] =
+			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
 	rcu_init_levelspread(levelspread, num_rcu_lvl);
 
 	/* Initialize the elements themselves, starting from the leaves. */
 
 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
 		cpustride *= levelspread[i];
-		rnp = rsp->level[i];
+		rnp = rcu_state.level[i];
 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
@@ -3692,9 +3691,9 @@ static void __init rcu_init_one(void)
 			raw_spin_lock_init(&rnp->fqslock);
 			lockdep_set_class_and_name(&rnp->fqslock,
 						   &rcu_fqs_class[i], fqs[i]);
-			rnp->gp_seq = rsp->gp_seq;
-			rnp->gp_seq_needed = rsp->gp_seq;
-			rnp->completedqs = rsp->gp_seq;
+			rnp->gp_seq = rcu_state.gp_seq;
+			rnp->gp_seq_needed = rcu_state.gp_seq;
+			rnp->completedqs = rcu_state.gp_seq;
 			rnp->qsmask = 0;
 			rnp->qsmaskinit = 0;
 			rnp->grplo = j * cpustride;
@@ -3708,7 +3707,7 @@ static void __init rcu_init_one(void)
 			} else {
 				rnp->grpnum = j % levelspread[i - 1];
 				rnp->grpmask = 1UL << rnp->grpnum;
-				rnp->parent = rsp->level[i - 1] +
+				rnp->parent = rcu_state.level[i - 1] +
 					      j / levelspread[i - 1];
 			}
 			rnp->level = i;
@@ -3722,8 +3721,8 @@ static void __init rcu_init_one(void)
 		}
 	}
 
-	init_swait_queue_head(&rsp->gp_wq);
-	init_swait_queue_head(&rsp->expedited_wq);
+	init_swait_queue_head(&rcu_state.gp_wq);
+	init_swait_queue_head(&rcu_state.expedited_wq);
 	rnp = rcu_first_leaf_node();
 	for_each_possible_cpu(i) {
 		while (i > rnp->grphi)
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* [PATCH tip/core/rcu 52/52] rcu: Fix typo in force_qs_rnp()'s parameter's parameter
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (50 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 51/52] rcu: Eliminate initialization-time " Paul E. McKenney
@ 2018-08-29 22:38 ` Paul E. McKenney
  2018-08-30  2:00 ` [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Steven Rostedt
  52 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-29 22:38 UTC (permalink / raw)
  To: linux-kernel
  Cc: mingo, jiangshanlai, dipankar, akpm, mathieu.desnoyers, josh,
	tglx, peterz, rostedt, dhowells, edumazet, fweisbec, oleg, joel,
	Paul E. McKenney

Pointers to rcu_data structures should be named rdp, not rsp.  This
commit therefore makes this change.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9c82981d7822..91f5d612502a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -477,7 +477,7 @@ module_param(rcu_kick_kthreads, bool, 0644);
 static ulong jiffies_till_sched_qs = HZ / 10;
 module_param(jiffies_till_sched_qs, ulong, 0444);
 
-static void force_qs_rnp(int (*f)(struct rcu_data *rsp));
+static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
 static void force_quiescent_state(void);
 static int rcu_pending(void);
 
@@ -2569,7 +2569,7 @@ void rcu_check_callbacks(int user)
  *
  * The caller must have suppressed start of new grace periods.
  */
-static void force_qs_rnp(int (*f)(struct rcu_data *rsp))
+static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 {
 	int cpu;
 	unsigned long flags;
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 62+ messages in thread

* Re: [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0
  2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
                   ` (51 preceding siblings ...)
  2018-08-29 22:38 ` [PATCH tip/core/rcu 52/52] rcu: Fix typo in force_qs_rnp()'s parameter's parameter Paul E. McKenney
@ 2018-08-30  2:00 ` Steven Rostedt
  2018-08-30  3:22   ` Paul E. McKenney
  52 siblings, 1 reply; 62+ messages in thread
From: Steven Rostedt @ 2018-08-30  2:00 UTC (permalink / raw)
  To: Paul E. McKenney
  Cc: linux-kernel, mingo, jiangshanlai, dipankar, akpm,
	mathieu.desnoyers, josh, tglx, peterz, dhowells, edumazet,
	fweisbec, oleg, joel

On Wed, 29 Aug 2018 15:38:30 -0700
"Paul E. McKenney" <paulmck@linux.vnet.ibm.com> wrote:

> Hello!
> 
> This commit does RCU-consolidation cleanups that get rid of pointers to
> the sole remaining rcu_state structure:
> 
> 1-40:	Remove the "rsp" parameter from numerous functions, given that
> 	the corresponding argument will always be &rcu_state.

Hmm, couldn't 1-40 have been made into a single patch?

-- Steve

> 
> 41.	Remove rcu_data structure's ->rsp field, now that it always
> 	contains a pointer to rcu_state.
> 
> 42.	Remove non-flavor-traversal rsp local variable from tree_plugin.h.
> 
> 43.	Remove the for_each_rcu_flavor() flavor-traversal macro, given
> 	that there is now only ever one flavor to traverse.
> 
> 44.	Simplify rcutorture_get_gp_data() based on there now being only
> 	one rcu_state structure.
> 
> 45.	Restructure rcu_check_gp_kthread_starvation() based on there
> 	now being only one rcu_state structure.
> 
> 46.	Restructure RCU CPU stall warnings based on there now being only
> 	one rcu_state structure.
> 
> 47.	Restructure grace-period management code based on there now being
> 	only one rcu_state structure.
> 
> 48.	Restructure callback registration/invocation code based on there
> 	now being only one rcu_state structure.
> 
> 49.	Restructure quiescent-state and grace-period-nonstart code based
> 	on there now being only one rcu_state structure.
> 
> 50.	Restructure rcu_barrier() based on there now being only one
> 	rcu_state structure.
> 
> 51.	Restructure initialization code based on there now being only
> 	one rcu_state structure.
> 
> 52.	Fix typo in force_qs_rnp()'s parameter's parameter, which was
> 	located by searching for "rsp".
> 
> 							Thanx, Paul
> 
> ------------------------------------------------------------------------
> 
>  Documentation/RCU/Design/Data-Structures/Data-Structures.html |   23 
>  kernel/rcu/rcu.h                                              |   28 
>  kernel/rcu/srcutree.c                                         |    4 
>  kernel/rcu/tree.c                                             | 1261 ++++------
>  kernel/rcu/tree.h                                             |   29 
>  kernel/rcu/tree_exp.h                                         |  209 -
>  kernel/rcu/tree_plugin.h                                      |  203 -
>  7 files changed, 784 insertions(+), 973 deletions(-)


^ permalink raw reply	[flat|nested] 62+ messages in thread

* Re: [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0
  2018-08-30  2:00 ` [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Steven Rostedt
@ 2018-08-30  3:22   ` Paul E. McKenney
  2018-08-30  4:10     ` Paul E. McKenney
  0 siblings, 1 reply; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-30  3:22 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-kernel, mingo, jiangshanlai, dipankar, akpm,
	mathieu.desnoyers, josh, tglx, peterz, dhowells, edumazet,
	fweisbec, oleg, joel

On Wed, Aug 29, 2018 at 10:00:26PM -0400, Steven Rostedt wrote:
> On Wed, 29 Aug 2018 15:38:30 -0700
> "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> wrote:
> 
> > Hello!
> > 
> > This commit does RCU-consolidation cleanups that get rid of pointers to
> > the sole remaining rcu_state structure:
> > 
> > 1-40:	Remove the "rsp" parameter from numerous functions, given that
> > 	the corresponding argument will always be &rcu_state.
> 
> Hmm, couldn't 1-40 have been made into a single patch?

They could.  I separated them to make finding the inevitable typos easier.
At this point, though, it is easy enough to squash them together.

							Thanx, Paul

> -- Steve
> 
> > 
> > 41.	Remove rcu_data structure's ->rsp field, now that it always
> > 	contains a pointer to rcu_state.
> > 
> > 42.	Remove non-flavor-traversal rsp local variable from tree_plugin.h.
> > 
> > 43.	Remove the for_each_rcu_flavor() flavor-traversal macro, given
> > 	that there is now only ever one flavor to traverse.
> > 
> > 44.	Simplify rcutorture_get_gp_data() based on there now being only
> > 	one rcu_state structure.
> > 
> > 45.	Restructure rcu_check_gp_kthread_starvation() based on there
> > 	now being only one rcu_state structure.
> > 
> > 46.	Restructure RCU CPU stall warnings based on there now being only
> > 	one rcu_state structure.
> > 
> > 47.	Restructure grace-period management code based on there now being
> > 	only one rcu_state structure.
> > 
> > 48.	Restructure callback registration/invocation code based on there
> > 	now being only one rcu_state structure.
> > 
> > 49.	Restructure quiescent-state and grace-period-nonstart code based
> > 	on there now being only one rcu_state structure.
> > 
> > 50.	Restructure rcu_barrier() based on there now being only one
> > 	rcu_state structure.
> > 
> > 51.	Restructure initialization code based on there now being only
> > 	one rcu_state structure.
> > 
> > 52.	Fix typo in force_qs_rnp()'s parameter's parameter, which was
> > 	located by searching for "rsp".
> > 
> > 							Thanx, Paul
> > 
> > ------------------------------------------------------------------------
> > 
> >  Documentation/RCU/Design/Data-Structures/Data-Structures.html |   23 
> >  kernel/rcu/rcu.h                                              |   28 
> >  kernel/rcu/srcutree.c                                         |    4 
> >  kernel/rcu/tree.c                                             | 1261 ++++------
> >  kernel/rcu/tree.h                                             |   29 
> >  kernel/rcu/tree_exp.h                                         |  209 -
> >  kernel/rcu/tree_plugin.h                                      |  203 -
> >  7 files changed, 784 insertions(+), 973 deletions(-)
> 


^ permalink raw reply	[flat|nested] 62+ messages in thread

* Re: [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0
  2018-08-30  3:22   ` Paul E. McKenney
@ 2018-08-30  4:10     ` Paul E. McKenney
  2018-08-30  4:20       ` Josh Triplett
  2018-08-30 15:44       ` Steven Rostedt
  0 siblings, 2 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-30  4:10 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-kernel, mingo, jiangshanlai, dipankar, akpm,
	mathieu.desnoyers, josh, tglx, peterz, dhowells, edumazet,
	fweisbec, oleg, joel

On Wed, Aug 29, 2018 at 08:22:16PM -0700, Paul E. McKenney wrote:
> On Wed, Aug 29, 2018 at 10:00:26PM -0400, Steven Rostedt wrote:
> > On Wed, 29 Aug 2018 15:38:30 -0700
> > "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> wrote:
> > 
> > > Hello!
> > > 
> > > This commit does RCU-consolidation cleanups that get rid of pointers to
> > > the sole remaining rcu_state structure:
> > > 
> > > 1-40:	Remove the "rsp" parameter from numerous functions, given that
> > > 	the corresponding argument will always be &rcu_state.
> > 
> > Hmm, couldn't 1-40 have been made into a single patch?
> 
> They could.  I separated them to make finding the inevitable typos easier.
> But at this point, it is easy enough to squash them together.

And please see below for what the resulting diff would look like.  Is
this an improvement?

							Thanx, Paul
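
For orientation when reading the diff below: beyond the many signature
changes, the most visible structural effect is that dispatch loops over
RCU flavors collapse, there now being only one flavor to traverse.  A
condensed sketch of the rcu_process_callbacks() case (tracing and the
offline check elided):

	/* Before: one pass over each remaining RCU flavor. */
	static __latent_entropy void
	rcu_process_callbacks(struct softirq_action *unused)
	{
		struct rcu_state *rsp;

		for_each_rcu_flavor(rsp)
			__rcu_process_callbacks(rsp);
	}

	/* After: the sole flavor's work is folded directly into
	 * rcu_process_callbacks(); __rcu_process_callbacks(), the
	 * flavor loop, and the rsp local all go away. */

Similar collapses appear below in rcu_pending(), rcutree_prepare_cpu(),
rcutree_dying_cpu(), and rcutree_dead_cpu().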

------------------------------------------------------------------------

diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index f5120a00f511..772c26a3865a 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -1372,8 +1372,7 @@ that is, if the CPU is currently idle.
 Accessor Functions</a></h3>
 
 <p>The following listing shows the
-<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first</tt>,
-<tt>rcu_for_each_nonleaf_node_breadth_first()</tt>, and
+<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first</tt> and
 <tt>rcu_for_each_leaf_node()</tt> function and macros:
 
 <pre>
@@ -1386,13 +1385,9 @@ Accessor Functions</a></h3>
   7   for ((rnp) = &amp;(rsp)-&gt;node[0]; \
   8        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
   9
- 10 #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
- 11   for ((rnp) = &amp;(rsp)-&gt;node[0]; \
- 12        (rnp) &lt; (rsp)-&gt;level[NUM_RCU_LVLS - 1]; (rnp)++)
- 13
- 14 #define rcu_for_each_leaf_node(rsp, rnp) \
- 15   for ((rnp) = (rsp)-&gt;level[NUM_RCU_LVLS - 1]; \
- 16        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
+ 10 #define rcu_for_each_leaf_node(rsp, rnp) \
+ 11   for ((rnp) = (rsp)-&gt;level[NUM_RCU_LVLS - 1]; \
+ 12        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
 </pre>
 
 <p>The <tt>rcu_get_root()</tt> simply returns a pointer to the
@@ -1405,10 +1400,7 @@ macro takes advantage of the layout of the <tt>rcu_node</tt>
 structures in the <tt>rcu_state</tt> structure's
 <tt>-&gt;node[]</tt> array, performing a breadth-first traversal by
 simply traversing the array in order.
-The <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> macro operates
-similarly, but traverses only the first part of the array, thus excluding
-the leaf <tt>rcu_node</tt> structures.
-Finally, the <tt>rcu_for_each_leaf_node()</tt> macro traverses only
+Similarly, the <tt>rcu_for_each_leaf_node()</tt> macro traverses only
 the last part of the array, thus traversing only the leaf
 <tt>rcu_node</tt> structures.
 
@@ -1416,15 +1408,14 @@ the last part of the array, thus traversing only the leaf
 <tr><th>&nbsp;</th></tr>
 <tr><th align="left">Quick Quiz:</th></tr>
 <tr><td>
-	What do <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> and
+	What does
 	<tt>rcu_for_each_leaf_node()</tt> do if the <tt>rcu_node</tt> tree
 	contains only a single node?
 </td></tr>
 <tr><th align="left">Answer:</th></tr>
 <tr><td bgcolor="#ffffff"><font color="ffffff">
 	In the single-node case,
-	<tt>rcu_for_each_nonleaf_node_breadth_first()</tt> is a no-op
-	and <tt>rcu_for_each_leaf_node()</tt> traverses the single node.
+	<tt>rcu_for_each_leaf_node()</tt> traverses the single node.
 </font></td></tr>
 <tr><td>&nbsp;</td></tr>
 </table>
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 4d04683c31b2..2bb77fddc11f 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -329,29 +329,23 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
 }
 
 /* Returns first leaf rcu_node of the specified RCU flavor. */
-#define rcu_first_leaf_node(rsp) ((rsp)->level[rcu_num_lvls - 1])
+#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])
 
 /* Is this rcu_node a leaf? */
 #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
 
 /* Is this rcu_node the last leaf? */
-#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])
+#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])
 
 /*
- * Do a full breadth-first scan of the rcu_node structures for the
+ * Do a full breadth-first scan of the {s,}rcu_node structures for the
  * specified rcu_state structure.
  */
-#define rcu_for_each_node_breadth_first(rsp, rnp) \
-	for ((rnp) = &(rsp)->node[0]; \
-	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
-
-/*
- * Do a breadth-first scan of the non-leaf rcu_node structures for the
- * specified rcu_state structure.  Note that if there is a singleton
- * rcu_node tree with but one rcu_node structure, this loop is a no-op.
- */
-#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
-	for ((rnp) = &(rsp)->node[0]; !rcu_is_leaf_node(rsp, rnp); (rnp)++)
+#define srcu_for_each_node_breadth_first(sp, rnp) \
+	for ((rnp) = &(sp)->node[0]; \
+	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
+#define rcu_for_each_node_breadth_first(rnp) \
+	srcu_for_each_node_breadth_first(&rcu_state, rnp)
 
 /*
  * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
@@ -359,9 +353,9 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
  * one rcu_node structure, this loop -will- visit the rcu_node structure.
  * It is still a leaf node, even if it is also the root node.
  */
-#define rcu_for_each_leaf_node(rsp, rnp) \
-	for ((rnp) = rcu_first_leaf_node(rsp); \
-	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
+#define rcu_for_each_leaf_node(rnp) \
+	for ((rnp) = rcu_first_leaf_node(); \
+	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)
 
 /*
  * Iterate over all possible CPUs in a leaf RCU node.
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 6c9866a854b1..2042080cd38b 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -105,7 +105,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 	rcu_init_levelspread(levelspread, num_rcu_lvl);
 
 	/* Each pass through this loop initializes one srcu_node structure. */
-	rcu_for_each_node_breadth_first(sp, snp) {
+	srcu_for_each_node_breadth_first(sp, snp) {
 		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
 		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
 			     ARRAY_SIZE(snp->srcu_data_have_cbs));
@@ -561,7 +561,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 
 	/* Initiate callback invocation as needed. */
 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
-	rcu_for_each_node_breadth_first(sp, snp) {
+	srcu_for_each_node_breadth_first(sp, snp) {
 		spin_lock_irq_rcu_node(snp);
 		cbs = false;
 		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f0e7e3972fd9..35b705c1da40 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -132,15 +132,14 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
-static void
-rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
-		  struct rcu_node *rnp, unsigned long gps, unsigned long flags);
+static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
+			      unsigned long gps, unsigned long flags);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
-static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
-static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp);
+static void invoke_rcu_callbacks(struct rcu_data *rdp);
+static void rcu_report_exp_rdp(struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);
 
 /* rcuc/rcub kthread realtime priority */
@@ -190,9 +189,9 @@ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
  * permit this function to be invoked without holding the root rcu_node
  * structure's ->lock, but of course results can be subject to change.
  */
-static int rcu_gp_in_progress(struct rcu_state *rsp)
+static int rcu_gp_in_progress(void)
 {
-	return rcu_seq_state(rcu_seq_current(&rsp->gp_seq));
+	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
 }
 
 void rcu_softirq_qs(void)
@@ -480,8 +479,8 @@ module_param(rcu_kick_kthreads, bool, 0644);
 static ulong jiffies_till_sched_qs = HZ / 10;
 module_param(jiffies_till_sched_qs, ulong, 0444);
 
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
-static void force_quiescent_state(struct rcu_state *rsp);
+static void force_qs_rnp(int (*f)(struct rcu_data *rsp));
+static void force_quiescent_state(void);
 static int rcu_pending(void);
 
 /*
@@ -539,7 +538,7 @@ EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
  */
 void rcu_force_quiescent_state(void)
 {
-	force_quiescent_state(&rcu_state);
+	force_quiescent_state();
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
@@ -548,7 +547,7 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
  */
 void rcu_bh_force_quiescent_state(void)
 {
-	force_quiescent_state(&rcu_state);
+	force_quiescent_state();
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 
@@ -574,7 +573,7 @@ void show_rcu_gp_kthreads(void)
 	for_each_rcu_flavor(rsp) {
 		pr_info("%s: wait state: %d ->state: %#lx\n",
 			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
-		rcu_for_each_node_breadth_first(rsp, rnp) {
+		rcu_for_each_node_breadth_first(rnp) {
 			if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
 				continue;
 			pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
@@ -624,9 +623,9 @@ EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 /*
  * Return the root node of the specified rcu_state structure.
  */
-static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+static struct rcu_node *rcu_get_root(void)
 {
-	return &rsp->node[0];
+	return &rcu_state.node[0];
 }
 
 /*
@@ -1214,17 +1213,17 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	return 0;
 }
 
-static void record_gp_stall_check_time(struct rcu_state *rsp)
+static void record_gp_stall_check_time(void)
 {
 	unsigned long j = jiffies;
 	unsigned long j1;
 
-	rsp->gp_start = j;
+	rcu_state.gp_start = j;
 	j1 = rcu_jiffies_till_stall_check();
 	/* Record ->gp_start before ->jiffies_stall. */
-	smp_store_release(&rsp->jiffies_stall, j + j1); /* ^^^ */
-	rsp->jiffies_resched = j + j1 / 2;
-	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
+	smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
+	rcu_state.jiffies_resched = j + j1 / 2;
+	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
 }
 
 /*
@@ -1240,10 +1239,11 @@ static const char *gp_state_getname(short gs)
 /*
  * Complain about starvation of grace-period kthread.
  */
-static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
+static void rcu_check_gp_kthread_starvation(void)
 {
 	unsigned long gpa;
 	unsigned long j;
+	struct rcu_state *rsp = &rcu_state;
 
 	j = jiffies;
 	gpa = READ_ONCE(rsp->gp_activity);
@@ -1269,13 +1269,13 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
  * that don't support NMI-based stack dumps.  The NMI-triggered stack
  * traces are more accurate because they are printed by the target CPU.
  */
-static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
+static void rcu_dump_cpu_stacks(void)
 {
 	int cpu;
 	unsigned long flags;
 	struct rcu_node *rnp;
 
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		for_each_leaf_node_possible_cpu(rnp, cpu)
 			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
@@ -1289,15 +1289,16 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
  * If too much time has passed in the current grace period, and if
  * so configured, go kick the relevant kthreads.
  */
-static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
+static void rcu_stall_kick_kthreads(void)
 {
 	unsigned long j;
+	struct rcu_state *rsp = &rcu_state;
 
 	if (!rcu_kick_kthreads)
 		return;
 	j = READ_ONCE(rsp->jiffies_kick_kthreads);
 	if (time_after(jiffies, j) && rsp->gp_kthread &&
-	    (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
+	    (rcu_gp_in_progress() || READ_ONCE(rsp->gp_flags))) {
 		WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
 		rcu_ftrace_dump(DUMP_ALL);
 		wake_up_process(rsp->gp_kthread);
@@ -1311,18 +1312,19 @@ static void panic_on_rcu_stall(void)
 		panic("RCU Stall\n");
 }
 
-static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
+static void print_other_cpu_stall(unsigned long gp_seq)
 {
 	int cpu;
 	unsigned long flags;
 	unsigned long gpa;
 	unsigned long j;
 	int ndetected = 0;
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
+	struct rcu_state *rsp = &rcu_state;
 	long totqlen = 0;
 
 	/* Kick and suppress, if so configured. */
-	rcu_stall_kick_kthreads(rsp);
+	rcu_stall_kick_kthreads();
 	if (rcu_cpu_stall_suppress)
 		return;
 
@@ -1333,13 +1335,13 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 	 */
 	pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
 	print_cpu_stall_info_begin();
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		ndetected += rcu_print_task_stall(rnp);
 		if (rnp->qsmask != 0) {
 			for_each_leaf_node_possible_cpu(rnp, cpu)
 				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
-					print_cpu_stall_info(rsp, cpu);
+					print_cpu_stall_info(cpu);
 					ndetected++;
 				}
 		}
@@ -1354,10 +1356,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
 	       (long)rcu_seq_current(&rsp->gp_seq), totqlen);
 	if (ndetected) {
-		rcu_dump_cpu_stacks(rsp);
+		rcu_dump_cpu_stacks();
 
 		/* Complain about tasks blocking the grace period. */
-		rcu_print_detail_task_stall(rsp);
+		rcu_print_detail_task_stall();
 	} else {
 		if (rcu_seq_current(&rsp->gp_seq) != gp_seq) {
 			pr_err("INFO: Stall ended before state dump start\n");
@@ -1367,7 +1369,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
 			       rsp->name, j - gpa, j, gpa,
 			       jiffies_till_next_fqs,
-			       rcu_get_root(rsp)->qsmask);
+			       rcu_get_root()->qsmask);
 			/* In this case, the current CPU might be at fault. */
 			sched_show_task(current);
 		}
@@ -1377,23 +1379,24 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 		WRITE_ONCE(rsp->jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 
-	rcu_check_gp_kthread_starvation(rsp);
+	rcu_check_gp_kthread_starvation();
 
 	panic_on_rcu_stall();
 
-	force_quiescent_state(rsp);  /* Kick them all. */
+	force_quiescent_state();  /* Kick them all. */
 }
 
-static void print_cpu_stall(struct rcu_state *rsp)
+static void print_cpu_stall(void)
 {
 	int cpu;
 	unsigned long flags;
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
+	struct rcu_state *rsp = &rcu_state;
 	long totqlen = 0;
 
 	/* Kick and suppress, if so configured. */
-	rcu_stall_kick_kthreads(rsp);
+	rcu_stall_kick_kthreads();
 	if (rcu_cpu_stall_suppress)
 		return;
 
@@ -1405,7 +1408,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
 	print_cpu_stall_info_begin();
 	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
-	print_cpu_stall_info(rsp, smp_processor_id());
+	print_cpu_stall_info(smp_processor_id());
 	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
 	print_cpu_stall_info_end();
 	for_each_possible_cpu(cpu)
@@ -1415,9 +1418,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
 		jiffies - rsp->gp_start,
 		(long)rcu_seq_current(&rsp->gp_seq), totqlen);
 
-	rcu_check_gp_kthread_starvation(rsp);
+	rcu_check_gp_kthread_starvation();
 
-	rcu_dump_cpu_stacks(rsp);
+	rcu_dump_cpu_stacks();
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	/* Rewrite if needed in case of slow consoles. */
@@ -1438,7 +1441,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	resched_cpu(smp_processor_id());
 }
 
-static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
+static void check_cpu_stall(struct rcu_data *rdp)
 {
 	unsigned long gs1;
 	unsigned long gs2;
@@ -1447,11 +1450,12 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	unsigned long jn;
 	unsigned long js;
 	struct rcu_node *rnp;
+	struct rcu_state *rsp = &rcu_state;
 
 	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
-	    !rcu_gp_in_progress(rsp))
+	    !rcu_gp_in_progress())
 		return;
-	rcu_stall_kick_kthreads(rsp);
+	rcu_stall_kick_kthreads();
 	j = jiffies;
 
 	/*
@@ -1484,19 +1488,19 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 		return; /* No stall or GP completed since entering function. */
 	rnp = rdp->mynode;
 	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
-	if (rcu_gp_in_progress(rsp) &&
+	if (rcu_gp_in_progress() &&
 	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
 	    cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
 		/* We haven't checked in, so go dump stack. */
-		print_cpu_stall(rsp);
+		print_cpu_stall();
 
-	} else if (rcu_gp_in_progress(rsp) &&
+	} else if (rcu_gp_in_progress() &&
 		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
 		   cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
 		/* They had a few time units to dump stack, so complain. */
-		print_other_cpu_stall(rsp, gs2);
+		print_other_cpu_stall(gs2);
 	}
 }
 
@@ -1589,7 +1593,7 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
 	}
 
 	/* If GP already in progress, just leave, otherwise start one. */
-	if (rcu_gp_in_progress(rsp)) {
+	if (rcu_gp_in_progress()) {
 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
 		goto unlock_out;
 	}
@@ -1617,7 +1621,7 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
  * Clean up any old requests for the just-ended grace period.  Also return
  * whether any additional grace periods have been requested.
  */
-static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
 {
 	bool needmore;
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -1637,13 +1641,13 @@ static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
  * raced to awaken, and we lost), and finally don't try to awaken
  * a kthread that has not yet been created.
  */
-static void rcu_gp_kthread_wake(struct rcu_state *rsp)
+static void rcu_gp_kthread_wake(void)
 {
-	if (current == rsp->gp_kthread ||
-	    !READ_ONCE(rsp->gp_flags) ||
-	    !rsp->gp_kthread)
+	if (current == rcu_state.gp_kthread ||
+	    !READ_ONCE(rcu_state.gp_flags) ||
+	    !rcu_state.gp_kthread)
 		return;
-	swake_up_one(&rsp->gp_wq);
+	swake_up_one(&rcu_state.gp_wq);
 }
 
 /*
@@ -1658,11 +1662,11 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
  *
  * The caller must hold rnp->lock with interrupts disabled.
  */
-static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
-			       struct rcu_data *rdp)
+static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	unsigned long gp_seq_req;
 	bool ret = false;
+	struct rcu_state *rsp = &rcu_state;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -1699,25 +1703,24 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
  * that a new grace-period request be made, invokes rcu_accelerate_cbs()
  * while holding the leaf rcu_node structure's ->lock.
  */
-static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
-					struct rcu_node *rnp,
+static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
 					struct rcu_data *rdp)
 {
 	unsigned long c;
 	bool needwake;
 
 	lockdep_assert_irqs_disabled();
-	c = rcu_seq_snap(&rsp->gp_seq);
+	c = rcu_seq_snap(&rcu_state.gp_seq);
 	if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
 		/* Old request still live, so mark recent callbacks. */
 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
 		return;
 	}
 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
-	needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+	needwake = rcu_accelerate_cbs(rnp, rdp);
 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 	if (needwake)
-		rcu_gp_kthread_wake(rsp);
+		rcu_gp_kthread_wake();
 }
 
 /*
@@ -1730,8 +1733,7 @@ static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
  *
  * The caller must hold rnp->lock with interrupts disabled.
  */
-static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
-			    struct rcu_data *rdp)
+static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -1746,7 +1748,7 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
 
 	/* Classify any remaining callbacks. */
-	return rcu_accelerate_cbs(rsp, rnp, rdp);
+	return rcu_accelerate_cbs(rnp, rdp);
 }
 
 /*
@@ -1755,11 +1757,11 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
  * structure corresponding to the current CPU, and must have irqs disabled.
  * Returns true if the grace-period kthread needs to be awakened.
  */
-static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
-			      struct rcu_data *rdp)
+static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	bool ret;
 	bool need_gp;
+	struct rcu_state __maybe_unused *rsp = &rcu_state;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -1769,10 +1771,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 	/* Handle the ends of any preceding grace periods first. */
 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
 	    unlikely(READ_ONCE(rdp->gpwrap))) {
-		ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
+		ret = rcu_advance_cbs(rnp, rdp); /* Advance callbacks. */
 		trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
 	} else {
-		ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */
+		ret = rcu_accelerate_cbs(rnp, rdp); /* Recent callbacks. */
 	}
 
 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
@@ -1798,7 +1800,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 	return ret;
 }
 
-static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
+static void note_gp_changes(struct rcu_data *rdp)
 {
 	unsigned long flags;
 	bool needwake;
@@ -1812,16 +1814,16 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 		local_irq_restore(flags);
 		return;
 	}
-	needwake = __note_gp_changes(rsp, rnp, rdp);
+	needwake = __note_gp_changes(rnp, rdp);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
-		rcu_gp_kthread_wake(rsp);
+		rcu_gp_kthread_wake();
 }
 
-static void rcu_gp_slow(struct rcu_state *rsp, int delay)
+static void rcu_gp_slow(int delay)
 {
 	if (delay > 0 &&
-	    !(rcu_seq_ctr(rsp->gp_seq) %
+	    !(rcu_seq_ctr(rcu_state.gp_seq) %
 	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
 		schedule_timeout_uninterruptible(delay);
 }
@@ -1829,13 +1831,14 @@ static void rcu_gp_slow(struct rcu_state *rsp, int delay)
 /*
  * Initialize a new grace period.  Return false if no grace period required.
  */
-static bool rcu_gp_init(struct rcu_state *rsp)
+static bool rcu_gp_init(void)
 {
 	unsigned long flags;
 	unsigned long oldmask;
 	unsigned long mask;
 	struct rcu_data *rdp;
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
+	struct rcu_state *rsp = &rcu_state;
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
 	raw_spin_lock_irq_rcu_node(rnp);
@@ -1846,7 +1849,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	}
 	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
 
-	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
+	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
 		/*
 		 * Grace period already in progress, don't start another.
 		 * Not supposed to be able to happen.
@@ -1856,7 +1859,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	}
 
 	/* Advance to a new grace period and initialize state. */
-	record_gp_stall_check_time(rsp);
+	record_gp_stall_check_time();
 	/* Record GP times before starting GP, hence rcu_seq_start(). */
 	rcu_seq_start(&rsp->gp_seq);
 	trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start"));
@@ -1869,7 +1872,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	 * will handle subsequent offline CPUs.
 	 */
 	rsp->gp_state = RCU_GP_ONOFF;
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		spin_lock(&rsp->ofl_lock);
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
@@ -1914,7 +1917,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		raw_spin_unlock_irq_rcu_node(rnp);
 		spin_unlock(&rsp->ofl_lock);
 	}
-	rcu_gp_slow(rsp, gp_preinit_delay); /* Races with CPU hotplug. */
+	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
 
 	/*
 	 * Set the quiescent-state-needed bits in all the rcu_node
@@ -1929,15 +1932,15 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	 * process finishes, because this kthread handles both.
 	 */
 	rsp->gp_state = RCU_GP_INIT;
-	rcu_for_each_node_breadth_first(rsp, rnp) {
-		rcu_gp_slow(rsp, gp_init_delay);
+	rcu_for_each_node_breadth_first(rnp) {
+		rcu_gp_slow(gp_init_delay);
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rdp = this_cpu_ptr(&rcu_data);
-		rcu_preempt_check_blocked_tasks(rsp, rnp);
+		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
 		WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
 		if (rnp == rdp->mynode)
-			(void)__note_gp_changes(rsp, rnp, rdp);
+			(void)__note_gp_changes(rnp, rdp);
 		rcu_preempt_boost_start_gp(rnp);
 		trace_rcu_grace_period_init(rsp->name, rnp->gp_seq,
 					    rnp->level, rnp->grplo,
@@ -1946,7 +1949,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
 		rnp->rcu_gp_init_mask = mask;
 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
-			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 		else
 			raw_spin_unlock_irq_rcu_node(rnp);
 		cond_resched_tasks_rcu_qs();
@@ -1960,12 +1963,12 @@ static bool rcu_gp_init(struct rcu_state *rsp)
  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
  * time.
  */
-static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
+static bool rcu_gp_fqs_check_wake(int *gfp)
 {
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 
 	/* Someone like call_rcu() requested a force-quiescent-state scan. */
-	*gfp = READ_ONCE(rsp->gp_flags);
+	*gfp = READ_ONCE(rcu_state.gp_flags);
 	if (*gfp & RCU_GP_FLAG_FQS)
 		return true;
 
@@ -1979,18 +1982,19 @@ static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
 /*
  * Do one round of quiescent-state forcing.
  */
-static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
+static void rcu_gp_fqs(bool first_time)
 {
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
+	struct rcu_state *rsp = &rcu_state;
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
 	rsp->n_force_qs++;
 	if (first_time) {
 		/* Collect dyntick-idle snapshots. */
-		force_qs_rnp(rsp, dyntick_save_progress_counter);
+		force_qs_rnp(dyntick_save_progress_counter);
 	} else {
 		/* Handle dyntick-idle and offline CPUs. */
-		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
+		force_qs_rnp(rcu_implicit_dynticks_qs);
 	}
 	/* Clear flag to prevent immediate re-entry. */
 	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
@@ -2004,13 +2008,14 @@ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
 /*
  * Clean up after the old grace period.
  */
-static void rcu_gp_cleanup(struct rcu_state *rsp)
+static void rcu_gp_cleanup(void)
 {
 	unsigned long gp_duration;
 	bool needgp = false;
 	unsigned long new_gp_seq;
 	struct rcu_data *rdp;
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
+	struct rcu_state *rsp = &rcu_state;
 	struct swait_queue_head *sq;
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
@@ -2040,25 +2045,25 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	 */
 	new_gp_seq = rsp->gp_seq;
 	rcu_seq_end(&new_gp_seq);
-	rcu_for_each_node_breadth_first(rsp, rnp) {
+	rcu_for_each_node_breadth_first(rnp) {
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
-			dump_blkd_tasks(rsp, rnp, 10);
+			dump_blkd_tasks(rnp, 10);
 		WARN_ON_ONCE(rnp->qsmask);
 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
 		rdp = this_cpu_ptr(&rcu_data);
 		if (rnp == rdp->mynode)
-			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
+			needgp = __note_gp_changes(rnp, rdp) || needgp;
 		/* smp_mb() provided by prior unlock-lock pair. */
-		needgp = rcu_future_gp_cleanup(rsp, rnp) || needgp;
+		needgp = rcu_future_gp_cleanup(rnp) || needgp;
 		sq = rcu_nocb_gp_get(rnp);
 		raw_spin_unlock_irq_rcu_node(rnp);
 		rcu_nocb_gp_cleanup(sq);
 		cond_resched_tasks_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
-		rcu_gp_slow(rsp, gp_cleanup_delay);
+		rcu_gp_slow(gp_cleanup_delay);
 	}
-	rnp = rcu_get_root(rsp);
+	rnp = rcu_get_root();
 	raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
 
 	/* Declare grace period done. */
@@ -2073,7 +2078,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		needgp = true;
 	}
 	/* Advance CBs to reduce false positives below. */
-	if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
+	if (!rcu_accelerate_cbs(rnp, rdp) && needgp) {
 		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
 		rsp->gp_req_activity = jiffies;
 		trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq),
@@ -2087,14 +2092,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 /*
  * Body of kthread that handles grace periods.
  */
-static int __noreturn rcu_gp_kthread(void *arg)
+static int __noreturn rcu_gp_kthread(void *unused)
 {
 	bool first_gp_fqs;
 	int gf;
 	unsigned long j;
 	int ret;
-	struct rcu_state *rsp = arg;
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_state *rsp = &rcu_state;
+	struct rcu_node *rnp = rcu_get_root();
 
 	rcu_bind_gp_kthread();
 	for (;;) {
@@ -2109,7 +2114,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 						     RCU_GP_FLAG_INIT);
 			rsp->gp_state = RCU_GP_DONE_GPS;
 			/* Locking provides needed memory barrier. */
-			if (rcu_gp_init(rsp))
+			if (rcu_gp_init())
 				break;
 			cond_resched_tasks_rcu_qs();
 			WRITE_ONCE(rsp->gp_activity, jiffies);
@@ -2134,7 +2139,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					       TPS("fqswait"));
 			rsp->gp_state = RCU_GP_WAIT_FQS;
 			ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
-					rcu_gp_fqs_check_wake(rsp, &gf), j);
+					rcu_gp_fqs_check_wake(&gf), j);
 			rsp->gp_state = RCU_GP_DOING_FQS;
 			/* Locking provides needed memory barriers. */
 			/* If grace period done, leave loop. */
@@ -2147,7 +2152,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 				trace_rcu_grace_period(rsp->name,
 						       READ_ONCE(rsp->gp_seq),
 						       TPS("fqsstart"));
-				rcu_gp_fqs(rsp, first_gp_fqs);
+				rcu_gp_fqs(first_gp_fqs);
 				first_gp_fqs = false;
 				trace_rcu_grace_period(rsp->name,
 						       READ_ONCE(rsp->gp_seq),
@@ -2175,7 +2180,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 
 		/* Handle grace-period end. */
 		rsp->gp_state = RCU_GP_CLEANUP;
-		rcu_gp_cleanup(rsp);
+		rcu_gp_cleanup();
 		rsp->gp_state = RCU_GP_CLEANED;
 	}
 }
@@ -2189,14 +2194,16 @@ static int __noreturn rcu_gp_kthread(void *arg)
  * just-completed grace period.  Note that the caller must hold rnp->lock,
  * which is released before return.
  */
-static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
-	__releases(rcu_get_root(rsp)->lock)
+static void rcu_report_qs_rsp(unsigned long flags)
+	__releases(rcu_get_root()->lock)
 {
-	raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
-	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+	struct rcu_state *rsp = &rcu_state;
+
+	raw_lockdep_assert_held_rcu_node(rcu_get_root());
+	WARN_ON_ONCE(!rcu_gp_in_progress());
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
-	rcu_gp_kthread_wake(rsp);
+	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
+	rcu_gp_kthread_wake();
 }
 
 /*
@@ -2213,13 +2220,13 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  * disabled.  This allows propagating quiescent state due to resumed tasks
  * during grace-period initialization.
  */
-static void
-rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
-		  struct rcu_node *rnp, unsigned long gps, unsigned long flags)
+static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
+			      unsigned long gps, unsigned long flags)
 	__releases(rnp->lock)
 {
 	unsigned long oldmask = 0;
 	struct rcu_node *rnp_c;
+	struct rcu_state __maybe_unused *rsp = &rcu_state;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -2268,7 +2275,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
 	 * to clean up and start the next grace period if one is needed.
 	 */
-	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
+	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
 }
 
 /*
@@ -2279,8 +2286,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
  * disabled.
  */
 static void __maybe_unused
-rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
-			  struct rcu_node *rnp, unsigned long flags)
+rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 	__releases(rnp->lock)
 {
 	unsigned long gps;
@@ -2302,7 +2308,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 		 * Only one rcu_node structure in the tree, so don't
 		 * try to report up to its nonexistent parent!
 		 */
-		rcu_report_qs_rsp(rsp, flags);
+		rcu_report_qs_rsp(flags);
 		return;
 	}
 
@@ -2311,7 +2317,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 	mask = rnp->grpmask;
 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
-	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
+	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
 }
 
 /*
@@ -2319,7 +2325,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
  * structure.  This must be called from the specified CPU.
  */
 static void
-rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
+rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	unsigned long mask;
@@ -2352,12 +2358,12 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 		 * This GP can't end until cpu checks in, so all of our
 		 * callbacks can be processed during the next GP.
 		 */
-		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+		needwake = rcu_accelerate_cbs(rnp, rdp);
 
-		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 		/* ^^^ Released rnp->lock */
 		if (needwake)
-			rcu_gp_kthread_wake(rsp);
+			rcu_gp_kthread_wake();
 	}
 }
 
@@ -2368,10 +2374,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
  * quiescent state for this grace period, and record that fact if so.
  */
 static void
-rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
+rcu_check_quiescent_state(struct rcu_data *rdp)
 {
 	/* Check for grace-period ends and beginnings. */
-	note_gp_changes(rsp, rdp);
+	note_gp_changes(rdp);
 
 	/*
 	 * Does this CPU still need to do its part for current grace period?
@@ -2391,24 +2397,26 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
 	 * judge of that).
 	 */
-	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
+	rcu_report_qs_rdp(rdp->cpu, rdp);
 }
 
 /*
- * Trace the fact that this CPU is going offline.
+ * Near the end of the offline process.  Trace the fact that this CPU
+ * is going offline.
  */
-static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
+int rcutree_dying_cpu(unsigned int cpu)
 {
 	RCU_TRACE(bool blkd;)
 	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);)
 	RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
 
 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-		return;
+		return 0;
 
 	RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
-	trace_rcu_grace_period(rsp->name, rnp->gp_seq,
+	trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
 			       blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
+	return 0;
 }
 
 /*
@@ -2462,28 +2470,32 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
  * There can only be one CPU hotplug operation at a time, so no need for
  * explicit locking.
  */
-static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
+int rcutree_dead_cpu(unsigned int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-		return;
+		return 0;
 
 	/* Adjust any no-longer-needed kthreads. */
 	rcu_boost_kthread_setaffinity(rnp, -1);
+	/* Do any needed no-CB deferred wakeups from this CPU. */
+	do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
+	return 0;
 }
 
 /*
  * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
  */
-static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *rhp;
 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 	long bl, count;
+	struct rcu_state *rsp = &rcu_state;
 
 	/* If no callbacks are ready, just return. */
 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
@@ -2587,14 +2599,14 @@ void rcu_check_callbacks(int user)
  *
  * The caller must have suppressed start of new grace periods.
  */
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
+static void force_qs_rnp(int (*f)(struct rcu_data *rsp))
 {
 	int cpu;
 	unsigned long flags;
 	unsigned long mask;
 	struct rcu_node *rnp;
 
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		cond_resched_tasks_rcu_qs();
 		mask = 0;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -2622,7 +2634,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
 		}
 		if (mask != 0) {
 			/* Idle/offline CPUs, report (releases rnp->lock). */
-			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 		} else {
 			/* Nothing to do here, so just drop the lock. */
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2634,12 +2646,13 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
  * Force quiescent states on reluctant CPUs, and also detect which
  * CPUs are in dyntick-idle mode.
  */
-static void force_quiescent_state(struct rcu_state *rsp)
+static void force_quiescent_state(void)
 {
 	unsigned long flags;
 	bool ret;
 	struct rcu_node *rnp;
 	struct rcu_node *rnp_old = NULL;
+	struct rcu_state *rsp = &rcu_state;
 
 	/* Funnel through hierarchy to reduce memory contention. */
 	rnp = __this_cpu_read(rcu_data.mynode);
@@ -2652,7 +2665,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 			return;
 		rnp_old = rnp;
 	}
-	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
+	/* rnp_old == rcu_get_root(), rnp == NULL. */
 
 	/* Reached the root of the rcu_node tree, acquire lock. */
 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
@@ -2663,7 +2676,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	}
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
-	rcu_gp_kthread_wake(rsp);
+	rcu_gp_kthread_wake();
 }
 
 /*
@@ -2671,16 +2684,16 @@ static void force_quiescent_state(struct rcu_state *rsp)
  * RCU to come out of its idle mode.
  */
 static void
-rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
-			 struct rcu_data *rdp)
+rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
 	unsigned long flags;
 	unsigned long j;
-	struct rcu_node *rnp_root = rcu_get_root(rsp);
+	struct rcu_node *rnp_root = rcu_get_root();
+	struct rcu_state *rsp = &rcu_state;
 	static atomic_t warned = ATOMIC_INIT(0);
 
-	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress(rsp) ||
+	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
 	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
 		return;
 	j = jiffies; /* Expensive access, and in common case don't get here. */
@@ -2691,7 +2704,7 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	j = jiffies;
-	if (rcu_gp_in_progress(rsp) ||
+	if (rcu_gp_in_progress() ||
 	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
 	    time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
 	    time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
@@ -2704,7 +2717,7 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
 	if (rnp_root != rnp)
 		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
 	j = jiffies;
-	if (rcu_gp_in_progress(rsp) ||
+	if (rcu_gp_in_progress() ||
 	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
 	    time_before(j, rsp->gp_req_activity + gpssdelay) ||
 	    time_before(j, rsp->gp_activity + gpssdelay) ||
@@ -2726,17 +2739,19 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
 }
 
 /*
- * This does the RCU core processing work for the specified rcu_state
- * and rcu_data structures.  This may be called only from the CPU to
- * whom the rdp belongs.
+ * This does the RCU core processing work for the specified rcu_data
+ * structure.  This may be called only from the CPU to whom the rdp
+ * belongs.
  */
-static void
-__rcu_process_callbacks(struct rcu_state *rsp)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 {
 	unsigned long flags;
 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rdp->mynode;
 
+	if (cpu_is_offline(smp_processor_id()))
+		return;
+	trace_rcu_utilization(TPS("Start RCU core"));
 	WARN_ON_ONCE(!rdp->beenonline);
 
 	/* Report any deferred quiescent states if preemption enabled. */
@@ -2746,39 +2761,25 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 		resched_cpu(rdp->cpu); /* Provoke future context switch. */
 
 	/* Update RCU state based on any recent quiescent states. */
-	rcu_check_quiescent_state(rsp, rdp);
+	rcu_check_quiescent_state(rdp);
 
 	/* No grace period and unregistered callbacks? */
-	if (!rcu_gp_in_progress(rsp) &&
+	if (!rcu_gp_in_progress() &&
 	    rcu_segcblist_is_enabled(&rdp->cblist)) {
 		local_irq_save(flags);
 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
-			rcu_accelerate_cbs_unlocked(rsp, rnp, rdp);
+			rcu_accelerate_cbs_unlocked(rnp, rdp);
 		local_irq_restore(flags);
 	}
 
-	rcu_check_gp_start_stall(rsp, rnp, rdp);
+	rcu_check_gp_start_stall(rnp, rdp);
 
 	/* If there are callbacks ready, invoke them. */
 	if (rcu_segcblist_ready_cbs(&rdp->cblist))
-		invoke_rcu_callbacks(rsp, rdp);
+		invoke_rcu_callbacks(rdp);
 
 	/* Do any needed deferred wakeups of rcuo kthreads. */
 	do_nocb_deferred_wakeup(rdp);
-}
-
-/*
- * Do RCU core processing for the current CPU.
- */
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
-{
-	struct rcu_state *rsp;
-
-	if (cpu_is_offline(smp_processor_id()))
-		return;
-	trace_rcu_utilization(TPS("Start RCU core"));
-	for_each_rcu_flavor(rsp)
-		__rcu_process_callbacks(rsp);
 	trace_rcu_utilization(TPS("End RCU core"));
 }
 
@@ -2789,12 +2790,14 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
  * are running on the current CPU with softirqs disabled, the
  * rcu_cpu_kthread_task cannot disappear out from under us.
  */
-static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+static void invoke_rcu_callbacks(struct rcu_data *rdp)
 {
+	struct rcu_state *rsp = &rcu_state;
+
 	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
 		return;
 	if (likely(!rsp->boost)) {
-		rcu_do_batch(rsp, rdp);
+		rcu_do_batch(rdp);
 		return;
 	}
 	invoke_rcu_callbacks_kthread();
@@ -2809,8 +2812,8 @@ static void invoke_rcu_core(void)
 /*
  * Handle any core-RCU processing required by a call_rcu() invocation.
  */
-static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
-			    struct rcu_head *head, unsigned long flags)
+static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
+			    unsigned long flags)
 {
 	/*
 	 * If called from an extended quiescent state, invoke the RCU
@@ -2834,18 +2837,18 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 		     rdp->qlen_last_fqs_check + qhimark)) {
 
 		/* Are we ignoring a completed grace period? */
-		note_gp_changes(rsp, rdp);
+		note_gp_changes(rdp);
 
 		/* Start a new grace period if one not already started. */
-		if (!rcu_gp_in_progress(rsp)) {
-			rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
+		if (!rcu_gp_in_progress()) {
+			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
 		} else {
 			/* Give the grace period a kick. */
 			rdp->blimit = LONG_MAX;
-			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+			if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
-				force_quiescent_state(rsp);
-			rdp->n_force_qs_snap = rsp->n_force_qs;
+				force_quiescent_state();
+			rdp->n_force_qs_snap = rcu_state.n_force_qs;
 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
 		}
 	}
@@ -2865,11 +2868,11 @@ static void rcu_leak_callback(struct rcu_head *rhp)
  * is expected to specify a CPU.
  */
 static void
-__call_rcu(struct rcu_head *head, rcu_callback_t func,
-	   struct rcu_state *rsp, int cpu, bool lazy)
+__call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
 {
 	unsigned long flags;
 	struct rcu_data *rdp;
+	struct rcu_state __maybe_unused *rsp = &rcu_state;
 
 	/* Misaligned rcu_head! */
 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
@@ -2927,7 +2930,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
 				   rcu_segcblist_n_cbs(&rdp->cblist));
 
 	/* Go handle any RCU core processing required. */
-	__call_rcu_core(rsp, rdp, head, flags);
+	__call_rcu_core(rdp, head, flags);
 	local_irq_restore(flags);
 }
 
@@ -2968,7 +2971,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
  */
 void call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
-	__call_rcu(head, func, &rcu_state, -1, 0);
+	__call_rcu(head, func, -1, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
@@ -2992,10 +2995,9 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
  * callbacks in the list of pending callbacks. Until then, this
  * function may only be called from __kfree_rcu().
  */
-void kfree_call_rcu(struct rcu_head *head,
-		    rcu_callback_t func)
+void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
-	__call_rcu(head, func, &rcu_state, -1, 1);
+	__call_rcu(head, func, -1, 1);
 }
 EXPORT_SYMBOL_GPL(kfree_call_rcu);
 
@@ -3075,21 +3077,23 @@ void cond_synchronize_sched(unsigned long oldstate)
 EXPORT_SYMBOL_GPL(cond_synchronize_sched);
 
 /*
- * Check to see if there is any immediate RCU-related work to be done
- * by the current CPU, for the specified type of RCU, returning 1 if so.
- * The checks are in order of increasing expense: checks that can be
- * carried out against CPU-local state are performed first.  However,
- * we must check for CPU stalls first, else we might not get a chance.
+ * Check to see if there is any immediate RCU-related work to be done by
+ * the current CPU, for the specified type of RCU, returning 1 if so and
+ * zero otherwise.  The checks are in order of increasing expense: checks
+ * that can be carried out against CPU-local state are performed first.
+ * However, we must check for CPU stalls first, else we might not get
+ * a chance.
  */
-static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
+static int rcu_pending(void)
 {
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Check for CPU stalls, if enabled. */
-	check_cpu_stall(rsp, rdp);
+	check_cpu_stall(rdp);
 
 	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
-	if (rcu_nohz_full_cpu(rsp))
+	if (rcu_nohz_full_cpu())
 		return 0;
 
 	/* Is the RCU core waiting for a quiescent state from this CPU? */
@@ -3101,7 +3105,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 		return 1;
 
 	/* Has RCU gone idle with this CPU needing another grace period? */
-	if (!rcu_gp_in_progress(rsp) &&
+	if (!rcu_gp_in_progress() &&
 	    rcu_segcblist_is_enabled(&rdp->cblist) &&
 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
 		return 1;
@@ -3119,21 +3123,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	return 0;
 }
 
-/*
- * Check to see if there is any immediate RCU-related work to be done
- * by the current CPU, returning 1 if so.  This function is part of the
- * RCU implementation; it is -not- an exported member of the RCU API.
- */
-static int rcu_pending(void)
-{
-	struct rcu_state *rsp;
-
-	for_each_rcu_flavor(rsp)
-		if (__rcu_pending(rsp, this_cpu_ptr(&rcu_data)))
-			return 1;
-	return 0;
-}
-
 /*
  * Return true if the specified CPU has any callback.  If all_lazy is
  * non-NULL, store an indication of whether all callbacks are lazy.
@@ -3165,11 +3154,10 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy)
  * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
  * the compiler is expected to optimize this away.
  */
-static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
-			       int cpu, unsigned long done)
+static void _rcu_barrier_trace(const char *s, int cpu, unsigned long done)
 {
-	trace_rcu_barrier(rsp->name, s, cpu,
-			  atomic_read(&rsp->barrier_cpu_count), done);
+	trace_rcu_barrier(rcu_state.name, s, cpu,
+			  atomic_read(&rcu_state.barrier_cpu_count), done);
 }
 
 /*
@@ -3182,11 +3170,10 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
 	struct rcu_state *rsp = rdp->rsp;
 
 	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
-		_rcu_barrier_trace(rsp, TPS("LastCB"), -1,
-				   rsp->barrier_sequence);
+		_rcu_barrier_trace(TPS("LastCB"), -1, rsp->barrier_sequence);
 		complete(&rsp->barrier_completion);
 	} else {
-		_rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
+		_rcu_barrier_trace(TPS("CB"), -1, rsp->barrier_sequence);
 	}
 }
 
@@ -3198,15 +3185,14 @@ static void rcu_barrier_func(void *type)
 	struct rcu_state *rsp = type;
 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
 
-	_rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
+	_rcu_barrier_trace(TPS("IRQ"), -1, rsp->barrier_sequence);
 	rdp->barrier_head.func = rcu_barrier_callback;
 	debug_rcu_head_queue(&rdp->barrier_head);
 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
 		atomic_inc(&rsp->barrier_cpu_count);
 	} else {
 		debug_rcu_head_unqueue(&rdp->barrier_head);
-		_rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
-				   rsp->barrier_sequence);
+		_rcu_barrier_trace(TPS("IRQNQ"), -1, rsp->barrier_sequence);
 	}
 }
 
@@ -3214,21 +3200,21 @@ static void rcu_barrier_func(void *type)
  * Orchestrate the specified type of RCU barrier, waiting for all
  * RCU callbacks of the specified type to complete.
  */
-static void _rcu_barrier(struct rcu_state *rsp)
+static void _rcu_barrier(void)
 {
 	int cpu;
 	struct rcu_data *rdp;
+	struct rcu_state *rsp = &rcu_state;
 	unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
 
-	_rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
+	_rcu_barrier_trace(TPS("Begin"), -1, s);
 
 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
 	mutex_lock(&rsp->barrier_mutex);
 
 	/* Did someone else do our work for us? */
 	if (rcu_seq_done(&rsp->barrier_sequence, s)) {
-		_rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
-				   rsp->barrier_sequence);
+		_rcu_barrier_trace(TPS("EarlyExit"), -1, rsp->barrier_sequence);
 		smp_mb(); /* caller's subsequent code after above check. */
 		mutex_unlock(&rsp->barrier_mutex);
 		return;
@@ -3236,7 +3222,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
 	/* Mark the start of the barrier operation. */
 	rcu_seq_start(&rsp->barrier_sequence);
-	_rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
+	_rcu_barrier_trace(TPS("Inc1"), -1, rsp->barrier_sequence);
 
 	/*
 	 * Initialize the count to one rather than to zero in order to
@@ -3258,23 +3244,23 @@ static void _rcu_barrier(struct rcu_state *rsp)
 			continue;
 		rdp = per_cpu_ptr(&rcu_data, cpu);
 		if (rcu_is_nocb_cpu(cpu)) {
-			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
-				_rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
+			if (!rcu_nocb_cpu_needs_barrier(cpu)) {
+				_rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
 						   rsp->barrier_sequence);
 			} else {
-				_rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
+				_rcu_barrier_trace(TPS("OnlineNoCB"), cpu,
 						   rsp->barrier_sequence);
 				smp_mb__before_atomic();
 				atomic_inc(&rsp->barrier_cpu_count);
 				__call_rcu(&rdp->barrier_head,
-					   rcu_barrier_callback, rsp, cpu, 0);
+					   rcu_barrier_callback, cpu, 0);
 			}
 		} else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
-			_rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
+			_rcu_barrier_trace(TPS("OnlineQ"), cpu,
 					   rsp->barrier_sequence);
 			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
 		} else {
-			_rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
+			_rcu_barrier_trace(TPS("OnlineNQ"), cpu,
 					   rsp->barrier_sequence);
 		}
 	}
@@ -3291,7 +3277,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	wait_for_completion(&rsp->barrier_completion);
 
 	/* Mark the end of the barrier operation. */
-	_rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
+	_rcu_barrier_trace(TPS("Inc2"), -1, rsp->barrier_sequence);
 	rcu_seq_end(&rsp->barrier_sequence);
 
 	/* Other rcu_barrier() invocations can now safely proceed. */
@@ -3303,7 +3289,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
  */
 void rcu_barrier_bh(void)
 {
-	_rcu_barrier(&rcu_state);
+	_rcu_barrier();
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
 
@@ -3317,7 +3303,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  */
 void rcu_barrier(void)
 {
-	_rcu_barrier(&rcu_state);
+	_rcu_barrier();
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
@@ -3364,7 +3350,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
  * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
 static void __init
-rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
+rcu_boot_init_percpu_data(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
@@ -3373,32 +3359,34 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
-	rdp->rcu_ofl_gp_seq = rsp->gp_seq;
+	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
-	rdp->rcu_onl_gp_seq = rsp->gp_seq;
+	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
 	rdp->cpu = cpu;
-	rdp->rsp = rsp;
+	rdp->rsp = &rcu_state;
 	rcu_boot_init_nocb_percpu_data(rdp);
 }
 
 /*
- * Initialize a CPU's per-CPU RCU data.  Note that only one online or
+ * Invoked early in the CPU-online process, when pretty much all services
+ * are available.  The incoming CPU is not present.
+ *
+ * Initializes a CPU's per-CPU RCU data.  Note that only one online or
  * offline event can be happening at a given time.  Note also that we can
  * accept some slop in the rsp->gp_seq access due to the fact that this
  * CPU cannot possibly have any RCU callbacks in flight yet.
  */
-static void
-rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
+int rcutree_prepare_cpu(unsigned int cpu)
 {
 	unsigned long flags;
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 
 	/* Set up local state, ensuring consistent view of global state. */
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rdp->qlen_last_fqs_check = 0;
-	rdp->n_force_qs_snap = rsp->n_force_qs;
+	rdp->n_force_qs_snap = rcu_state.n_force_qs;
 	rdp->blimit = blimit;
 	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
 	    !init_nocb_callback_list(rdp))
@@ -3422,21 +3410,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->core_needs_qs = false;
 	rdp->rcu_iw_pending = false;
 	rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
-	trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuonl"));
+	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-}
-
-/*
- * Invoked early in the CPU-online process, when pretty much all
- * services are available.  The incoming CPU is not present.
- */
-int rcutree_prepare_cpu(unsigned int cpu)
-{
-	struct rcu_state *rsp;
-
-	for_each_rcu_flavor(rsp)
-		rcu_init_percpu_data(cpu, rsp);
-
 	rcu_prepare_kthreads(cpu);
 	rcu_spawn_all_nocb_kthreads(cpu);
 
@@ -3505,32 +3480,6 @@ int rcutree_offline_cpu(unsigned int cpu)
 	return 0;
 }
 
-/*
- * Near the end of the offline process.  We do only tracing here.
- */
-int rcutree_dying_cpu(unsigned int cpu)
-{
-	struct rcu_state *rsp;
-
-	for_each_rcu_flavor(rsp)
-		rcu_cleanup_dying_cpu(rsp);
-	return 0;
-}
-
-/*
- * The outgoing CPU is gone and we are running elsewhere.
- */
-int rcutree_dead_cpu(unsigned int cpu)
-{
-	struct rcu_state *rsp;
-
-	for_each_rcu_flavor(rsp) {
-		rcu_cleanup_dead_cpu(cpu, rsp);
-		do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
-	}
-	return 0;
-}
-
 static DEFINE_PER_CPU(int, rcu_cpu_started);
 
 /*
@@ -3576,7 +3525,7 @@ void rcu_cpu_starting(unsigned int cpu)
 		rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
 		if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
 			/* Report QS -after- changing ->qsmaskinitnext! */
-			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 		} else {
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
@@ -3586,63 +3535,55 @@ void rcu_cpu_starting(unsigned int cpu)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
- * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function.  We now remove it from the rcu_node tree's ->qsmaskinitnext
- * bit masks.
+ * The outgoing CPU has no further need of RCU, so remove it from
+ * the rcu_node tree's ->qsmaskinitnext bit masks.
+ *
+ * Note that this function is special in that it is invoked directly
+ * from the outgoing CPU rather than from the cpuhp_step mechanism.
+ * This is because this function must be invoked at a precise location.
  */
-static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+void rcu_report_dead(unsigned int cpu)
 {
 	unsigned long flags;
 	unsigned long mask;
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
+	/* QS for any half-done expedited RCU-sched GP. */
+	preempt_disable();
+	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
+	preempt_enable();
+	rcu_preempt_deferred_qs(current);
+
 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
 	mask = rdp->grpmask;
-	spin_lock(&rsp->ofl_lock);
+	spin_lock(&rcu_state.ofl_lock);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
-	rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
-	rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
+	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
+	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
-		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	}
 	rnp->qsmaskinitnext &= ~mask;
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-	spin_unlock(&rsp->ofl_lock);
-}
-
-/*
- * The outgoing function has no further need of RCU, so remove it from
- * the list of CPUs that RCU must track.
- *
- * Note that this function is special in that it is invoked directly
- * from the outgoing CPU rather than from the cpuhp_step mechanism.
- * This is because this function must be invoked at a precise location.
- */
-void rcu_report_dead(unsigned int cpu)
-{
-	struct rcu_state *rsp;
-
-	/* QS for any half-done expedited RCU-sched GP. */
-	preempt_disable();
-	rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data));
-	preempt_enable();
-	rcu_preempt_deferred_qs(current);
-	for_each_rcu_flavor(rsp)
-		rcu_cleanup_dying_idle_cpu(cpu, rsp);
+	spin_unlock(&rcu_state.ofl_lock);
 
 	per_cpu(rcu_cpu_started, cpu) = 0;
 }
 
-/* Migrate the dead CPU's callbacks to the current CPU. */
-static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
+/*
+ * The outgoing CPU has just passed through the dying-idle state, and we
+ * are being invoked from the CPU that was IPIed to continue the offline
+ * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
+ */
+void rcutree_migrate_callbacks(int cpu)
 {
 	unsigned long flags;
 	struct rcu_data *my_rdp;
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
+	struct rcu_node *rnp_root = rcu_get_root();
 	bool needwake;
 
 	if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
@@ -3656,33 +3597,20 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
 	}
 	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
 	/* Leverage recent GPs and set GP for new callbacks. */
-	needwake = rcu_advance_cbs(rsp, rnp_root, rdp) ||
-		   rcu_advance_cbs(rsp, rnp_root, my_rdp);
+	needwake = rcu_advance_cbs(rnp_root, rdp) ||
+		   rcu_advance_cbs(rnp_root, my_rdp);
 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
 	raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
 	if (needwake)
-		rcu_gp_kthread_wake(rsp);
+		rcu_gp_kthread_wake();
 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
 		  !rcu_segcblist_empty(&rdp->cblist),
 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
 		  rcu_segcblist_first_cb(&rdp->cblist));
 }
-
-/*
- * The outgoing CPU has just passed through the dying-idle state,
- * and we are being invoked from the CPU that was IPIed to continue the
- * offline operation.  We need to migrate the outgoing CPU's callbacks.
- */
-void rcutree_migrate_callbacks(int cpu)
-{
-	struct rcu_state *rsp;
-
-	for_each_rcu_flavor(rsp)
-		rcu_migrate_callbacks(cpu, rsp);
-}
 #endif
 
 /*
@@ -3738,9 +3666,9 @@ static int __init rcu_spawn_gp_kthread(void)
 
 	rcu_scheduler_fully_active = 1;
 	for_each_rcu_flavor(rsp) {
-		t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
+		t = kthread_create(rcu_gp_kthread, NULL, "%s", rsp->name);
 		BUG_ON(IS_ERR(t));
-		rnp = rcu_get_root(rsp);
+		rnp = rcu_get_root();
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rsp->gp_kthread = t;
 		if (kthread_prio) {
@@ -3778,7 +3706,7 @@ void rcu_scheduler_starting(void)
 /*
  * Helper function for rcu_init() that initializes one rcu_state structure.
  */
-static void __init rcu_init_one(struct rcu_state *rsp)
+static void __init rcu_init_one(void)
 {
 	static const char * const buf[] = RCU_NODE_NAME_INIT;
 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
@@ -3790,6 +3718,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 	int i;
 	int j;
 	struct rcu_node *rnp;
+	struct rcu_state *rsp = &rcu_state;
 
 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
 
@@ -3847,12 +3776,12 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 
 	init_swait_queue_head(&rsp->gp_wq);
 	init_swait_queue_head(&rsp->expedited_wq);
-	rnp = rcu_first_leaf_node(rsp);
+	rnp = rcu_first_leaf_node();
 	for_each_possible_cpu(i) {
 		while (i > rnp->grphi)
 			rnp++;
 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
-		rcu_boot_init_percpu_data(i, rsp);
+		rcu_boot_init_percpu_data(i);
 	}
 	list_add(&rsp->flavors, &rcu_struct_flavors);
 }
@@ -3940,14 +3869,14 @@ static void __init rcu_init_geometry(void)
  * Dump out the structure of the rcu_node combining tree associated
  * with the rcu_state structure.
  */
-static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
+static void __init rcu_dump_rcu_node_tree(void)
 {
 	int level = 0;
 	struct rcu_node *rnp;
 
 	pr_info("rcu_node tree layout dump\n");
 	pr_info(" ");
-	rcu_for_each_node_breadth_first(rsp, rnp) {
+	rcu_for_each_node_breadth_first(rnp) {
 		if (rnp->level != level) {
 			pr_cont("\n");
 			pr_info(" ");
@@ -3969,9 +3898,9 @@ void __init rcu_init(void)
 
 	rcu_bootup_announce();
 	rcu_init_geometry();
-	rcu_init_one(&rcu_state);
+	rcu_init_one();
 	if (dump_tree)
-		rcu_dump_rcu_node_tree(&rcu_state);
+		rcu_dump_rcu_node_tree();
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
 	/*
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index d60304f1ef56..b21d79bdab23 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -61,7 +61,6 @@ struct rcu_dynticks {
 /* Communicate arguments to a workqueue handler. */
 struct rcu_exp_work {
 	smp_call_func_t rew_func;
-	struct rcu_state *rew_rsp;
 	unsigned long rew_s;
 	struct work_struct rew_work;
 };
@@ -452,23 +451,17 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void rcu_print_detail_task_stall(struct rcu_state *rsp);
+static void rcu_print_detail_task_stall(void);
 static int rcu_print_task_stall(struct rcu_node *rnp);
 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
-static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
-					    struct rcu_node *rnp);
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 static void rcu_flavor_check_callbacks(int user);
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
-static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
-			    int ncheck);
+static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static void invoke_rcu_callbacks_kthread(void);
 static bool rcu_is_callbacks_kthread(void);
-#ifdef CONFIG_RCU_BOOST
-static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-						 struct rcu_node *rnp);
-#endif /* #ifdef CONFIG_RCU_BOOST */
 static void __init rcu_spawn_boost_kthreads(void);
 static void rcu_prepare_kthreads(int cpu);
 static void rcu_cleanup_after_idle(void);
@@ -478,11 +471,11 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
 static void rcu_preempt_deferred_qs(struct task_struct *t);
 static void print_cpu_stall_info_begin(void);
-static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
+static void print_cpu_stall_info(int cpu);
 static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
+static bool rcu_nocb_cpu_needs_barrier(int cpu);
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
 static void rcu_init_one_nocb(struct rcu_node *rnp);
@@ -497,11 +490,11 @@ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_all_nocb_kthreads(int cpu);
 static void __init rcu_spawn_nocb_kthreads(void);
 #ifdef CONFIG_RCU_NOCB_CPU
-static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
+static void __init rcu_organize_nocb_kthreads(void);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 static bool init_nocb_callback_list(struct rcu_data *rdp);
 static void rcu_bind_gp_kthread(void);
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
+static bool rcu_nohz_full_cpu(void);
 static void rcu_dynticks_task_enter(void);
 static void rcu_dynticks_task_exit(void);
 
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 298a6904bbcd..060bdb45cd95 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -25,39 +25,39 @@
 /*
  * Record the start of an expedited grace period.
  */
-static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
+static void rcu_exp_gp_seq_start(void)
 {
-	rcu_seq_start(&rsp->expedited_sequence);
+	rcu_seq_start(&rcu_state.expedited_sequence);
 }
 
 /*
  * Return the value that the expedited-grace-period counter will have
  * at the end of the current grace period.
  */
-static __maybe_unused unsigned long rcu_exp_gp_seq_endval(struct rcu_state *rsp)
+static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
 {
-	return rcu_seq_endval(&rsp->expedited_sequence);
+	return rcu_seq_endval(&rcu_state.expedited_sequence);
 }
 
 /*
  * Record the end of an expedited grace period.
  */
-static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
+static void rcu_exp_gp_seq_end(void)
 {
-	rcu_seq_end(&rsp->expedited_sequence);
+	rcu_seq_end(&rcu_state.expedited_sequence);
 	smp_mb(); /* Ensure that consecutive grace periods serialize. */
 }
 
 /*
  * Take a snapshot of the expedited-grace-period counter.
  */
-static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
+static unsigned long rcu_exp_gp_seq_snap(void)
 {
 	unsigned long s;
 
 	smp_mb(); /* Caller's modifications seen first by other CPUs. */
-	s = rcu_seq_snap(&rsp->expedited_sequence);
-	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
+	s = rcu_seq_snap(&rcu_state.expedited_sequence);
+	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
 	return s;
 }
 
@@ -66,9 +66,9 @@ static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
  * if a full expedited grace period has elapsed since that snapshot
  * was taken.
  */
-static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
+static bool rcu_exp_gp_seq_done(unsigned long s)
 {
-	return rcu_seq_done(&rsp->expedited_sequence, s);
+	return rcu_seq_done(&rcu_state.expedited_sequence, s);
 }
 
 /*
@@ -78,26 +78,26 @@ static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
  * ever been online.  This means that this function normally takes its
  * no-work-to-do fastpath.
  */
-static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
+static void sync_exp_reset_tree_hotplug(void)
 {
 	bool done;
 	unsigned long flags;
 	unsigned long mask;
 	unsigned long oldmask;
-	int ncpus = smp_load_acquire(&rsp->ncpus); /* Order against locking. */
+	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
 	struct rcu_node *rnp;
 	struct rcu_node *rnp_up;
 
 	/* If no new CPUs onlined since last time, nothing to do. */
-	if (likely(ncpus == rsp->ncpus_snap))
+	if (likely(ncpus == rcu_state.ncpus_snap))
 		return;
-	rsp->ncpus_snap = ncpus;
+	rcu_state.ncpus_snap = ncpus;
 
 	/*
 	 * Each pass through the following loop propagates newly onlined
 	 * CPUs for the current rcu_node structure up the rcu_node tree.
 	 */
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmaskinit == rnp->expmaskinitnext) {
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -135,13 +135,13 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
  * Reset the ->expmask values in the rcu_node tree in preparation for
  * a new expedited grace period.
  */
-static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
+static void __maybe_unused sync_exp_reset_tree(void)
 {
 	unsigned long flags;
 	struct rcu_node *rnp;
 
-	sync_exp_reset_tree_hotplug(rsp);
-	rcu_for_each_node_breadth_first(rsp, rnp) {
+	sync_exp_reset_tree_hotplug();
+	rcu_for_each_node_breadth_first(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		WARN_ON_ONCE(rnp->expmask);
 		rnp->expmask = rnp->expmaskinit;
@@ -194,7 +194,7 @@ static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
  *
  * Caller must hold the specified rcu_node structure's ->lock.
  */
-static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+static void __rcu_report_exp_rnp(struct rcu_node *rnp,
 				 bool wake, unsigned long flags)
 	__releases(rnp->lock)
 {
@@ -212,7 +212,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
-				swake_up_one(&rsp->expedited_wq);
+				swake_up_one(&rcu_state.expedited_wq);
 			}
 			break;
 		}
@@ -229,20 +229,19 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
  * Report expedited quiescent state for specified node.  This is a
  * lock-acquisition wrapper function for __rcu_report_exp_rnp().
  */
-static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
-					      struct rcu_node *rnp, bool wake)
+static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
 {
 	unsigned long flags;
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
+	__rcu_report_exp_rnp(rnp, wake, flags);
 }
 
 /*
  * Report expedited quiescent state for multiple CPUs, all covered by the
  * specified leaf rcu_node structure.
  */
-static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
+static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
 				    unsigned long mask, bool wake)
 {
 	unsigned long flags;
@@ -253,23 +252,23 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 		return;
 	}
 	rnp->expmask &= ~mask;
-	__rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
+	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
 }
 
 /*
  * Report expedited quiescent state for specified rcu_data (CPU).
  */
-static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp)
+static void rcu_report_exp_rdp(struct rcu_data *rdp)
 {
 	WRITE_ONCE(rdp->deferred_qs, false);
-	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, true);
+	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
 }
 
 /* Common code for work-done checking. */
-static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
+static bool sync_exp_work_done(unsigned long s)
 {
-	if (rcu_exp_gp_seq_done(rsp, s)) {
-		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
+	if (rcu_exp_gp_seq_done(s)) {
+		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
 		/* Ensure test happens before caller kfree(). */
 		smp_mb__before_atomic(); /* ^^^ */
 		return true;
@@ -284,28 +283,28 @@ static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
  * with the mutex held, indicating that the caller must actually do the
  * expedited grace period.
  */
-static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
+static bool exp_funnel_lock(unsigned long s)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
 	struct rcu_node *rnp = rdp->mynode;
-	struct rcu_node *rnp_root = rcu_get_root(rsp);
+	struct rcu_node *rnp_root = rcu_get_root();
 
 	/* Low-contention fastpath. */
 	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
 	    (rnp == rnp_root ||
 	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
-	    mutex_trylock(&rsp->exp_mutex))
+	    mutex_trylock(&rcu_state.exp_mutex))
 		goto fastpath;
 
 	/*
 	 * Each pass through the following loop works its way up
 	 * the rcu_node tree, returning if others have done the work or
-	 * otherwise falls through to acquire rsp->exp_mutex.  The mapping
+	 * otherwise falls through to acquire ->exp_mutex.  The mapping
 	 * from CPU to rcu_node structure can be inexact, as it is just
 	 * promoting locality and is not strictly needed for correctness.
 	 */
 	for (; rnp != NULL; rnp = rnp->parent) {
-		if (sync_exp_work_done(rsp, s))
+		if (sync_exp_work_done(s))
 			return true;
 
 		/* Work not done, either wait here or go up. */
@@ -314,26 +313,26 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 
 			/* Someone else doing GP, so wait for them. */
 			spin_unlock(&rnp->exp_lock);
-			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
+			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
 						  rnp->grplo, rnp->grphi,
 						  TPS("wait"));
 			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
-				   sync_exp_work_done(rsp, s));
+				   sync_exp_work_done(s));
 			return true;
 		}
 		rnp->exp_seq_rq = s; /* Followers can wait on us. */
 		spin_unlock(&rnp->exp_lock);
-		trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
-					  rnp->grphi, TPS("nxtlvl"));
+		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
+					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
 	}
-	mutex_lock(&rsp->exp_mutex);
+	mutex_lock(&rcu_state.exp_mutex);
 fastpath:
-	if (sync_exp_work_done(rsp, s)) {
-		mutex_unlock(&rsp->exp_mutex);
+	if (sync_exp_work_done(s)) {
+		mutex_unlock(&rcu_state.exp_mutex);
 		return true;
 	}
-	rcu_exp_gp_seq_start(rsp);
-	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
+	rcu_exp_gp_seq_start();
+	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
 	return false;
 }
 
@@ -352,7 +351,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 	struct rcu_exp_work *rewp =
 		container_of(wp, struct rcu_exp_work, rew_work);
 	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
-	struct rcu_state *rsp = rewp->rew_rsp;
 
 	func = rewp->rew_func;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -400,7 +398,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 			mask_ofl_test |= mask;
 			continue;
 		}
-		ret = smp_call_function_single(cpu, func, rsp, 0);
+		ret = smp_call_function_single(cpu, func, NULL, 0);
 		if (!ret) {
 			mask_ofl_ipi &= ~mask;
 			continue;
@@ -411,7 +409,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 		    (rnp->expmask & mask)) {
 			/* Online, so delay for a bit and try again. */
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-			trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("selectofl"));
+			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
 			schedule_timeout_uninterruptible(1);
 			goto retry_ipi;
 		}
@@ -423,33 +421,31 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 	/* Report quiescent states for those that went offline. */
 	mask_ofl_test |= mask_ofl_ipi;
 	if (mask_ofl_test)
-		rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
+		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
 }
 
 /*
  * Select the nodes that the upcoming expedited grace period needs
  * to wait for.
  */
-static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
-				     smp_call_func_t func)
+static void sync_rcu_exp_select_cpus(smp_call_func_t func)
 {
 	int cpu;
 	struct rcu_node *rnp;
 
-	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
-	sync_exp_reset_tree(rsp);
-	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select"));
+	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
+	sync_exp_reset_tree();
+	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
 
 	/* Schedule work for each leaf rcu_node structure. */
-	rcu_for_each_leaf_node(rsp, rnp) {
+	rcu_for_each_leaf_node(rnp) {
 		rnp->exp_need_flush = false;
 		if (!READ_ONCE(rnp->expmask))
 			continue; /* Avoid early boot non-existent wq. */
 		rnp->rew.rew_func = func;
-		rnp->rew.rew_rsp = rsp;
 		if (!READ_ONCE(rcu_par_gp_wq) ||
 		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
-		    rcu_is_last_leaf_node(rsp, rnp)) {
+		    rcu_is_last_leaf_node(rnp)) {
 			/* No workqueues yet or last leaf, do direct call. */
 			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
 			continue;
@@ -466,12 +462,12 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 	}
 
 	/* Wait for workqueue jobs (if any) to complete. */
-	rcu_for_each_leaf_node(rsp, rnp)
+	rcu_for_each_leaf_node(rnp)
 		if (rnp->exp_need_flush)
 			flush_work(&rnp->rew.rew_work);
 }
 
-static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
+static void synchronize_sched_expedited_wait(void)
 {
 	int cpu;
 	unsigned long jiffies_stall;
@@ -479,16 +475,16 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 	unsigned long mask;
 	int ndetected;
 	struct rcu_node *rnp;
-	struct rcu_node *rnp_root = rcu_get_root(rsp);
+	struct rcu_node *rnp_root = rcu_get_root();
 	int ret;
 
-	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("startwait"));
+	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
 	jiffies_stall = rcu_jiffies_till_stall_check();
 	jiffies_start = jiffies;
 
 	for (;;) {
 		ret = swait_event_timeout_exclusive(
-				rsp->expedited_wq,
+				rcu_state.expedited_wq,
 				sync_rcu_preempt_exp_done_unlocked(rnp_root),
 				jiffies_stall);
 		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
@@ -498,9 +494,9 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 			continue;
 		panic_on_rcu_stall();
 		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
-		       rsp->name);
+		       rcu_state.name);
 		ndetected = 0;
-		rcu_for_each_leaf_node(rsp, rnp) {
+		rcu_for_each_leaf_node(rnp) {
 			ndetected += rcu_print_task_exp_stall(rnp);
 			for_each_leaf_node_possible_cpu(rnp, cpu) {
 				struct rcu_data *rdp;
@@ -517,11 +513,11 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 			}
 		}
 		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
-			jiffies - jiffies_start, rsp->expedited_sequence,
+			jiffies - jiffies_start, rcu_state.expedited_sequence,
 			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
 		if (ndetected) {
 			pr_err("blocking rcu_node structures:");
-			rcu_for_each_node_breadth_first(rsp, rnp) {
+			rcu_for_each_node_breadth_first(rnp) {
 				if (rnp == rnp_root)
 					continue; /* printed unconditionally */
 				if (sync_rcu_preempt_exp_done_unlocked(rnp))
@@ -533,7 +529,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 			}
 			pr_cont("\n");
 		}
-		rcu_for_each_leaf_node(rsp, rnp) {
+		rcu_for_each_leaf_node(rnp) {
 			for_each_leaf_node_possible_cpu(rnp, cpu) {
 				mask = leaf_node_cpu_bit(rnp, cpu);
 				if (!(rnp->expmask & mask))
@@ -551,21 +547,21 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
  * grace period.  Also update all the ->exp_seq_rq counters as needed
  * in order to avoid counter-wrap problems.
  */
-static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
+static void rcu_exp_wait_wake(unsigned long s)
 {
 	struct rcu_node *rnp;
 
-	synchronize_sched_expedited_wait(rsp);
-	rcu_exp_gp_seq_end(rsp);
-	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
+	synchronize_sched_expedited_wait();
+	rcu_exp_gp_seq_end();
+	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
 
 	/*
 	 * Switch over to wakeup mode, allowing the next GP, but -only- the
 	 * next GP, to proceed.
 	 */
-	mutex_lock(&rsp->exp_wake_mutex);
+	mutex_lock(&rcu_state.exp_wake_mutex);
 
-	rcu_for_each_node_breadth_first(rsp, rnp) {
+	rcu_for_each_node_breadth_first(rnp) {
 		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
 			spin_lock(&rnp->exp_lock);
 			/* Recheck, avoid hang in case someone just arrived. */
@@ -574,24 +570,23 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 			spin_unlock(&rnp->exp_lock);
 		}
 		smp_mb(); /* All above changes before wakeup. */
-		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
+		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
 	}
-	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
-	mutex_unlock(&rsp->exp_wake_mutex);
+	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
+	mutex_unlock(&rcu_state.exp_wake_mutex);
 }
 
 /*
  * Common code to drive an expedited grace period forward, used by
  * workqueues and mid-boot-time tasks.
  */
-static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
-				  smp_call_func_t func, unsigned long s)
+static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s)
 {
 	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp, func);
+	sync_rcu_exp_select_cpus(func);
 
 	/* Wait and clean up, including waking everyone. */
-	rcu_exp_wait_wake(rsp, s);
+	rcu_exp_wait_wake(s);
 }
 
 /*
@@ -602,15 +597,14 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
 	struct rcu_exp_work *rewp;
 
 	rewp = container_of(wp, struct rcu_exp_work, rew_work);
-	rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
+	rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s);
 }
 
 /*
  * Given an smp_call_function() handler, kick off the specified flavor
  * of expedited grace period.
  */
-static void _synchronize_rcu_expedited(struct rcu_state *rsp,
-				       smp_call_func_t func)
+static void _synchronize_rcu_expedited(smp_call_func_t func)
 {
 	struct rcu_data *rdp;
 	struct rcu_exp_work rew;
@@ -624,18 +618,17 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
 	}
 
 	/* Take a snapshot of the sequence number.  */
-	s = rcu_exp_gp_seq_snap(rsp);
-	if (exp_funnel_lock(rsp, s))
+	s = rcu_exp_gp_seq_snap();
+	if (exp_funnel_lock(s))
 		return;  /* Someone else did our work for us. */
 
 	/* Ensure that load happens before action based on it. */
 	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
 		/* Direct call during scheduler init and early_initcalls(). */
-		rcu_exp_sel_wait_wake(rsp, func, s);
+		rcu_exp_sel_wait_wake(func, s);
 	} else {
 		/* Marshall arguments & schedule the expedited grace period. */
 		rew.rew_func = func;
-		rew.rew_rsp = rsp;
 		rew.rew_s = s;
 		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
 		queue_work(rcu_gp_wq, &rew.rew_work);
@@ -643,13 +636,13 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
 
 	/* Wait for expedited grace period to complete. */
 	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
-	rnp = rcu_get_root(rsp);
+	rnp = rcu_get_root();
 	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
-		   sync_exp_work_done(rsp, s));
+		   sync_exp_work_done(s));
 	smp_mb(); /* Workqueue actions happen before return. */
 
 	/* Let the next expedited grace period start. */
-	mutex_unlock(&rsp->exp_mutex);
+	mutex_unlock(&rcu_state.exp_mutex);
 }
 
 #ifdef CONFIG_PREEMPT_RCU
@@ -661,10 +654,9 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
  * ->expmask fields in the rcu_node tree.  Otherwise, immediately
  * report the quiescent state.
  */
-static void sync_rcu_exp_handler(void *info)
+static void sync_rcu_exp_handler(void *unused)
 {
 	unsigned long flags;
-	struct rcu_state *rsp = info;
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rdp->mynode;
 	struct task_struct *t = current;
@@ -677,7 +669,7 @@ static void sync_rcu_exp_handler(void *info)
 	if (!t->rcu_read_lock_nesting) {
 		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
 		    rcu_dynticks_curr_cpu_in_eqs()) {
-			rcu_report_exp_rdp(rsp, rdp);
+			rcu_report_exp_rdp(rdp);
 		} else {
 			rdp->deferred_qs = true;
 			resched_cpu(rdp->cpu);
@@ -756,8 +748,6 @@ static void sync_sched_exp_online_cleanup(int cpu)
  */
 void synchronize_rcu_expedited(void)
 {
-	struct rcu_state *rsp = &rcu_state;
-
 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
 			 lock_is_held(&rcu_lock_map) ||
 			 lock_is_held(&rcu_sched_lock_map),
@@ -765,7 +755,7 @@ void synchronize_rcu_expedited(void)
 
 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 		return;
-	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
+	_synchronize_rcu_expedited(sync_rcu_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
@@ -783,7 +773,7 @@ static void sync_sched_exp_handler(void *unused)
 	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
 		return;
 	if (rcu_is_cpu_rrupt_from_idle()) {
-		rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data));
+		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
 		return;
 	}
 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
@@ -798,13 +788,12 @@ static void sync_sched_exp_online_cleanup(int cpu)
 	struct rcu_data *rdp;
 	int ret;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp = &rcu_state;
 
 	rdp = per_cpu_ptr(&rcu_data, cpu);
 	rnp = rdp->mynode;
 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
 		return;
-	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
+	ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0);
 	WARN_ON_ONCE(ret);
 }
 
@@ -831,8 +820,6 @@ static int rcu_blocking_is_gp(void)
 /* PREEMPT=n implementation of synchronize_rcu_expedited(). */
 void synchronize_rcu_expedited(void)
 {
-	struct rcu_state *rsp = &rcu_state;
-
 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
 			 lock_is_held(&rcu_lock_map) ||
 			 lock_is_held(&rcu_sched_lock_map),
@@ -842,7 +829,7 @@ void synchronize_rcu_expedited(void)
 	if (rcu_blocking_is_gp())
 		return;
 
-	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
+	_synchronize_rcu_expedited(sync_sched_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 18175ca19f34..b60d3df92ff5 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -123,8 +123,7 @@ static void __init rcu_bootup_announce_oddness(void)
 
 #ifdef CONFIG_PREEMPT_RCU
 
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-			       bool wake);
+static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
 static void rcu_read_unlock_special(struct task_struct *t);
 
 /*
@@ -281,7 +280,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 	 * still in a quiescent state in any case.)
 	 */
 	if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
-		rcu_report_exp_rdp(rdp->rsp, rdp);
+		rcu_report_exp_rdp(rdp);
 	else
 		WARN_ON_ONCE(rdp->deferred_qs);
 }
@@ -381,7 +380,7 @@ void rcu_note_context_switch(bool preempt)
 	 */
 	rcu_qs();
 	if (rdp->deferred_qs)
-		rcu_report_exp_rdp(&rcu_state, rdp);
+		rcu_report_exp_rdp(rdp);
 	trace_rcu_utilization(TPS("End context switch"));
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
@@ -509,7 +508,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 	 * blocked-tasks list below.
 	 */
 	if (rdp->deferred_qs) {
-		rcu_report_exp_rdp(&rcu_state, rdp);
+		rcu_report_exp_rdp(rdp);
 		if (!t->rcu_read_unlock_special.s) {
 			local_irq_restore(flags);
 			return;
@@ -566,7 +565,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 							 rnp->grplo,
 							 rnp->grphi,
 							 !!rnp->gp_tasks);
-			rcu_report_unblock_qs_rnp(&rcu_state, rnp, flags);
+			rcu_report_unblock_qs_rnp(rnp, flags);
 		} else {
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
@@ -580,7 +579,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 		 * then we need to report up the rcu_node hierarchy.
 		 */
 		if (!empty_exp && empty_exp_now)
-			rcu_report_exp_rnp(&rcu_state, rnp, true);
+			rcu_report_exp_rnp(rnp, true);
 	} else {
 		local_irq_restore(flags);
 	}
@@ -683,12 +682,12 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
  * Dump detailed information for all tasks blocking the current RCU
  * grace period.
  */
-static void rcu_print_detail_task_stall(struct rcu_state *rsp)
+static void rcu_print_detail_task_stall(void)
 {
-	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct rcu_node *rnp = rcu_get_root();
 
 	rcu_print_detail_task_stall_rnp(rnp);
-	rcu_for_each_leaf_node(rsp, rnp)
+	rcu_for_each_leaf_node(rnp)
 		rcu_print_detail_task_stall_rnp(rnp);
 }
 
@@ -756,14 +755,13 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * Also, if there are blocked tasks on the list, they automatically
  * block the newly created grace period, so set up ->gp_tasks accordingly.
  */
-static void
-rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
 	struct task_struct *t;
 
 	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
 	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
-		dump_blkd_tasks(rsp, rnp, 10);
+		dump_blkd_tasks(rnp, 10);
 	if (rcu_preempt_has_tasks(rnp) &&
 	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
 		rnp->gp_tasks = rnp->blkd_tasks.next;
@@ -884,7 +882,7 @@ void exit_rcu(void)
  * specified number of elements.
  */
 static void
-dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
 {
 	int cpu;
 	int i;
@@ -948,7 +946,7 @@ static void rcu_qs(void)
 	if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
 		return;
 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, false);
-	rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data));
+	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
 }
 
 /*
@@ -1005,7 +1003,7 @@ static void rcu_preempt_deferred_qs(struct task_struct *t) { }
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static void rcu_print_detail_task_stall(struct rcu_state *rsp)
+static void rcu_print_detail_task_stall(void)
 {
 }
 
@@ -1033,8 +1031,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * so there is no need to check for blocked tasks.  So check only for
  * bogus qsmask values.
  */
-static void
-rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
 	WARN_ON_ONCE(rnp->qsmask);
 }
@@ -1095,7 +1092,7 @@ void exit_rcu(void)
  * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
  */
 static void
-dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
 {
 	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
 }
@@ -1292,21 +1289,20 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * already exist.  We only create this kthread for preemptible RCU.
  * Returns zero if all is well, a negated errno otherwise.
  */
-static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-				       struct rcu_node *rnp)
+static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 {
-	int rnp_index = rnp - &rsp->node[0];
+	int rnp_index = rnp - rcu_get_root();
 	unsigned long flags;
 	struct sched_param sp;
 	struct task_struct *t;
 
-	if (&rcu_state != rsp)
+	if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
 		return 0;
 
 	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
 		return 0;
 
-	rsp->boost = 1;
+	rcu_state.boost = 1;
 	if (rnp->boost_kthread_task != NULL)
 		return 0;
 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1324,7 +1320,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 
 static void rcu_kthread_do_work(void)
 {
-	rcu_do_batch(&rcu_state, this_cpu_ptr(&rcu_data));
+	rcu_do_batch(this_cpu_ptr(&rcu_data));
 }
 
 static void rcu_cpu_kthread_setup(unsigned int cpu)
@@ -1431,8 +1427,8 @@ static void __init rcu_spawn_boost_kthreads(void)
 	for_each_possible_cpu(cpu)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
-	rcu_for_each_leaf_node(&rcu_state, rnp)
-		(void)rcu_spawn_one_boost_kthread(&rcu_state, rnp);
+	rcu_for_each_leaf_node(rnp)
+		(void)rcu_spawn_one_boost_kthread(rnp);
 }
 
 static void rcu_prepare_kthreads(int cpu)
@@ -1442,7 +1438,7 @@ static void rcu_prepare_kthreads(int cpu)
 
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
 	if (rcu_scheduler_fully_active)
-		(void)rcu_spawn_one_boost_kthread(&rcu_state, rnp);
+		(void)rcu_spawn_one_boost_kthread(rnp);
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1586,7 +1582,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
 					  rcu_seq_current(&rnp->gp_seq)) ||
 		     unlikely(READ_ONCE(rdp->gpwrap))) &&
 		    rcu_segcblist_pend_cbs(&rdp->cblist))
-			note_gp_changes(rsp, rdp);
+			note_gp_changes(rdp);
 
 		if (rcu_segcblist_ready_cbs(&rdp->cblist))
 			cbs_ready = true;
@@ -1697,10 +1693,10 @@ static void rcu_prepare_for_idle(void)
 			continue;
 		rnp = rdp->mynode;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
-		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+		needwake = rcu_accelerate_cbs(rnp, rdp);
 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 		if (needwake)
-			rcu_gp_kthread_wake(rsp);
+			rcu_gp_kthread_wake();
 	}
 }
 
@@ -1774,7 +1770,7 @@ static void print_cpu_stall_info_begin(void)
  *
  * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
  */
-static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
+static void print_cpu_stall_info(int cpu)
 {
 	unsigned long delta;
 	char fast_no_hz[72];
@@ -1789,7 +1785,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 	 */
 	touch_nmi_watchdog();
 
-	ticks_value = rcu_seq_ctr(rsp->gp_seq - rdp->gp_seq);
+	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
 	if (ticks_value) {
 		ticks_title = "GPs behind";
 	} else {
@@ -1810,7 +1806,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 	       rcu_dynticks_snap(rdtp) & 0xfff,
 	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
-	       READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
+	       READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
 	       fast_no_hz);
 }
 
@@ -1963,7 +1959,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
  * Does the specified CPU need an RCU callback for the specified flavor
  * of rcu_barrier()?
  */
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+static bool rcu_nocb_cpu_needs_barrier(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	unsigned long ret;
@@ -2147,7 +2143,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 		needwake = rcu_start_this_gp(rnp, rdp, c);
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		if (needwake)
-			rcu_gp_kthread_wake(rdp->rsp);
+			rcu_gp_kthread_wake();
 	}
 
 	/*
@@ -2427,7 +2423,7 @@ void __init rcu_init_nohz(void)
 	for_each_rcu_flavor(rsp) {
 		for_each_cpu(cpu, rcu_nocb_mask)
 			init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu));
-		rcu_organize_nocb_kthreads(rsp);
+		rcu_organize_nocb_kthreads();
 	}
 }
 
@@ -2447,7 +2443,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
  * brought online out of order, this can require re-organizing the
  * leader-follower relationships.
  */
-static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
+static void rcu_spawn_one_nocb_kthread(int cpu)
 {
 	struct rcu_data *rdp;
 	struct rcu_data *rdp_last;
@@ -2484,7 +2480,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
 
 	/* Spawn the kthread for this CPU and RCU flavor. */
 	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
-			"rcuo%c/%d", rsp->abbr, cpu);
+			"rcuo%c/%d", rcu_state.abbr, cpu);
 	BUG_ON(IS_ERR(t));
 	WRITE_ONCE(rdp_spawn->nocb_kthread, t);
 }
@@ -2499,7 +2495,7 @@ static void rcu_spawn_all_nocb_kthreads(int cpu)
 
 	if (rcu_scheduler_fully_active)
 		for_each_rcu_flavor(rsp)
-			rcu_spawn_one_nocb_kthread(rsp, cpu);
+			rcu_spawn_one_nocb_kthread(cpu);
 }
 
 /*
@@ -2523,7 +2519,7 @@ module_param(rcu_nocb_leader_stride, int, 0444);
 /*
  * Initialize leader-follower relationships for all no-CBs CPUs.
  */
-static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
+static void __init rcu_organize_nocb_kthreads(void)
 {
 	int cpu;
 	int ls = rcu_nocb_leader_stride;
@@ -2582,7 +2578,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+static bool rcu_nocb_cpu_needs_barrier(int cpu)
 {
 	WARN_ON_ONCE(1); /* Should be dead code. */
 	return false;
@@ -2651,12 +2647,12 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
  * This code relies on the fact that all NO_HZ_FULL CPUs are also
  * CONFIG_RCU_NOCB_CPU CPUs.
  */
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
+static bool rcu_nohz_full_cpu(void)
 {
 #ifdef CONFIG_NO_HZ_FULL
 	if (tick_nohz_full_cpu(smp_processor_id()) &&
-	    (!rcu_gp_in_progress(rsp) ||
-	     ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
+	    (!rcu_gp_in_progress() ||
+	     ULONG_CMP_LT(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
 		return true;
 #endif /* #ifdef CONFIG_NO_HZ_FULL */
 	return false;


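The pattern in every hunk above is the same, so here is a minimal
sketch of the transformation for anyone skimming the squashed diff.
The structure layout is elided and the helper names are made up for
illustration; they are not the kernel's actual functions:

struct rcu_state {
	unsigned long gp_seq;
	/* ... */
};

static struct rcu_state rcu_state;	/* Sole remaining instance. */

/* Before: the rcu_state pointer was threaded through every call chain. */
static unsigned long example_gp_seq_old(struct rcu_state *rsp)
{
	return rsp->gp_seq;
}

/* After: helpers reference the single global instance directly. */
static unsigned long example_gp_seq_new(void)
{
	return rcu_state.gp_seq;
}

Callers shrink to match, so example_gp_seq_old(&rcu_state) becomes
simply example_gp_seq_new().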

* Re: [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0
  2018-08-30  4:10     ` Paul E. McKenney
@ 2018-08-30  4:20       ` Josh Triplett
  2018-08-30 15:42         ` Steven Rostedt
  2018-08-30 15:44       ` Steven Rostedt
  1 sibling, 1 reply; 62+ messages in thread
From: Josh Triplett @ 2018-08-30  4:20 UTC (permalink / raw)
  To: Paul E. McKenney
  Cc: Steven Rostedt, linux-kernel, mingo, jiangshanlai, dipankar,
	akpm, mathieu.desnoyers, tglx, peterz, dhowells, edumazet,
	fweisbec, oleg, joel

On Wed, Aug 29, 2018 at 09:10:17PM -0700, Paul E. McKenney wrote:
> On Wed, Aug 29, 2018 at 08:22:16PM -0700, Paul E. McKenney wrote:
> > On Wed, Aug 29, 2018 at 10:00:26PM -0400, Steven Rostedt wrote:
> > > On Wed, 29 Aug 2018 15:38:30 -0700
> > > "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> wrote:
> > > 
> > > > Hello!
> > > > 
> > > > This commit does RCU-consolidation cleanups that get rid of pointers to
> > > > the sole remaining rcu_state structure:
> > > > 
> > > > 1-40:	Remove the "rsp" parameter from numerous functions, given that
> > > > 	the corresponding argument will always be &rcu_state.
> > > 
> > > Hmm, couldn't 1-40 have been made into a single patch?
> > 
> > They could.  I separated them to make finding the inevitable typos easier.
> > But at this point, it is easy enough to squash them together, though.
> 
> And please see below for what the resulting diff would look like.  Is
> this an improvement?

Honestly, as long as the result after each commit compiles, I prefer the
split version for ease of review.


* Re: [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0
  2018-08-30  4:20       ` Josh Triplett
@ 2018-08-30 15:42         ` Steven Rostedt
  0 siblings, 0 replies; 62+ messages in thread
From: Steven Rostedt @ 2018-08-30 15:42 UTC (permalink / raw)
  To: Josh Triplett
  Cc: Paul E. McKenney, linux-kernel, mingo, jiangshanlai, dipankar,
	akpm, mathieu.desnoyers, tglx, peterz, dhowells, edumazet,
	fweisbec, oleg, joel

On Wed, 29 Aug 2018 21:20:07 -0700
Josh Triplett <josh@joshtriplett.org> wrote:


> > And please see below for what the resulting diff would look like.  Is
> > this an improvement?  
> 
> Honestly, as long as the result after each commit compiles, I prefer the
> split version for ease of review.

You and I have different preferences for reviewing changes like this ;-)

I prefer the one patch (I do think it is an improvement). It's all
basically the exact same change. Looking at 40 different patches is
much more work IMHO than just looking at a single patch and testing
it, rather than testing 40 different patches. That's a lot of compiling.

I usually stop reviewing after 10 patches of the same kind, as I run
out of time to review them.

-- Steve


* Re: [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0
  2018-08-30  4:10     ` Paul E. McKenney
  2018-08-30  4:20       ` Josh Triplett
@ 2018-08-30 15:44       ` Steven Rostedt
  2018-08-30 17:10         ` Paul E. McKenney
  1 sibling, 1 reply; 62+ messages in thread
From: Steven Rostedt @ 2018-08-30 15:44 UTC (permalink / raw)
  To: Paul E. McKenney
  Cc: linux-kernel, mingo, jiangshanlai, dipankar, akpm,
	mathieu.desnoyers, josh, tglx, peterz, dhowells, edumazet,
	fweisbec, oleg, joel

On Wed, 29 Aug 2018 21:10:17 -0700
"Paul E. McKenney" <paulmck@linux.vnet.ibm.com> wrote:

> On Wed, Aug 29, 2018 at 08:22:16PM -0700, Paul E. McKenney wrote:
> > On Wed, Aug 29, 2018 at 10:00:26PM -0400, Steven Rostedt wrote:  
> > > On Wed, 29 Aug 2018 15:38:30 -0700
> > > "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> wrote:
> > >   
> > > > Hello!
> > > > 
> > > > This commit does RCU-consolidation cleanups that get rid of pointers to
> > > > the sole remaining rcu_state structure:
> > > > 
> > > > 1-40:	Remove the "rsp" parameter from numerous functions, given that
> > > > 	the corresponding argument will always be &rcu_state.  
> > > 
> > > Hmm, couldn't 1-40 have been made into a single patch?  
> > 
> > They could.  I separated them to make finding the inevitable typos easier.
> > But at this point, it is easy enough to squash them together, though.  
> 
> And please see below for what the resulting diff would look like.  Is
> this an improvement?

Somewhat...

> 
> 							Thanx, Paul
> 
> ------------------------------------------------------------------------
> 
> diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
> index f5120a00f511..772c26a3865a 100644
> --- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
> +++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html

I would just keep the documentation patches separate.

-- Steve


> @@ -1372,8 +1372,7 @@ that is, if the CPU is currently idle.
>  Accessor Functions</a></h3>
>  
>  <p>The following listing shows the
> -<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first</tt>,
> -<tt>rcu_for_each_nonleaf_node_breadth_first()</tt>, and
> +<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first</tt> and
>  <tt>rcu_for_each_leaf_node()</tt> function and macros:
>  
>  


* Re: [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0
  2018-08-30 15:44       ` Steven Rostedt
@ 2018-08-30 17:10         ` Paul E. McKenney
  2018-08-30 17:40           ` Steven Rostedt
  0 siblings, 1 reply; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-30 17:10 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-kernel, mingo, jiangshanlai, dipankar, akpm,
	mathieu.desnoyers, josh, tglx, peterz, dhowells, edumazet,
	fweisbec, oleg, joel

On Thu, Aug 30, 2018 at 11:44:52AM -0400, Steven Rostedt wrote:
> On Wed, 29 Aug 2018 21:10:17 -0700
> "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> wrote:
> 
> > On Wed, Aug 29, 2018 at 08:22:16PM -0700, Paul E. McKenney wrote:
> > > On Wed, Aug 29, 2018 at 10:00:26PM -0400, Steven Rostedt wrote:  
> > > > On Wed, 29 Aug 2018 15:38:30 -0700
> > > > "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> wrote:
> > > >   
> > > > > Hello!
> > > > > 
> > > > > This commit does RCU-consolidation cleanups that get rid of pointers to
> > > > > the sole remaining rcu_state structure:
> > > > > 
> > > > > 1-40:	Remove the "rsp" parameter from numerous functions, given that
> > > > > 	the corresponding argument will always be &rcu_state.  
> > > > 
> > > > Hmm, couldn't 1-40 have been made into a single patch?  
> > > 
> > > They could.  I separated them to make finding the inevitable typos easier.
> > > But at this point, it is easy enough to squash them together, though.  
> > 
> > And please see below for what the resulting diff would look like.  Is
> > this an improvement?
> 
> Somewhat...
> 
> > ------------------------------------------------------------------------
> > 
> > diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
> > index f5120a00f511..772c26a3865a 100644
> > --- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
> > +++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
> 
> I would just keep the documentation patches separate.

Combining yours and Josh's feedback, I will split the documentation
updates out of 4ca0508f7870 ("rcu: Remove rsp parameter from rcu_node tree
accessor macros") and put them into the "doc" series.  Possibly combining
it with 2ea66b6fce61 ("doc: Update documentation for removal of RCU-bh
update machinery") and/or 3f38a4626202 ("doc: Update documentation for
removal of RCU-sched update machinery").  Left to myself, I would merge
it into the merger of those two documentation commits.  Having them
together is probably more convenient anyway, as it would provide a
summary of the entire RCU-flavor-consolidation change.

Fair enough?

							Thanx, Paul

> -- Steve
> 
> 
> > @@ -1372,8 +1372,7 @@ that is, if the CPU is currently idle.
> >  Accessor Functions</a></h3>
> >  
> >  <p>The following listing shows the
> > -<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first</tt>,
> > -<tt>rcu_for_each_nonleaf_node_breadth_first()</tt>, and
> > +<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first</tt> and
> >  <tt>rcu_for_each_leaf_node()</tt> function and macros:
> >  
> >  
> 



* Re: [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0
  2018-08-30 17:10         ` Paul E. McKenney
@ 2018-08-30 17:40           ` Steven Rostedt
  2018-08-30 18:26             ` Paul E. McKenney
  0 siblings, 1 reply; 62+ messages in thread
From: Steven Rostedt @ 2018-08-30 17:40 UTC (permalink / raw)
  To: Paul E. McKenney
  Cc: linux-kernel, mingo, jiangshanlai, dipankar, akpm,
	mathieu.desnoyers, josh, tglx, peterz, dhowells, edumazet,
	fweisbec, oleg, joel

On Thu, 30 Aug 2018 10:10:06 -0700
"Paul E. McKenney" <paulmck@linux.vnet.ibm.com> wrote:


> > I would just keep the documentation patches separate.  
> 
> Combining yours and Josh's feedback, I will split the documentation
> updates out of 4ca0508f7870 ("rcu: Remove rsp parameter from rcu_node tree
> accessor macros") and put them into the "doc" series.  Possibly combining
> it with 2ea66b6fce61 ("doc: Update documentation for removal of RCU-bh
> update machinery") and/or 3f38a4626202 ("doc: Update documentation for
> removal of RCU-sched update machinery").  Left to myself, I would merge
> it into the merger of those two documentation commits.  Having them
> together is probably more convenient anyway, as it would provide a
> summary of the entire RCU-flavor-consolidation change.
> 
> Fair enough?
>

Go for it ;-)

-- Steve


* Re: [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0
  2018-08-30 17:40           ` Steven Rostedt
@ 2018-08-30 18:26             ` Paul E. McKenney
  0 siblings, 0 replies; 62+ messages in thread
From: Paul E. McKenney @ 2018-08-30 18:26 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-kernel, mingo, jiangshanlai, dipankar, akpm,
	mathieu.desnoyers, josh, tglx, peterz, dhowells, edumazet,
	fweisbec, oleg, joel

On Thu, Aug 30, 2018 at 01:40:12PM -0400, Steven Rostedt wrote:
> On Thu, 30 Aug 2018 10:10:06 -0700
> "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> wrote:
> 
> 
> > > I would just keep the documentation patches separate.  
> > 
> > Combining yours and Josh's feedback, I will split the documentation
> > updates out of 4ca0508f7870 ("rcu: Remove rsp parameter from rcu_node tree
> > accessor macros") and put them into the "doc" series.  Possibly combining
> > it with 2ea66b6fce61 ("doc: Update documentation for removal of RCU-bh
> > update machinery") and/or 3f38a4626202 ("doc: Update documentation for
> > removal of RCU-sched update machinery").  Left to myself, I would merge
> > it into the merger of those two documentation commits.  Having them
> > together is probably more convenient anyway, as it would provide a
> > summary of the entire RCU-flavor-consolidation change.
> > 
> > Fair enough?
> 
> Go for it ;-)

Done!  ;-)

							Thanx, Paul



end of thread

Thread overview: 62+ messages
2018-08-29 22:38 [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 01/52] rcu: Remove rsp parameter from rcu_report_qs_rnp() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 02/52] rcu: Remove rsp parameter from rcu_report_qs_rsp() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 03/52] rcu: Remove rsp parameter from rcu_report_unblock_qs_rnp() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 04/52] rcu: Remove rsp parameter from rcu_report_qs_rdp() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 05/52] rcu: Remove rsp parameter from rcu_gp_in_progress() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 06/52] rcu: Remove rsp parameter from rcu_get_root() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 07/52] rcu: Remove rsp parameter from record_gp_stall_check_time() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 08/52] rcu: Remove rsp parameter from rcu_check_gp_kthread_starvation() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 09/52] rcu: Remove rsp parameter from rcu_dump_cpu_stacks() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 10/52] rcu: Remove rsp parameter from rcu_stall_kick_kthreads() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 11/52] rcu: Remove rsp parameter from print_other_cpu_stall() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 12/52] rcu: Remove rsp parameter from print_cpu_stall() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 13/52] rcu: Remove rsp parameter from check_cpu_stall() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 14/52] rcu: Remove rsp parameter from rcu_future_gp_cleanup() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 15/52] rcu: Remove rsp parameter from rcu_gp_kthread_wake() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 16/52] rcu: Remove rsp parameter from rcu_accelerate_cbs() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 17/52] rcu: Remove rsp parameter from rcu_accelerate_cbs_unlocked() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 18/52] rcu: Remove rsp parameter from rcu_advance_cbs() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 19/52] rcu: Remove rsp parameter from __note_gp_changes() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 20/52] rcu: Remove rsp parameter from note_gp_changes() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 21/52] rcu: Remove rsp parameter from rcu_gp_slow() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 22/52] rcu: Remove rsp parameter from rcu_gp_kthread() and friends Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 23/52] rcu: Remove rsp parameter from rcu_check_quiescent_state() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 24/52] rcu: Remove rsp parameter from CPU hotplug functions Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 25/52] rcu: Remove rsp parameter from rcu_do_batch() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 26/52] rcu: Remove rsp parameter from force-quiescent-state functions Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 27/52] rcu: Remove rsp parameter from rcu_check_gp_start_stall() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 28/52] rcu: Remove rsp parameter from __rcu_process_callbacks() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 29/52] rcu: Remove rsp parameter from __call_rcu() and friend Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 30/52] rcu: Remove rsp parameter from __rcu_pending() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 31/52] rcu: Remove rsp parameter from _rcu_barrier() and friends Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 32/52] rcu: Remove rsp parameter from rcu_boot_init_percpu_data() " Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 33/52] rcu: Remove rsp parameter from rcu_init_one() " Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 34/52] rcu: Remove rsp parameter from rcu_print_detail_task_stall() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 35/52] rcu: Remove rsp parameter from dump_blkd_tasks() and friend Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 36/52] rcu: Remove rsp parameter from rcu_spawn_one_boost_kthread() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 37/52] rcu: Remove rsp parameter from print_cpu_stall_info() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 38/52] rcu: Remove rsp parameter from no-CBs CPU functions Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 39/52] rcu: Remove rsp parameter from expedited grace-period functions Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 40/52] rcu: Remove rsp parameter from rcu_node tree accessor macros Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 41/52] rcu: Remove rcu_data structure's ->rsp field Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 42/52] rcu: Remove last non-flavor-traversal rsp local variable from tree_plugin.h Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 43/52] rcu: Remove for_each_rcu_flavor() flavor-traversal macro Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 44/52] rcu: Simplify rcutorture_get_gp_data() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 45/52] rcu: Restructure rcu_check_gp_kthread_starvation() Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 46/52] rcu: Eliminate stall-warning use of rsp Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 47/52] rcu: Eliminate grace-period management code " Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 48/52] rcu: Eliminate callback-invocation/invocation " Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 49/52] rcu: Eliminate quiescent-state and grace-period-nonstart " Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 50/52] rcu: Eliminate RCU-barrier " Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 51/52] rcu: Eliminate initialization-time " Paul E. McKenney
2018-08-29 22:38 ` [PATCH tip/core/rcu 52/52] rcu: Fix typo in force_qs_rnp()'s parameter's parameter Paul E. McKenney
2018-08-30  2:00 ` [PATCH tip/core/rcu 0/52] Remove rcu_state pointers for v4.20/v5.0 Steven Rostedt
2018-08-30  3:22   ` Paul E. McKenney
2018-08-30  4:10     ` Paul E. McKenney
2018-08-30  4:20       ` Josh Triplett
2018-08-30 15:42         ` Steven Rostedt
2018-08-30 15:44       ` Steven Rostedt
2018-08-30 17:10         ` Paul E. McKenney
2018-08-30 17:40           ` Steven Rostedt
2018-08-30 18:26             ` Paul E. McKenney
