linux-kernel.vger.kernel.org archive mirror
From: Frederic Weisbecker <frederic@kernel.org>
To: "Paul E . McKenney" <paulmck@kernel.org>
Cc: LKML <linux-kernel@vger.kernel.org>,
	Frederic Weisbecker <frederic@kernel.org>,
	Steven Rostedt <rostedt@goodmis.org>,
	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
	Lai Jiangshan <jiangshanlai@gmail.com>,
	Joel Fernandes <joel@joelfernandes.org>,
	Josh Triplett <josh@joshtriplett.org>
Subject: [PATCH 02/10] rcu: Use direct rdp->nocb_lock operations on local calls
Date: Wed, 13 May 2020 18:47:06 +0200	[thread overview]
Message-ID: <20200513164714.22557-3-frederic@kernel.org> (raw)
In-Reply-To: <20200513164714.22557-1-frederic@kernel.org>

Unconditionally lock rdp->nocb_lock in the nocb code paths that are
called after we have verified that the rdp is offloaded.

This clarifies the locking rules and expectations.
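
For context, here is a simplified sketch of the wrappers being bypassed
(an approximation of the helpers in kernel/rcu/tree_plugin.h, not a
verbatim copy; details may differ). The wrappers test whether the rdp is
offloaded before touching ->nocb_lock; on paths that have already made
that check, the test is redundant, hence the direct raw_spin_lock()
calls in the hunks below:

/* Sketch: only take ->nocb_lock when callbacks are offloaded. */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (!rcu_segcblist_is_offloaded(&rdp->cblist))
		return;
	raw_spin_lock(&rdp->nocb_lock);
}

/* Sketch: release ->nocb_lock only if it was taken above. */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags)
{
	if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
	} else {
		local_irq_restore(flags);
	}
}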

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
---
 kernel/rcu/tree_plugin.h | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 523570469864..1d22b16c03e0 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1628,11 +1628,11 @@ static void wake_nocb_gp(struct rcu_data *rdp, bool force,
 	if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 				    TPS("AlreadyAwake"));
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 		return;
 	}
 	del_timer(&rdp->nocb_timer);
-	rcu_nocb_unlock_irqrestore(rdp, flags);
+	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
 	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
 		WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
@@ -1753,7 +1753,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 
 	// Don't use ->nocb_bypass during early boot.
 	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
-		rcu_nocb_lock(rdp);
+		raw_spin_lock(&rdp->nocb_lock);
 		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
 		return false;
@@ -1778,7 +1778,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 	// this jiffy, tell the caller to enqueue onto ->cblist.  But flush
 	// ->nocb_bypass first.
 	if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
-		rcu_nocb_lock(rdp);
+		raw_spin_lock(&rdp->nocb_lock);
 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
 		if (*was_alldone)
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
@@ -1792,7 +1792,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 	// flush ->nocb_bypass to ->cblist.
 	if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
 	    ncbs >= qhimark) {
-		rcu_nocb_lock(rdp);
+		raw_spin_lock(&rdp->nocb_lock);
 		if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
 			*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
 			if (*was_alldone)
@@ -1807,7 +1807,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 			rcu_advance_cbs_nowake(rdp->mynode, rdp);
 			rdp->nocb_gp_adv_time = j;
 		}
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 		return true; // Callback already enqueued.
 	}
 
@@ -1827,7 +1827,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 		local_irq_restore(flags);
 	} else {
 		// No-CBs GP kthread might be indefinitely asleep, if so, wake.
-		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
+		raw_spin_lock(&rdp->nocb_lock); // Rare during call_rcu() flood.
 		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 					    TPS("FirstBQwake"));
@@ -1835,7 +1835,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 		} else {
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 					    TPS("FirstBQnoWake"));
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 		}
 	}
 	return true; // Callback already enqueued.
@@ -1861,7 +1861,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 	if (rcu_nocb_poll || !t) {
 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 				    TPS("WakeNotPoll"));
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 		return;
 	}
 	// Need to actually to a wakeup.
@@ -1876,7 +1876,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 		} else {
 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
 					   TPS("WakeEmptyIsDeferred"));
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 		}
 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
 		/* ... or if many callbacks queued. */
@@ -1894,10 +1894,10 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 		    !timer_pending(&rdp->nocb_bypass_timer))
 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
 					   TPS("WakeOvfIsDeferred"));
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 	} else {
 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 	}
 	return;
 }
-- 
2.25.0



Thread overview: 57+ messages
2020-05-13 16:47 [PATCH 00/10] rcu: Allow a CPU to leave and reenter NOCB state Frederic Weisbecker
2020-05-13 16:47 ` [PATCH 01/10] rcu: Directly lock rdp->nocb_lock on nocb code entrypoints Frederic Weisbecker
2020-05-20 12:29   ` Joel Fernandes
2020-05-22 17:57     ` Paul E. McKenney
2020-05-26 15:21       ` Joel Fernandes
2020-05-26 16:29         ` Paul E. McKenney
2020-05-26 20:18           ` Joel Fernandes
2020-05-26 21:09             ` Paul E. McKenney
2020-05-26 21:27               ` Joel Fernandes
2020-05-26 22:29                 ` Paul E. McKenney
2020-05-27  0:45                   ` Joel Fernandes
2020-05-27  0:58                     ` Paul E. McKenney
2020-06-04 11:41       ` Frederic Weisbecker
2020-06-04 16:36         ` Paul E. McKenney
2020-06-08 12:57           ` Frederic Weisbecker
2020-06-09 18:02             ` Paul E. McKenney
2020-06-10 13:12               ` Frederic Weisbecker
2020-06-10 14:02                 ` Paul E. McKenney
2020-06-10 22:12                   ` Frederic Weisbecker
2020-06-10 23:21                     ` Paul E. McKenney
2020-06-11  1:32                       ` Joel Fernandes
2020-05-13 16:47 ` Frederic Weisbecker [this message]
2020-05-13 16:47 ` [PATCH 03/10] rcu: Make locking explicit in do_nocb_deferred_wakeup_common() Frederic Weisbecker
2020-05-26 19:54   ` Joel Fernandes
2020-05-26 19:59   ` Joel Fernandes
2020-05-13 16:47 ` [PATCH 04/10] rcu: Implement rcu_segcblist_is_offloaded() config dependent Frederic Weisbecker
2020-05-13 18:20   ` Paul E. McKenney
2020-05-13 23:03     ` Frederic Weisbecker
2020-05-14 15:47       ` Paul E. McKenney
2020-05-13 16:47 ` [PATCH 05/10] rcu: Remove useless conditional nocb unlock Frederic Weisbecker
2020-05-13 16:47 ` [PATCH 06/10] rcu: Make nocb_cb kthread parkable Frederic Weisbecker
2020-06-11  1:34   ` Joel Fernandes
2020-05-13 16:47 ` [PATCH 07/10] rcu: Temporarily assume that nohz full CPUs might not be NOCB Frederic Weisbecker
2020-05-13 18:25   ` Paul E. McKenney
2020-05-13 23:08     ` Frederic Weisbecker
2020-05-14 15:50       ` Paul E. McKenney
2020-05-14 22:49         ` Frederic Weisbecker
2020-05-13 16:47 ` [PATCH 08/10] rcu: Allow to deactivate nocb on a CPU Frederic Weisbecker
2020-05-13 18:38   ` Paul E. McKenney
2020-05-13 22:45     ` Frederic Weisbecker
2020-05-14 15:47       ` Paul E. McKenney
2020-05-14 22:30         ` Frederic Weisbecker
2020-05-14 22:47           ` Paul E. McKenney
2020-05-14 22:55             ` Frederic Weisbecker
2020-05-26 21:20   ` Joel Fernandes
2020-05-26 22:49     ` Joel Fernandes
2020-06-04 13:10       ` Frederic Weisbecker
2020-06-11  1:32         ` Joel Fernandes
2020-06-11 17:03           ` Paul E. McKenney
2020-06-04 13:14     ` Frederic Weisbecker
2020-05-13 16:47 ` [PATCH 09/10] rcu: Allow to re-offload a CPU that used to be nocb Frederic Weisbecker
2020-05-13 18:41   ` Paul E. McKenney
2020-05-13 16:47 ` [PATCH 10/10] rcu: Nocb (de)activate through sysfs Frederic Weisbecker
2020-05-13 18:42   ` Paul E. McKenney
2020-05-13 23:23     ` Frederic Weisbecker
2020-05-14 15:51       ` Paul E. McKenney
2020-05-13 18:15 ` [PATCH 00/10] rcu: Allow a CPU to leave and reenter NOCB state Paul E. McKenney
