From: Frederic Weisbecker <frederic@kernel.org>
To: "Paul E . McKenney" <paulmck@kernel.org>
Cc: LKML <linux-kernel@vger.kernel.org>,
	Frederic Weisbecker <frederic@kernel.org>,
	rcu <rcu@vger.kernel.org>, Uladzislau Rezki <urezki@gmail.com>,
	Neeraj Upadhyay <quic_neeraju@quicinc.com>,
	Joel Fernandes <joel@joelfernandes.org>,
	Giovanni Gherdovich <ggherdovich@suse.cz>
Subject: [PATCH 6/9] rcu/nocb: Rename was_alldone to was_pending
Date: Wed, 31 May 2023 12:17:33 +0200
Message-ID: <20230531101736.12981-7-frederic@kernel.org>
In-Reply-To: <20230531101736.12981-1-frederic@kernel.org>

Upon enqueuing on an offloaded rdp, RCU checks whether the queue was
previously made up only of done callbacks and relies on that
information to determine whether the rcuog kthread needs to be woken
up.

In order to prepare for moving the lazy callbacks from the bypass
queue to the main queue, track instead whether there are pending
callbacks. For now, "having pending callbacks" is simply the reverse
of "having only done callbacks". However, a later patch will exclude
lazy callbacks from the pending count.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
---
 kernel/rcu/tree.c      | 14 +++++++-------
 kernel/rcu/tree.h      |  2 +-
 kernel/rcu/tree_nocb.h | 28 ++++++++++++++--------------
 3 files changed, 22 insertions(+), 22 deletions(-)
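
Note for reviewers, not part of the committed change: the rename
inverts the boolean's polarity, so every "if (*was_alldone)" check
becomes "if (!*was_pending)", and callers passing literal arguments
flip as well (see the rcutree_migrate_callbacks() hunk below). Here is
a minimal userspace sketch of that inversion; pend_cbs() is a
hypothetical stand-in for rcu_segcblist_pend_cbs():

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for rcu_segcblist_pend_cbs(). */
	static bool pend_cbs(int n_pending)
	{
		return n_pending > 0;
	}

	int main(void)
	{
		int n_pending = 0;	/* queue holds only done callbacks */

		/* Before this patch: track "queue had only done callbacks". */
		bool was_alldone = !pend_cbs(n_pending);
		/* After this patch: track "queue had pending callbacks". */
		bool was_pending = pend_cbs(n_pending);

		/* The wake-up decision is unchanged; only the polarity flips. */
		printf("wake rcuog (old check): %d\n", was_alldone);
		printf("wake rcuog (new check): %d\n", !was_pending);
		return 0;
	}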

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e33c0d889216..d71b9915c91e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2633,7 +2633,7 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
 	unsigned long flags;
 	bool lazy;
 	struct rcu_data *rdp;
-	bool was_alldone;
+	bool was_pending;
 
 	/* Misaligned rcu_head! */
 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
@@ -2670,7 +2670,7 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
 	}
 
 	check_cb_ovld(rdp);
-	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
+	if (rcu_nocb_try_bypass(rdp, head, &was_pending, flags, lazy))
 		return; // Enqueued onto ->nocb_bypass, so just leave.
 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
 	rcu_segcblist_enqueue(&rdp->cblist, head);
@@ -2686,7 +2686,7 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
 
 	/* Go handle any RCU core processing required. */
 	if (unlikely(rcu_rdp_is_offloaded(rdp))) {
-		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
+		__call_rcu_nocb_wake(rdp, was_pending, flags); /* unlocks */
 	} else {
 		__call_rcu_core(rdp, head, flags);
 		local_irq_restore(flags);
@@ -3936,8 +3936,8 @@ static void rcu_barrier_entrain(struct rcu_data *rdp)
 {
 	unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
 	unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
+	bool nocb_no_pending = false;
 	bool wake_nocb = false;
-	bool was_alldone = false;
 
 	lockdep_assert_held(&rcu_state.barrier_lock);
 	if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
@@ -3951,9 +3951,9 @@ static void rcu_barrier_entrain(struct rcu_data *rdp)
 	 * queue. This way we don't wait for bypass timer that can reach seconds
 	 * if it's fully lazy.
 	 */
-	was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
+	nocb_no_pending = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
-	wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
+	wake_nocb = nocb_no_pending && rcu_segcblist_pend_cbs(&rdp->cblist);
 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
 		atomic_inc(&rcu_state.barrier_cpu_count);
 	} else {
@@ -4549,7 +4549,7 @@ void rcutree_migrate_callbacks(int cpu)
 	check_cb_ovld_locked(my_rdp, my_rnp);
 	if (rcu_rdp_is_offloaded(my_rdp)) {
 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
-		__call_rcu_nocb_wake(my_rdp, true, flags);
+		__call_rcu_nocb_wake(my_rdp, false, flags);
 	} else {
 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
 		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 192536916f9a..966abe037f57 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -464,7 +464,7 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 				  unsigned long j, bool lazy);
 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-				bool *was_alldone, unsigned long flags,
+				bool *was_pending, unsigned long flags,
 				bool lazy);
 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
 				 unsigned long flags);
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index c08447db5a2e..d8b17c69110a 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -413,7 +413,7 @@ static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
  * there is only one CPU in operation.
  */
 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-				bool *was_alldone, unsigned long flags,
+				bool *was_pending, unsigned long flags,
 				bool lazy)
 {
 	unsigned long c;
@@ -427,7 +427,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 	// Pure softirq/rcuc based processing: no bypassing, no
 	// locking.
 	if (!rcu_rdp_is_offloaded(rdp)) {
-		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+		*was_pending = rcu_segcblist_pend_cbs(&rdp->cblist);
 		return false;
 	}
 
@@ -435,7 +435,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 	// locking.
 	if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
 		rcu_nocb_lock(rdp);
-		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+		*was_pending = rcu_segcblist_pend_cbs(&rdp->cblist);
 		return false; /* Not offloaded, no bypassing. */
 	}
 
@@ -443,7 +443,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
 		rcu_nocb_lock(rdp);
 		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+		*was_pending = rcu_segcblist_pend_cbs(&rdp->cblist);
 		return false;
 	}
 
@@ -468,8 +468,8 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 	// Lazy CBs throttle this back and do immediate bypass queuing.
 	if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy && !lazy) {
 		rcu_nocb_lock(rdp);
-		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-		if (*was_alldone)
+		*was_pending = rcu_segcblist_pend_cbs(&rdp->cblist);
+		if (!*was_pending)
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 					    TPS("FirstQ"));
 
@@ -484,10 +484,10 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 	    ((!bypass_is_lazy && ((j != READ_ONCE(rdp->nocb_bypass_first)) || ncbs >= qhimark)) ||
 	     (bypass_is_lazy && (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + jiffies_lazy_flush) || ncbs >= qhimark_lazy)))) {
 		rcu_nocb_lock(rdp);
-		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+		*was_pending = rcu_segcblist_pend_cbs(&rdp->cblist);
 
 		if (!rcu_nocb_flush_bypass(rdp, rhp, j, lazy)) {
-			if (*was_alldone)
+			if (!*was_pending)
 				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 						    TPS("FirstQ"));
 			WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
@@ -503,7 +503,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 		// The flush succeeded and we moved CBs into the regular list.
 		// Don't wait for the wake up timer as it may be too far ahead.
 		// Wake up the GP thread now instead, if the cblist was empty.
-		__call_rcu_nocb_wake(rdp, *was_alldone, flags);
+		__call_rcu_nocb_wake(rdp, *was_pending, flags);
 
 		return true; // Callback already enqueued.
 	}
@@ -539,7 +539,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 					    TPS("FirstBQwake"));
-			__call_rcu_nocb_wake(rdp, true, flags);
+			__call_rcu_nocb_wake(rdp, false, flags);
 		} else {
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 					    TPS("FirstBQnoWake"));
@@ -555,7 +555,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
  *
  * If warranted, also wake up the kthread servicing this CPUs queues.
  */
-static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
+static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_pending,
 				 unsigned long flags)
 				 __releases(rdp->nocb_lock)
 {
@@ -578,7 +578,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 	len = rcu_segcblist_n_cbs(&rdp->cblist);
 	bypass_len = rcu_cblist_n_cbs(&rdp->nocb_bypass);
 	lazy_len = READ_ONCE(rdp->lazy_len);
-	if (was_alldone) {
+	if (!was_pending) {
 		rdp->qlen_last_fqs_check = len;
 		// Only lazy CBs in bypass list
 		if (lazy_len && bypass_len == lazy_len) {
@@ -1767,12 +1767,12 @@ static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 }
 
 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-				bool *was_alldone, unsigned long flags, bool lazy)
+				bool *was_pending, unsigned long flags, bool lazy)
 {
 	return false;
 }
 
-static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
+static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_pending,
 				 unsigned long flags)
 {
 	WARN_ON_ONCE(1);  /* Should be dead code! */
-- 
2.40.1

