From: "Paul E. McKenney" <paulmck@kernel.org>
To: Lai Jiangshan <laijs@linux.alibaba.com>
Cc: linux-kernel@vger.kernel.org,
	Josh Triplett <josh@joshtriplett.org>,
	Steven Rostedt <rostedt@goodmis.org>,
	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
	Lai Jiangshan <jiangshanlai@gmail.com>,
	Joel Fernandes <joel@joelfernandes.org>,
	rcu@vger.kernel.org
Subject: Re: [PATCH V2 5/7] rcu: wrap usages of rcu_read_lock_nesting
Date: Fri, 15 Nov 2019 14:25:03 -0800	[thread overview]
Message-ID: <20191115222503.GA2865@paulmck-ThinkPad-P72> (raw)
In-Reply-To: <20191102124559.1135-6-laijs@linux.alibaba.com>

On Sat, Nov 02, 2019 at 12:45:57PM +0000, Lai Jiangshan wrote:
> Prepare for using percpu rcu_preempt_depth on x86

This one makes a lot of sense just by itself -- I probably should have
done something like this years ago.  I hand-applied this due to conflicts
from not having the earlier commits.  Please see comments inline.
The resulting patch is at the end, so please let me know if I messed
anything up.

> Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
> ---
>  kernel/rcu/tree_exp.h    |  4 ++--
>  kernel/rcu/tree_plugin.h | 43 ++++++++++++++++++++++++++--------------
>  2 files changed, 30 insertions(+), 17 deletions(-)
> 
> diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
> index afa9f573b00f..dcb2124203cf 100644
> --- a/kernel/rcu/tree_exp.h
> +++ b/kernel/rcu/tree_exp.h
> @@ -608,7 +608,7 @@ static void rcu_exp_handler(void *unused)
>  	 * critical section.  If also enabled or idle, immediately
>  	 * report the quiescent state, otherwise defer.
>  	 */
> -	if (!t->rcu_read_lock_nesting) {
> +	if (!rcu_preempt_depth()) {
>  		rdp->exp_deferred_qs = true;
>  		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
>  		    rcu_dynticks_curr_cpu_in_eqs()) {
> @@ -632,7 +632,7 @@ static void rcu_exp_handler(void *unused)
>  	 * can have caused this quiescent state to already have been
>  	 * reported, so we really do need to check ->expmask.
>  	 */
> -	if (t->rcu_read_lock_nesting > 0) {
> +	if (rcu_preempt_depth() > 0) {
>  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
>  		if (rnp->expmask & rdp->grpmask) {
>  			rdp->exp_deferred_qs = true;
> diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
> index 98644966c808..eb5906c55c8d 100644
> --- a/kernel/rcu/tree_plugin.h
> +++ b/kernel/rcu/tree_plugin.h
> @@ -290,8 +290,8 @@ void rcu_note_context_switch(bool preempt)
>  
>  	trace_rcu_utilization(TPS("Start context switch"));
>  	lockdep_assert_irqs_disabled();
> -	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
> -	if (t->rcu_read_lock_nesting > 0 &&
> +	WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
> +	if (rcu_preempt_depth() > 0 &&
>  	    !t->rcu_read_unlock_special.b.blocked) {
>  
>  		/* Possibly blocking in an RCU read-side critical section. */
> @@ -346,6 +346,21 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
>  /* Bias and limit values for ->rcu_read_lock_nesting. */
>  #define RCU_NEST_PMAX (INT_MAX / 2)
>  
> +static inline void rcu_preempt_depth_inc(void)
> +{
> +	current->rcu_read_lock_nesting++;
> +}
> +
> +static inline bool rcu_preempt_depth_dec_and_test(void)
> +{
> +	return --current->rcu_read_lock_nesting == 0;
> +}

I made an rcu_preempt_depth_dec() for the time being.  Hopefully we
can get something set up to require rcu_preempt_depth_dec_and_test()
soon, but we are not there yet.
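For reference, the non-test variant in the resulting patch below is just a
plain decrement (named rcu_preempt_read_exit() there), so callers key off
rcu_preempt_depth() rather than off the helper's return value:

	static void rcu_preempt_read_exit(void)
	{
		/* Plain decrement; callers check rcu_preempt_depth() separately. */
		current->rcu_read_lock_nesting--;
	}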

> +static inline void rcu_preempt_depth_set(int val)
> +{
> +	current->rcu_read_lock_nesting = val;
> +}

These can just be static (rather than static inline) because this .h
file is included into only one .c file.  Best to let the compiler make
the inlining decisions.
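Concretely, in the version I applied these end up as, for example (same body,
just without the inline keyword):

	static void rcu_preempt_depth_set(int val)
	{
		current->rcu_read_lock_nesting = val;
	}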

>  /*
>   * Preemptible RCU implementation for rcu_read_lock().
>   * Just increment ->rcu_read_lock_nesting, shared state will be updated
> @@ -353,9 +368,9 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
>   */
>  void __rcu_read_lock(void)
>  {
> -	current->rcu_read_lock_nesting++;
> +	rcu_preempt_depth_inc();
>  	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
> -		WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
> +		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
>  	barrier();  /* critical section after entry code. */
>  }
>  EXPORT_SYMBOL_GPL(__rcu_read_lock);
> @@ -371,15 +386,13 @@ void __rcu_read_unlock(void)
>  {
>  	struct task_struct *t = current;
>  
> -	if (--t->rcu_read_lock_nesting == 0) {
> +	if (rcu_preempt_depth_dec_and_test()) {
>  		barrier();  /* critical section before exit code. */
>  		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
>  			rcu_read_unlock_special(t);
>  	}
>  	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
> -		int rrln = t->rcu_read_lock_nesting;
> -
> -		WARN_ON_ONCE(rrln < 0);
> +		WARN_ON_ONCE(rcu_preempt_depth() < 0);
>  	}
>  }
>  EXPORT_SYMBOL_GPL(__rcu_read_unlock);
> @@ -531,7 +544,7 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
>  {
>  	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
>  		READ_ONCE(t->rcu_read_unlock_special.s)) &&
> -	       !t->rcu_read_lock_nesting;
> +	       !rcu_preempt_depth();
>  }
>  
>  /*
> @@ -662,7 +675,7 @@ static void rcu_flavor_sched_clock_irq(int user)
>  	if (user || rcu_is_cpu_rrupt_from_idle()) {
>  		rcu_note_voluntary_context_switch(current);
>  	}
> -	if (t->rcu_read_lock_nesting > 0 ||
> +	if (rcu_preempt_depth() > 0 ||
>  	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
>  		/* No QS, force context switch if deferred. */
>  		if (rcu_preempt_need_deferred_qs(t)) {
> @@ -672,13 +685,13 @@ static void rcu_flavor_sched_clock_irq(int user)
>  	} else if (rcu_preempt_need_deferred_qs(t)) {
>  		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
>  		return;
> -	} else if (!t->rcu_read_lock_nesting) {
> +	} else if (!rcu_preempt_depth()) {
>  		rcu_qs(); /* Report immediate QS. */
>  		return;
>  	}
>  
>  	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
> -	if (t->rcu_read_lock_nesting > 0 &&
> +	if (rcu_preempt_depth() > 0 &&
>  	    __this_cpu_read(rcu_data.core_needs_qs) &&
>  	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
>  	    !t->rcu_read_unlock_special.b.need_qs &&
> @@ -699,11 +712,11 @@ void exit_rcu(void)
>  	struct task_struct *t = current;
>  
>  	if (unlikely(!list_empty(&current->rcu_node_entry))) {
> -		t->rcu_read_lock_nesting = 1;
> +		rcu_preempt_depth_set(1);
>  		barrier();
>  		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
> -	} else if (unlikely(t->rcu_read_lock_nesting)) {
> -		t->rcu_read_lock_nesting = 1;
> +	} else if (unlikely(rcu_preempt_depth())) {
> +		rcu_preempt_depth_set(1);
>  	} else {
>  		return;
>  	}
> -- 
> 2.20.1

------------------------------------------------------------------------

commit 2f19afd3450c855007e1b355df6f215a0477d809
Author: Lai Jiangshan <laijs@linux.alibaba.com>
Date:   Fri Nov 15 14:08:53 2019 -0800

    rcu: Provide wrappers for uses of ->rcu_read_lock_nesting
    
    This commit provides wrapper functions for uses of ->rcu_read_lock_nesting
    to improve readability and to ease future changes to support inlining
    of __rcu_read_lock() and __rcu_read_unlock().
    
    Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
    Signed-off-by: Paul E. McKenney <paulmck@kernel.org>

diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index e4b77d3..6ce598d 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -608,7 +608,7 @@ static void rcu_exp_handler(void *unused)
 	 * critical section.  If also enabled or idle, immediately
 	 * report the quiescent state, otherwise defer.
 	 */
-	if (!t->rcu_read_lock_nesting) {
+	if (!rcu_preempt_depth()) {
 		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
 		    rcu_dynticks_curr_cpu_in_eqs()) {
 			rcu_report_exp_rdp(rdp);
@@ -632,7 +632,7 @@ static void rcu_exp_handler(void *unused)
 	 * can have caused this quiescent state to already have been
 	 * reported, so we really do need to check ->expmask.
 	 */
-	if (t->rcu_read_lock_nesting > 0) {
+	if (rcu_preempt_depth() > 0) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmask & rdp->grpmask) {
 			rdp->exp_deferred_qs = true;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 96dc3aa..b5605bd 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -292,8 +292,8 @@ void rcu_note_context_switch(bool preempt)
 
 	trace_rcu_utilization(TPS("Start context switch"));
 	lockdep_assert_irqs_disabled();
-	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
-	if (t->rcu_read_lock_nesting > 0 &&
+	WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
+	if (rcu_preempt_depth() > 0 &&
 	    !t->rcu_read_unlock_special.b.blocked) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
@@ -350,6 +350,21 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 #define RCU_NEST_NMAX (-INT_MAX / 2)
 #define RCU_NEST_PMAX (INT_MAX / 2)
 
+static void rcu_preempt_read_enter(void)
+{
+	current->rcu_read_lock_nesting++;
+}
+
+static void rcu_preempt_read_exit(void)
+{
+	current->rcu_read_lock_nesting--;
+}
+
+static void rcu_preempt_depth_set(int val)
+{
+	current->rcu_read_lock_nesting = val;
+}
+
 /*
  * Preemptible RCU implementation for rcu_read_lock().
  * Just increment ->rcu_read_lock_nesting, shared state will be updated
@@ -357,9 +372,9 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
  */
 void __rcu_read_lock(void)
 {
-	current->rcu_read_lock_nesting++;
+	rcu_preempt_read_enter();
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
-		WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
+		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
 	barrier();  /* critical section after entry code. */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -375,19 +390,19 @@ void __rcu_read_unlock(void)
 {
 	struct task_struct *t = current;
 
-	if (t->rcu_read_lock_nesting != 1) {
-		--t->rcu_read_lock_nesting;
+	if (rcu_preempt_depth() != 1) {
+		rcu_preempt_read_exit();
 	} else {
 		barrier();  /* critical section before exit code. */
-		t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
+		rcu_preempt_depth_set(-RCU_NEST_BIAS);
 		barrier();  /* assign before ->rcu_read_unlock_special load */
 		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
 			rcu_read_unlock_special(t);
 		barrier();  /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
+		rcu_preempt_depth_set(0);
 	}
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
-		int rrln = t->rcu_read_lock_nesting;
+		int rrln = rcu_preempt_depth();
 
 		WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
 	}
@@ -541,7 +556,7 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
 	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
 		READ_ONCE(t->rcu_read_unlock_special.s)) &&
-	       t->rcu_read_lock_nesting <= 0;
+	       rcu_preempt_depth() <= 0;
 }
 
 /*
@@ -554,16 +569,16 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 static void rcu_preempt_deferred_qs(struct task_struct *t)
 {
 	unsigned long flags;
-	bool couldrecurse = t->rcu_read_lock_nesting >= 0;
+	bool couldrecurse = rcu_preempt_depth() >= 0;
 
 	if (!rcu_preempt_need_deferred_qs(t))
 		return;
 	if (couldrecurse)
-		t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
+		rcu_preempt_depth_set(rcu_preempt_depth() - RCU_NEST_BIAS);
 	local_irq_save(flags);
 	rcu_preempt_deferred_qs_irqrestore(t, flags);
 	if (couldrecurse)
-		t->rcu_read_lock_nesting += RCU_NEST_BIAS;
+		rcu_preempt_depth_set(rcu_preempt_depth() + RCU_NEST_BIAS);
 }
 
 /*
@@ -674,7 +689,7 @@ static void rcu_flavor_sched_clock_irq(int user)
 	if (user || rcu_is_cpu_rrupt_from_idle()) {
 		rcu_note_voluntary_context_switch(current);
 	}
-	if (t->rcu_read_lock_nesting > 0 ||
+	if (rcu_preempt_depth() > 0 ||
 	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
 		/* No QS, force context switch if deferred. */
 		if (rcu_preempt_need_deferred_qs(t)) {
@@ -684,13 +699,13 @@ static void rcu_flavor_sched_clock_irq(int user)
 	} else if (rcu_preempt_need_deferred_qs(t)) {
 		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
 		return;
-	} else if (!t->rcu_read_lock_nesting) {
+	} else if (!rcu_preempt_depth()) {
 		rcu_qs(); /* Report immediate QS. */
 		return;
 	}
 
 	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
-	if (t->rcu_read_lock_nesting > 0 &&
+	if (rcu_preempt_depth() > 0 &&
 	    __this_cpu_read(rcu_data.core_needs_qs) &&
 	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
 	    !t->rcu_read_unlock_special.b.need_qs &&
@@ -711,11 +726,11 @@ void exit_rcu(void)
 	struct task_struct *t = current;
 
 	if (unlikely(!list_empty(&current->rcu_node_entry))) {
-		t->rcu_read_lock_nesting = 1;
+		rcu_preempt_depth_set(1);
 		barrier();
 		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
-	} else if (unlikely(t->rcu_read_lock_nesting)) {
-		t->rcu_read_lock_nesting = 1;
+	} else if (unlikely(rcu_preempt_depth())) {
+		rcu_preempt_depth_set(1);
 	} else {
 		return;
 	}
