rcu.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Neeraj Upadhyay <neeraju@codeaurora.org>
To: paulmck@kernel.org, rcu@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, kernel-team@fb.com,
	mingo@kernel.org, jiangshanlai@gmail.com,
	akpm@linux-foundation.org, mathieu.desnoyers@efficios.com,
	josh@joshtriplett.org, tglx@linutronix.de, peterz@infradead.org,
	rostedt@goodmis.org, dhowells@redhat.com, edumazet@google.com,
	fweisbec@gmail.com, oleg@redhat.com, joel@joelfernandes.org
Subject: Re: [PATCH RFC tip/core/rcu 3/5] srcu: Provide internal interface to start a Tree SRCU grace period
Date: Fri, 20 Nov 2020 17:06:50 +0530	[thread overview]
Message-ID: <69c05cd0-8187-49a7-5b2d-1a10ba42fa44@codeaurora.org> (raw)
In-Reply-To: <20201117004052.14758-3-paulmck@kernel.org>



On 11/17/2020 6:10 AM, paulmck@kernel.org wrote:
> From: "Paul E. McKenney" <paulmck@kernel.org>
> 
> There is a need for a polling interface for SRCU grace periods.
> This polling needs to initiate an SRCU grace period without having
> to queue (and manage) a callback.  This commit therefore splits the
> Tree SRCU __call_srcu() function into callback-initialization and
> queuing/start-grace-period portions, with the latter in a new function
> named srcu_gp_start_if_needed().  This function may be passed a NULL
> callback pointer, in which case it will refrain from queuing anything.
> 
> Why have the new function mess with queuing?  Locking considerations,
> of course!
> 
> Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/
> Reported-by: Kent Overstreet <kent.overstreet@gmail.com>
> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> ---

Reviewed-by: Neeraj Upadhyay <neeraju@codeaurora.org>


Thanks
Neeraj

>   kernel/rcu/srcutree.c | 66 +++++++++++++++++++++++++++++----------------------
>   1 file changed, 37 insertions(+), 29 deletions(-)
> 
> diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
> index 79b7081..d930ece 100644
> --- a/kernel/rcu/srcutree.c
> +++ b/kernel/rcu/srcutree.c
> @@ -808,6 +808,42 @@ static void srcu_leak_callback(struct rcu_head *rhp)
>   }
>   
>   /*
> + * Start an SRCU grace period, and also queue the callback if non-NULL.
> + */
> +static void srcu_gp_start_if_needed(struct srcu_struct *ssp, struct rcu_head *rhp, bool do_norm)
> +{
> +	unsigned long flags;
> +	int idx;
> +	bool needexp = false;
> +	bool needgp = false;
> +	unsigned long s;
> +	struct srcu_data *sdp;
> +
> +	idx = srcu_read_lock(ssp);
> +	sdp = raw_cpu_ptr(ssp->sda);
> +	spin_lock_irqsave_rcu_node(sdp, flags);
> +	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
> +	rcu_segcblist_advance(&sdp->srcu_cblist,
> +			      rcu_seq_current(&ssp->srcu_gp_seq));
> +	s = rcu_seq_snap(&ssp->srcu_gp_seq);
> +	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
> +	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
> +		sdp->srcu_gp_seq_needed = s;
> +		needgp = true;
> +	}
> +	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
> +		sdp->srcu_gp_seq_needed_exp = s;
> +		needexp = true;
> +	}
> +	spin_unlock_irqrestore_rcu_node(sdp, flags);
> +	if (needgp)
> +		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
> +	else if (needexp)
> +		srcu_funnel_exp_start(ssp, sdp->mynode, s);
> +	srcu_read_unlock(ssp, idx);
> +}
> +
> +/*
>    * Enqueue an SRCU callback on the srcu_data structure associated with
>    * the current CPU and the specified srcu_struct structure, initiating
>    * grace-period processing if it is not already running.
> @@ -838,13 +874,6 @@ static void srcu_leak_callback(struct rcu_head *rhp)
>   static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
>   			rcu_callback_t func, bool do_norm)
>   {
> -	unsigned long flags;
> -	int idx;
> -	bool needexp = false;
> -	bool needgp = false;
> -	unsigned long s;
> -	struct srcu_data *sdp;
> -
>   	check_init_srcu_struct(ssp);
>   	if (debug_rcu_head_queue(rhp)) {
>   		/* Probable double call_srcu(), so leak the callback. */
> @@ -853,28 +882,7 @@ static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
>   		return;
>   	}
>   	rhp->func = func;
> -	idx = srcu_read_lock(ssp);
> -	sdp = raw_cpu_ptr(ssp->sda);
> -	spin_lock_irqsave_rcu_node(sdp, flags);
> -	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
> -	rcu_segcblist_advance(&sdp->srcu_cblist,
> -			      rcu_seq_current(&ssp->srcu_gp_seq));
> -	s = rcu_seq_snap(&ssp->srcu_gp_seq);
> -	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
> -	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
> -		sdp->srcu_gp_seq_needed = s;
> -		needgp = true;
> -	}
> -	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
> -		sdp->srcu_gp_seq_needed_exp = s;
> -		needexp = true;
> -	}
> -	spin_unlock_irqrestore_rcu_node(sdp, flags);
> -	if (needgp)
> -		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
> -	else if (needexp)
> -		srcu_funnel_exp_start(ssp, sdp->mynode, s);
> -	srcu_read_unlock(ssp, idx);
> +	srcu_gp_start_if_needed(ssp, rhp, do_norm);
>   }
>   
>   /**
> 

-- 
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc., is a 
member of the Code Aurora Forum, hosted by The Linux Foundation

  reply	other threads:[~2020-11-20 11:37 UTC|newest]

Thread overview: 23+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-11-17  0:40 [PATCH tip/core/rcu 0/5] Provide SRCU polling grace-period interfaces Paul E. McKenney
2020-11-17  0:40 ` [PATCH RFC tip/core/rcu 1/5] srcu: Make Tiny SRCU use multi-bit grace-period counter paulmck
2020-11-19  8:14   ` Neeraj Upadhyay
2020-11-19 18:00     ` Paul E. McKenney
2020-11-17  0:40 ` [PATCH RFC tip/core/rcu 2/5] srcu: Provide internal interface to start a Tiny SRCU grace period paulmck
2020-11-20 11:36   ` Neeraj Upadhyay
2020-11-17  0:40 ` [PATCH RFC tip/core/rcu 3/5] srcu: Provide internal interface to start a Tree " paulmck
2020-11-20 11:36   ` Neeraj Upadhyay [this message]
2020-11-21  0:37     ` Paul E. McKenney
2020-11-17  0:40 ` [PATCH RFC tip/core/rcu 4/5] srcu: Provide polling interfaces for Tiny SRCU grace periods paulmck
2020-11-20 11:58   ` Neeraj Upadhyay
2020-11-21  0:13     ` Paul E. McKenney
2020-11-22 14:27       ` Neeraj Upadhyay
2020-11-22 18:01         ` Paul E. McKenney
2020-11-23  4:34           ` Neeraj Upadhyay
2020-11-23 21:07             ` Paul E. McKenney
2020-11-17  0:40 ` [PATCH RFC tip/core/rcu 5/5] srcu: Provide polling interfaces for Tree " paulmck
2020-11-20 12:01   ` Neeraj Upadhyay
2020-11-21  0:16     ` Paul E. McKenney
2020-11-22 14:22       ` Neeraj Upadhyay
2020-11-21  0:58 ` [PATCH tip/core/rcu 0/5] Provide SRCU polling grace-period interfaces Paul E. McKenney
2020-11-21  1:05   ` Steven Rostedt
2020-11-21  1:12     ` Paul E. McKenney

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=69c05cd0-8187-49a7-5b2d-1a10ba42fa44@codeaurora.org \
    --to=neeraju@codeaurora.org \
    --cc=akpm@linux-foundation.org \
    --cc=dhowells@redhat.com \
    --cc=edumazet@google.com \
    --cc=fweisbec@gmail.com \
    --cc=jiangshanlai@gmail.com \
    --cc=joel@joelfernandes.org \
    --cc=josh@joshtriplett.org \
    --cc=kernel-team@fb.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mathieu.desnoyers@efficios.com \
    --cc=mingo@kernel.org \
    --cc=oleg@redhat.com \
    --cc=paulmck@kernel.org \
    --cc=peterz@infradead.org \
    --cc=rcu@vger.kernel.org \
    --cc=rostedt@goodmis.org \
    --cc=tglx@linutronix.de \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).