linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] srcu: Remove srcu_cblist_invoking member from sdp
@ 2020-11-19  5:34 qiang.zhang
  2020-11-19 18:12 ` Paul E. McKenney
  0 siblings, 1 reply; 3+ messages in thread
From: qiang.zhang @ 2020-11-19  5:34 UTC (permalink / raw)
  To: paulmck; +Cc: jiangshanlai, rostedt, josh, rcu, linux-kernel

From: Zqiang <qiang.zhang@windriver.com>

The workqueue infrastructure guarantees that multiple queueings of the
same sdp->work execute sequentially in rcu_gp_wq, so the
srcu_cblist_invoking flag is not needed to prevent concurrent
execution; remove it.

Signed-off-by: Zqiang <qiang.zhang@windriver.com>
---
 include/linux/srcutree.h | 1 -
 kernel/rcu/srcutree.c    | 8 ++------
 2 files changed, 2 insertions(+), 7 deletions(-)

diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 9cfcc8a756ae..62d8312b5451 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -31,7 +31,6 @@ struct srcu_data {
 	struct rcu_segcblist srcu_cblist;	/* List of callbacks.*/
 	unsigned long srcu_gp_seq_needed;	/* Furthest future GP needed. */
 	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
-	bool srcu_cblist_invoking;		/* Invoking these CBs? */
 	struct timer_list delay_work;		/* Delay for CB invoking */
 	struct work_struct work;		/* Context for CB invoking. */
 	struct rcu_head srcu_barrier_head;	/* For srcu_barrier() use. */
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 3c5e2806e0b9..c4d5cd2567a6 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -134,7 +134,6 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
 		sdp = per_cpu_ptr(ssp->sda, cpu);
 		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
 		rcu_segcblist_init(&sdp->srcu_cblist);
-		sdp->srcu_cblist_invoking = false;
 		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
 		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
 		sdp->mynode = &snp_first[cpu / levelspread[level]];
@@ -1254,14 +1253,11 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	spin_lock_irq_rcu_node(sdp);
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&ssp->srcu_gp_seq));
-	if (sdp->srcu_cblist_invoking ||
-	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
+	if (!rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
 		spin_unlock_irq_rcu_node(sdp);
 		return;  /* Someone else on the job or nothing to do. */
 	}
 
-	/* We are on the job!  Extract and invoke ready callbacks. */
-	sdp->srcu_cblist_invoking = true;
 	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
 	len = ready_cbs.len;
 	spin_unlock_irq_rcu_node(sdp);
@@ -1282,7 +1278,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
 				       rcu_seq_snap(&ssp->srcu_gp_seq));
-	sdp->srcu_cblist_invoking = false;
+
 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
 	spin_unlock_irq_rcu_node(sdp);
 	if (more)
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] srcu: Remove srcu_cblist_invoking member from sdp
  2020-11-19  5:34 [PATCH] srcu: Remove srcu_cblist_invoking member from sdp qiang.zhang
@ 2020-11-19 18:12 ` Paul E. McKenney
  2020-11-20  6:19   ` 回复: " Zhang, Qiang
  0 siblings, 1 reply; 3+ messages in thread
From: Paul E. McKenney @ 2020-11-19 18:12 UTC (permalink / raw)
  To: qiang.zhang; +Cc: jiangshanlai, rostedt, josh, rcu, linux-kernel

On Thu, Nov 19, 2020 at 01:34:11PM +0800, qiang.zhang@windriver.com wrote:
> From: Zqiang <qiang.zhang@windriver.com>
> 
> The workqueue infrastructure guarantees that multiple queueings of the
> same sdp->work execute sequentially in rcu_gp_wq, so the
> srcu_cblist_invoking flag is not needed to prevent concurrent
> execution; remove it.
> 
> Signed-off-by: Zqiang <qiang.zhang@windriver.com>

Good job analyzing the code, which is very good to see!!!

But these do have a potential purpose.  Right now, it is OK to invoke
synchronize_srcu() during early boot, that is, before the scheduler
has started.  But there is a gap from the time that the scheduler has
initialized (so that preemption and blocking are possible) and the time
that workqueues are initialized and fully functional.  Only after that
is it once again OK to use synchronize_srcu().

If synchronize_srcu() is ever required to work correctly during that
time period, it will need to directly invoke the functions that are
currently run in workqueue context.  Which means that there will then be
the possibility of two instances of these functions running just after
workqueues are available.

							Thanx, Paul

> ---
>  include/linux/srcutree.h | 1 -
>  kernel/rcu/srcutree.c    | 8 ++------
>  2 files changed, 2 insertions(+), 7 deletions(-)
> 
> diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
> index 9cfcc8a756ae..62d8312b5451 100644
> --- a/include/linux/srcutree.h
> +++ b/include/linux/srcutree.h
> @@ -31,7 +31,6 @@ struct srcu_data {
>  	struct rcu_segcblist srcu_cblist;	/* List of callbacks.*/
>  	unsigned long srcu_gp_seq_needed;	/* Furthest future GP needed. */
>  	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
> -	bool srcu_cblist_invoking;		/* Invoking these CBs? */
>  	struct timer_list delay_work;		/* Delay for CB invoking */
>  	struct work_struct work;		/* Context for CB invoking. */
>  	struct rcu_head srcu_barrier_head;	/* For srcu_barrier() use. */
> diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
> index 3c5e2806e0b9..c4d5cd2567a6 100644
> --- a/kernel/rcu/srcutree.c
> +++ b/kernel/rcu/srcutree.c
> @@ -134,7 +134,6 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
>  		sdp = per_cpu_ptr(ssp->sda, cpu);
>  		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
>  		rcu_segcblist_init(&sdp->srcu_cblist);
> -		sdp->srcu_cblist_invoking = false;
>  		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
>  		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
>  		sdp->mynode = &snp_first[cpu / levelspread[level]];
> @@ -1254,14 +1253,11 @@ static void srcu_invoke_callbacks(struct work_struct *work)
>  	spin_lock_irq_rcu_node(sdp);
>  	rcu_segcblist_advance(&sdp->srcu_cblist,
>  			      rcu_seq_current(&ssp->srcu_gp_seq));
> -	if (sdp->srcu_cblist_invoking ||
> -	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
> +	if (!rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
>  		spin_unlock_irq_rcu_node(sdp);
>  		return;  /* Someone else on the job or nothing to do. */
>  	}
>  
> -	/* We are on the job!  Extract and invoke ready callbacks. */
> -	sdp->srcu_cblist_invoking = true;
>  	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
>  	len = ready_cbs.len;
>  	spin_unlock_irq_rcu_node(sdp);
> @@ -1282,7 +1278,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
>  	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
>  	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
>  				       rcu_seq_snap(&ssp->srcu_gp_seq));
> -	sdp->srcu_cblist_invoking = false;
> +
>  	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
>  	spin_unlock_irq_rcu_node(sdp);
>  	if (more)
> -- 
> 2.17.1
> 

^ permalink raw reply	[flat|nested] 3+ messages in thread

* 回复: [PATCH] srcu: Remove srcu_cblist_invoking member from sdp
  2020-11-19 18:12 ` Paul E. McKenney
@ 2020-11-20  6:19   ` Zhang, Qiang
  0 siblings, 0 replies; 3+ messages in thread
From: Zhang, Qiang @ 2020-11-20  6:19 UTC (permalink / raw)
  To: paulmck; +Cc: jiangshanlai, rostedt, josh, rcu, linux-kernel



________________________________________
发件人: Paul E. McKenney <paulmck@kernel.org>
发送时间: 2020年11月20日 2:12
收件人: Zhang, Qiang
抄送: jiangshanlai@gmail.com; rostedt@goodmis.org; josh@joshtriplett.org; rcu@vger.kernel.org; linux-kernel@vger.kernel.org
主题: Re: [PATCH] srcu: Remove srcu_cblist_invoking member from sdp

[Please note this e-mail is from an EXTERNAL e-mail address]

On Thu, Nov 19, 2020 at 01:34:11PM +0800, qiang.zhang@windriver.com wrote:
> From: Zqiang <qiang.zhang@windriver.com>
>
> The workqueue infrastructure guarantees that multiple queueings of the
> same sdp->work execute sequentially in rcu_gp_wq, so the
> srcu_cblist_invoking flag is not needed to prevent concurrent
> execution; remove it.
>
> Signed-off-by: Zqiang <qiang.zhang@windriver.com>

>Good job analyzing the code, which is very good to see!!!
>
>But these do have a potential purpose.  Right now, it is OK to invoke
>synchronize_srcu() during early boot, that is, before the scheduler
>has started.  But there is a gap from the time that the scheduler has
>initialized (so that preemption and blocking are possible) and the time
>that workqueues are initialized and fully functional.  Only after that
>is it once again OK to use synchronize_srcu().
>
>If synchronize_srcu() is ever required to work correctly during that
>time period, it will need to directly invoke the functions that are
>currently run in workqueue context.  Which means that there will then be
>the possibility of two instances of these functions running just after
>workqueues are available.
>
>                                                       Thanx, Paul

Thanks Paul.

> ---
>  include/linux/srcutree.h | 1 -
>  kernel/rcu/srcutree.c    | 8 ++------
>  2 files changed, 2 insertions(+), 7 deletions(-)
>
> diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
> index 9cfcc8a756ae..62d8312b5451 100644
> --- a/include/linux/srcutree.h
> +++ b/include/linux/srcutree.h
> @@ -31,7 +31,6 @@ struct srcu_data {
>       struct rcu_segcblist srcu_cblist;       /* List of callbacks.*/
>       unsigned long srcu_gp_seq_needed;       /* Furthest future GP needed. */
>       unsigned long srcu_gp_seq_needed_exp;   /* Furthest future exp GP. */
> -     bool srcu_cblist_invoking;              /* Invoking these CBs? */
>       struct timer_list delay_work;           /* Delay for CB invoking */
>       struct work_struct work;                /* Context for CB invoking. */
>       struct rcu_head srcu_barrier_head;      /* For srcu_barrier() use. */
> diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
> index 3c5e2806e0b9..c4d5cd2567a6 100644
> --- a/kernel/rcu/srcutree.c
> +++ b/kernel/rcu/srcutree.c
> @@ -134,7 +134,6 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
>               sdp = per_cpu_ptr(ssp->sda, cpu);
>               spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
>               rcu_segcblist_init(&sdp->srcu_cblist);
> -             sdp->srcu_cblist_invoking = false;
>               sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
>               sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
>               sdp->mynode = &snp_first[cpu / levelspread[level]];
> @@ -1254,14 +1253,11 @@ static void srcu_invoke_callbacks(struct work_struct *work)
>       spin_lock_irq_rcu_node(sdp);
>       rcu_segcblist_advance(&sdp->srcu_cblist,
>                             rcu_seq_current(&ssp->srcu_gp_seq));
> -     if (sdp->srcu_cblist_invoking ||
> -         !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
> +     if (!rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
>               spin_unlock_irq_rcu_node(sdp);
>               return;  /* Someone else on the job or nothing to do. */
>       }
>
> -     /* We are on the job!  Extract and invoke ready callbacks. */
> -     sdp->srcu_cblist_invoking = true;
>       rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
>       len = ready_cbs.len;
>       spin_unlock_irq_rcu_node(sdp);
> @@ -1282,7 +1278,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
>       rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
>       (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
>                                      rcu_seq_snap(&ssp->srcu_gp_seq));
> -     sdp->srcu_cblist_invoking = false;
> +
>       more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
>       spin_unlock_irq_rcu_node(sdp);
>       if (more)
> --
> 2.17.1
>

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2020-11-20  6:19 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-11-19  5:34 [PATCH] srcu: Remove srcu_cblist_invoking member from sdp qiang.zhang
2020-11-19 18:12 ` Paul E. McKenney
2020-11-20  6:19   ` 回复: " Zhang, Qiang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).