* [PATCH 1/2] md/raid5: avoid duplicate code
@ 2015-05-06  9:45 Yuanhan Liu
  2015-05-06  9:45 ` [PATCH 2/2] md/raid5: remove unnecessary sh->count check Yuanhan Liu
  2015-05-08  5:28 ` [PATCH 1/2] md/raid5: avoid duplicate code NeilBrown
  0 siblings, 2 replies; 4+ messages in thread
From: Yuanhan Liu @ 2015-05-06  9:45 UTC (permalink / raw)
  To: neilb; +Cc: linux-raid, linux-kernel, Yuanhan Liu

Move the code that puts an idle sh (hot in cache, but happening to have
a zero reference count) back to the active state into __find_stripe(),
because that is what needs to be done every time __find_stripe() is
invoked.

Moving it there avoids duplicate code and, IMO, makes a bit more sense,
as __find_stripe() now tells the whole story.
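
To illustrate, here is a simplified sketch (not the literal kernel
code; the retry loop, hash-lock handling and corner cases are omitted)
of how the get_active_stripe() call site reads after the move:

	sh = __find_stripe(conf, sector, conf->generation);
	/*
	 * __find_stripe() now both looks the stripe up and, when the
	 * stripe was idle (count was 0), takes device_lock, pulls it
	 * off the lru, fixes up the accounting and grabs a reference.
	 * The caller only has to handle the "not in cache" case.
	 */
	if (!sh)
		sh = get_free_stripe(conf, hash);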

Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
---
 drivers/md/raid5.c | 50 ++++++++++++++++++--------------------------------
 1 file changed, 18 insertions(+), 32 deletions(-)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 77dfd72..e7fa818 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -567,8 +567,25 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
 
 	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
 	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
-		if (sh->sector == sector && sh->generation == generation)
+		if (sh->sector == sector && sh->generation == generation) {
+			if (!atomic_inc_not_zero(&sh->count)) {
+				spin_lock(&conf->device_lock);
+				if (!atomic_read(&sh->count)) {
+					if (!test_bit(STRIPE_HANDLE, &sh->state))
+						atomic_inc(&conf->active_stripes);
+					BUG_ON(list_empty(&sh->lru) &&
+					       !test_bit(STRIPE_EXPANDING, &sh->state));
+					list_del_init(&sh->lru);
+					if (sh->group) {
+						sh->group->stripes_cnt--;
+						sh->group = NULL;
+					}
+				}
+				atomic_inc(&sh->count);
+				spin_unlock(&conf->device_lock);
+			}
 			return sh;
+		}
 	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
 	return NULL;
 }
@@ -698,21 +715,6 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
 				init_stripe(sh, sector, previous);
 				atomic_inc(&sh->count);
 			}
-		} else if (!atomic_inc_not_zero(&sh->count)) {
-			spin_lock(&conf->device_lock);
-			if (!atomic_read(&sh->count)) {
-				if (!test_bit(STRIPE_HANDLE, &sh->state))
-					atomic_inc(&conf->active_stripes);
-				BUG_ON(list_empty(&sh->lru) &&
-				       !test_bit(STRIPE_EXPANDING, &sh->state));
-				list_del_init(&sh->lru);
-				if (sh->group) {
-					sh->group->stripes_cnt--;
-					sh->group = NULL;
-				}
-			}
-			atomic_inc(&sh->count);
-			spin_unlock(&conf->device_lock);
 		}
 	} while (sh == NULL);
 
@@ -771,22 +773,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
 	hash = stripe_hash_locks_hash(head_sector);
 	spin_lock_irq(conf->hash_locks + hash);
 	head = __find_stripe(conf, head_sector, conf->generation);
-	if (head && !atomic_inc_not_zero(&head->count)) {
-		spin_lock(&conf->device_lock);
-		if (!atomic_read(&head->count)) {
-			if (!test_bit(STRIPE_HANDLE, &head->state))
-				atomic_inc(&conf->active_stripes);
-			BUG_ON(list_empty(&head->lru) &&
-			       !test_bit(STRIPE_EXPANDING, &head->state));
-			list_del_init(&head->lru);
-			if (head->group) {
-				head->group->stripes_cnt--;
-				head->group = NULL;
-			}
-		}
-		atomic_inc(&head->count);
-		spin_unlock(&conf->device_lock);
-	}
 	spin_unlock_irq(conf->hash_locks + hash);
 
 	if (!head)
-- 
1.9.0


* [PATCH 2/2] md/raid5: remove unnecessary sh->count check
  2015-05-06  9:45 [PATCH 1/2] md/raid5: avoid duplicate code Yuanhan Liu
@ 2015-05-06  9:45 ` Yuanhan Liu
  2015-05-08  5:28 ` [PATCH 1/2] md/raid5: avoid duplicate code NeilBrown
  1 sibling, 0 replies; 4+ messages in thread
From: Yuanhan Liu @ 2015-05-06  9:45 UTC (permalink / raw)
  To: neilb; +Cc: linux-raid, linux-kernel, Yuanhan Liu

Remove the unnecessary "!atomic_read(&sh->count)" check: the preceding
"atomic_inc_not_zero(&sh->count)" check having failed already assures
that sh->count is 0.

The only reason I can think of for needing such a check is a possible
race around the lock.

First of all, I doubt that another process could modify an in-cache but
zero-referenced sh while it is protected by the hash lock. Hence, I
would say sh->count stays consistently 0 inside that
"if (!atomic_inc_not_zero(...))" block.

Secondly, suppose for a moment that someone outside the lock does
modify sh->count (by atomic_inc?). That could lead to problems.

To make it clear, here are a few lines of the key code:

	if (!atomic_inc_not_zero(&sh->count)) {
		spin_lock(&conf->device_lock);
		if (!atomic_read(&sh->count)) {
			....
		}
		...
	}

At the time we enter the first if block, sh->count is zero. Now assume
someone increases sh->count from somewhere while we are acquiring the
lock; the inner if block will then not be executed, leaving some
fields, such as conf->active_stripes, not set properly.

So, we should execute the body of the inner if block whenever we have
entered the first if block, no matter whether sh->count is still 0 or
not.
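
For completeness, the hypothetical interleaving I am worried about
would look roughly like this (illustrative only; whether such an
external increment can really happen is exactly what I am unsure
about):

	/* CPU0, inside __find_stripe() */
	atomic_inc_not_zero(&sh->count);	/* fails: count is 0 */

	/* CPU1 (hypothetical), not holding device_lock */
	atomic_inc(&sh->count);			/* count: 0 -> 1 */

	/* CPU0 continues */
	spin_lock(&conf->device_lock);
	if (!atomic_read(&sh->count)) {		/* false now, block skipped */
		...
	}
	atomic_inc(&sh->count);			/* count: 1 -> 2 */
	spin_unlock(&conf->device_lock);

sh would then be returned with count == 2, yet it was never removed
from the lru and conf->active_stripes was never incremented, which is
the inconsistency described above.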

Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
---

Neil, I'm a bit concerned that I may have missed something in this
patch. Please kindly correct me if I'm wrong :)

---
 drivers/md/raid5.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e7fa818..17ece2a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -570,16 +570,14 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
 		if (sh->sector == sector && sh->generation == generation) {
 			if (!atomic_inc_not_zero(&sh->count)) {
 				spin_lock(&conf->device_lock);
-				if (!atomic_read(&sh->count)) {
-					if (!test_bit(STRIPE_HANDLE, &sh->state))
-						atomic_inc(&conf->active_stripes);
-					BUG_ON(list_empty(&sh->lru) &&
-					       !test_bit(STRIPE_EXPANDING, &sh->state));
-					list_del_init(&sh->lru);
-					if (sh->group) {
-						sh->group->stripes_cnt--;
-						sh->group = NULL;
-					}
+				if (!test_bit(STRIPE_HANDLE, &sh->state))
+					atomic_inc(&conf->active_stripes);
+				BUG_ON(list_empty(&sh->lru) &&
+				       !test_bit(STRIPE_EXPANDING, &sh->state));
+				list_del_init(&sh->lru);
+				if (sh->group) {
+					sh->group->stripes_cnt--;
+					sh->group = NULL;
 				}
 				atomic_inc(&sh->count);
 				spin_unlock(&conf->device_lock);
-- 
1.9.0



* Re: [PATCH 1/2] md/raid5: avoid duplicate code
  2015-05-06  9:45 [PATCH 1/2] md/raid5: avoid duplicate code Yuanhan Liu
  2015-05-06  9:45 ` [PATCH 2/2] md/raid5: remove unnecessary sh->count check Yuanhan Liu
@ 2015-05-08  5:28 ` NeilBrown
  2015-05-08  5:34   ` Yuanhan Liu
  1 sibling, 1 reply; 4+ messages in thread
From: NeilBrown @ 2015-05-08  5:28 UTC (permalink / raw)
  To: Yuanhan Liu; +Cc: linux-raid, linux-kernel


On Wed,  6 May 2015 17:45:49 +0800 Yuanhan Liu <yuanhan.liu@linux.intel.com>
wrote:

> Move the code that puts an idle sh (hot in cache, but happening to have
> a zero reference count) back to the active state into __find_stripe(),
> because that is what needs to be done every time __find_stripe() is
> invoked.
> 
> Moving it there avoids duplicate code and, IMO, makes a bit more sense,
> as __find_stripe() now tells the whole story.

Thanks for this.  It is a good cleanup.

However I don't want to make any new changes to the RAID5 code until I find a
couple of bugs that I'm hunting.  So I won't apply it just yet.
Remind me in a couple of weeks if I seem to have forgotten.

Thanks,
NeilBrown


> 
> Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
> ---
>  drivers/md/raid5.c | 50 ++++++++++++++++++--------------------------------
>  1 file changed, 18 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
> index 77dfd72..e7fa818 100644
> --- a/drivers/md/raid5.c
> +++ b/drivers/md/raid5.c
> @@ -567,8 +567,25 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
>  
>  	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
>  	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
> -		if (sh->sector == sector && sh->generation == generation)
> +		if (sh->sector == sector && sh->generation == generation) {
> +			if (!atomic_inc_not_zero(&sh->count)) {
> +				spin_lock(&conf->device_lock);
> +				if (!atomic_read(&sh->count)) {
> +					if (!test_bit(STRIPE_HANDLE, &sh->state))
> +						atomic_inc(&conf->active_stripes);
> +					BUG_ON(list_empty(&sh->lru) &&
> +					       !test_bit(STRIPE_EXPANDING, &sh->state));
> +					list_del_init(&sh->lru);
> +					if (sh->group) {
> +						sh->group->stripes_cnt--;
> +						sh->group = NULL;
> +					}
> +				}
> +				atomic_inc(&sh->count);
> +				spin_unlock(&conf->device_lock);
> +			}
>  			return sh;
> +		}
>  	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
>  	return NULL;
>  }
> @@ -698,21 +715,6 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
>  				init_stripe(sh, sector, previous);
>  				atomic_inc(&sh->count);
>  			}
> -		} else if (!atomic_inc_not_zero(&sh->count)) {
> -			spin_lock(&conf->device_lock);
> -			if (!atomic_read(&sh->count)) {
> -				if (!test_bit(STRIPE_HANDLE, &sh->state))
> -					atomic_inc(&conf->active_stripes);
> -				BUG_ON(list_empty(&sh->lru) &&
> -				       !test_bit(STRIPE_EXPANDING, &sh->state));
> -				list_del_init(&sh->lru);
> -				if (sh->group) {
> -					sh->group->stripes_cnt--;
> -					sh->group = NULL;
> -				}
> -			}
> -			atomic_inc(&sh->count);
> -			spin_unlock(&conf->device_lock);
>  		}
>  	} while (sh == NULL);
>  
> @@ -771,22 +773,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
>  	hash = stripe_hash_locks_hash(head_sector);
>  	spin_lock_irq(conf->hash_locks + hash);
>  	head = __find_stripe(conf, head_sector, conf->generation);
> -	if (head && !atomic_inc_not_zero(&head->count)) {
> -		spin_lock(&conf->device_lock);
> -		if (!atomic_read(&head->count)) {
> -			if (!test_bit(STRIPE_HANDLE, &head->state))
> -				atomic_inc(&conf->active_stripes);
> -			BUG_ON(list_empty(&head->lru) &&
> -			       !test_bit(STRIPE_EXPANDING, &head->state));
> -			list_del_init(&head->lru);
> -			if (head->group) {
> -				head->group->stripes_cnt--;
> -				head->group = NULL;
> -			}
> -		}
> -		atomic_inc(&head->count);
> -		spin_unlock(&conf->device_lock);
> -	}
>  	spin_unlock_irq(conf->hash_locks + hash);
>  
>  	if (!head)




* Re: [PATCH 1/2] md/raid5: avoid duplicate code
  2015-05-08  5:28 ` [PATCH 1/2] md/raid5: avoid duplicate code NeilBrown
@ 2015-05-08  5:34   ` Yuanhan Liu
  0 siblings, 0 replies; 4+ messages in thread
From: Yuanhan Liu @ 2015-05-08  5:34 UTC (permalink / raw)
  To: NeilBrown; +Cc: linux-raid, linux-kernel

On Fri, May 08, 2015 at 03:28:00PM +1000, NeilBrown wrote:
> On Wed,  6 May 2015 17:45:49 +0800 Yuanhan Liu <yuanhan.liu@linux.intel.com>
> wrote:
> 
> > Move the code that puts an idle sh (hot in cache, but happening to have
> > a zero reference count) back to the active state into __find_stripe(),
> > because that is what needs to be done every time __find_stripe() is
> > invoked.
> > 
> > Moving it there avoids duplicate code and, IMO, makes a bit more sense,
> > as __find_stripe() now tells the whole story.
> 
> Thanks for this.  It is a good cleanup.
> 
> However I don't want to make any new changes to the RAID5 code until I find a
> couple of bugs that I'm hunting.  So I won't apply it just yet.
> Remind me in a couple of weeks if I seem to have forgotten.

Got it. Thanks.


	--yliu
> 
> > 
> > Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
> > ---
> >  drivers/md/raid5.c | 50 ++++++++++++++++++--------------------------------
> >  1 file changed, 18 insertions(+), 32 deletions(-)
> > 
> > diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
> > index 77dfd72..e7fa818 100644
> > --- a/drivers/md/raid5.c
> > +++ b/drivers/md/raid5.c
> > @@ -567,8 +567,25 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
> >  
> >  	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
> >  	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
> > -		if (sh->sector == sector && sh->generation == generation)
> > +		if (sh->sector == sector && sh->generation == generation) {
> > +			if (!atomic_inc_not_zero(&sh->count)) {
> > +				spin_lock(&conf->device_lock);
> > +				if (!atomic_read(&sh->count)) {
> > +					if (!test_bit(STRIPE_HANDLE, &sh->state))
> > +						atomic_inc(&conf->active_stripes);
> > +					BUG_ON(list_empty(&sh->lru) &&
> > +					       !test_bit(STRIPE_EXPANDING, &sh->state));
> > +					list_del_init(&sh->lru);
> > +					if (sh->group) {
> > +						sh->group->stripes_cnt--;
> > +						sh->group = NULL;
> > +					}
> > +				}
> > +				atomic_inc(&sh->count);
> > +				spin_unlock(&conf->device_lock);
> > +			}
> >  			return sh;
> > +		}
> >  	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
> >  	return NULL;
> >  }
> > @@ -698,21 +715,6 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
> >  				init_stripe(sh, sector, previous);
> >  				atomic_inc(&sh->count);
> >  			}
> > -		} else if (!atomic_inc_not_zero(&sh->count)) {
> > -			spin_lock(&conf->device_lock);
> > -			if (!atomic_read(&sh->count)) {
> > -				if (!test_bit(STRIPE_HANDLE, &sh->state))
> > -					atomic_inc(&conf->active_stripes);
> > -				BUG_ON(list_empty(&sh->lru) &&
> > -				       !test_bit(STRIPE_EXPANDING, &sh->state));
> > -				list_del_init(&sh->lru);
> > -				if (sh->group) {
> > -					sh->group->stripes_cnt--;
> > -					sh->group = NULL;
> > -				}
> > -			}
> > -			atomic_inc(&sh->count);
> > -			spin_unlock(&conf->device_lock);
> >  		}
> >  	} while (sh == NULL);
> >  
> > @@ -771,22 +773,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
> >  	hash = stripe_hash_locks_hash(head_sector);
> >  	spin_lock_irq(conf->hash_locks + hash);
> >  	head = __find_stripe(conf, head_sector, conf->generation);
> > -	if (head && !atomic_inc_not_zero(&head->count)) {
> > -		spin_lock(&conf->device_lock);
> > -		if (!atomic_read(&head->count)) {
> > -			if (!test_bit(STRIPE_HANDLE, &head->state))
> > -				atomic_inc(&conf->active_stripes);
> > -			BUG_ON(list_empty(&head->lru) &&
> > -			       !test_bit(STRIPE_EXPANDING, &head->state));
> > -			list_del_init(&head->lru);
> > -			if (head->group) {
> > -				head->group->stripes_cnt--;
> > -				head->group = NULL;
> > -			}
> > -		}
> > -		atomic_inc(&head->count);
> > -		spin_unlock(&conf->device_lock);
> > -	}
> >  	spin_unlock_irq(conf->hash_locks + hash);
> >  
> >  	if (!head)
> 


