* [PATCH 1/2] lockref: make lockref count signed
@ 2014-08-05 19:52 Steven Noonan
2014-08-05 19:52 ` [PATCH 2/2] lockref: replace lockref_get_not_zero with lockref_get_active Steven Noonan
2014-08-05 21:44 ` [PATCH 1/2] lockref: make lockref count signed NeilBrown
0 siblings, 2 replies; 5+ messages in thread
From: Steven Noonan @ 2014-08-05 19:52 UTC (permalink / raw)
To: lkml, linux-fsdevel; +Cc: Steven Noonan, NeilBrown, Al Viro
There are numerous places where this is cast to a signed value anyway, for
comparisons checking that the value hasn't been set to the 'dead' value of
-128. This change turns the count value into a signed integer, which is how
it's already being treated anyway. This reduces the chance for developer errors
when making those comparisons.
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: NeilBrown <neilb@suse.de>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Steven Noonan <steven@uplinklabs.net>
---
fs/dcache.c | 6 +++---
include/linux/lockref.h | 2 +-
lib/lockref.c | 4 ++--
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 06f6585..f7a592e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -479,7 +479,7 @@ static void __dentry_kill(struct dentry *dentry)
* dentry_iput drops the locks, at which point nobody (except
* transient RCU lookups) can reach this dentry.
*/
- BUG_ON((int)dentry->d_lockref.count > 0);
+ BUG_ON(dentry->d_lockref.count > 0);
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
@@ -532,7 +532,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
struct dentry *parent = dentry->d_parent;
if (IS_ROOT(dentry))
return NULL;
- if (unlikely((int)dentry->d_lockref.count < 0))
+ if (unlikely(dentry->d_lockref.count < 0))
return NULL;
if (likely(spin_trylock(&parent->d_lock)))
return parent;
@@ -848,7 +848,7 @@ static void shrink_dentry_list(struct list_head *list)
* We found an inuse dentry which was not removed from
* the LRU because of laziness during lookup. Do not free it.
*/
- if ((int)dentry->d_lockref.count > 0) {
+ if (dentry->d_lockref.count > 0) {
spin_unlock(&dentry->d_lock);
if (parent)
spin_unlock(&parent->d_lock);
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 4bfde0e..8558ff1 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -28,7 +28,7 @@ struct lockref {
#endif
struct {
spinlock_t lock;
- unsigned int count;
+ int count;
};
};
};
diff --git a/lib/lockref.c b/lib/lockref.c
index d2233de..e4c4255 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -158,7 +158,7 @@ int lockref_get_not_dead(struct lockref *lockref)
CMPXCHG_LOOP(
new.count++;
- if ((int)old.count < 0)
+ if (old.count < 0)
return 0;
,
return 1;
@@ -166,7 +166,7 @@ int lockref_get_not_dead(struct lockref *lockref)
spin_lock(&lockref->lock);
retval = 0;
- if ((int) lockref->count >= 0) {
+ if (lockref->count >= 0) {
lockref->count++;
retval = 1;
}
--
2.0.4
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 2/2] lockref: replace lockref_get_not_zero with lockref_get_active
2014-08-05 19:52 [PATCH 1/2] lockref: make lockref count signed Steven Noonan
@ 2014-08-05 19:52 ` Steven Noonan
2014-08-05 21:44 ` [PATCH 1/2] lockref: make lockref count signed NeilBrown
1 sibling, 0 replies; 5+ messages in thread
From: Steven Noonan @ 2014-08-05 19:52 UTC (permalink / raw)
To: lkml, linux-fsdevel; +Cc: Steven Noonan, NeilBrown, Al Viro
The new lockref_get_active ensures the count is nonzero and that the lockref is
not dead (i.e., count > 0). Simply comparing to zero was risky for the only
caller of this function (dget_parent), as it wasn't holding the lockref->lock.
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: NeilBrown <neilb@suse.de>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Steven Noonan <steven@uplinklabs.net>
---
fs/dcache.c | 2 +-
include/linux/lockref.h | 2 +-
lib/lockref.c | 13 +++++++------
3 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index f7a592e..66ee98e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -699,7 +699,7 @@ struct dentry *dget_parent(struct dentry *dentry)
*/
rcu_read_lock();
ret = ACCESS_ONCE(dentry->d_parent);
- gotref = lockref_get_not_zero(&ret->d_lockref);
+ gotref = lockref_get_active(&ret->d_lockref);
rcu_read_unlock();
if (likely(gotref)) {
if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 8558ff1..1a9827e 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -34,7 +34,7 @@ struct lockref {
};
extern void lockref_get(struct lockref *);
-extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_get_active(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);
diff --git a/lib/lockref.c b/lib/lockref.c
index e4c4255..318bef6 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -60,17 +60,18 @@ void lockref_get(struct lockref *lockref)
EXPORT_SYMBOL(lockref_get);
/**
- * lockref_get_not_zero - Increments count unless the count is 0
+ * lockref_get_active - Increments count unless the count is 0 or ref is dead
* @lockref: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count was zero
+ * Return: 1 if count updated successfully or 0 if count was zero or lockref
+ * was dead
*/
-int lockref_get_not_zero(struct lockref *lockref)
+int lockref_get_active(struct lockref *lockref)
{
int retval;
CMPXCHG_LOOP(
new.count++;
- if (!old.count)
+ if (old.count < 1)
return 0;
,
return 1;
@@ -78,14 +79,14 @@ int lockref_get_not_zero(struct lockref *lockref)
spin_lock(&lockref->lock);
retval = 0;
- if (lockref->count) {
+ if (lockref->count >= 1) {
lockref->count++;
retval = 1;
}
spin_unlock(&lockref->lock);
return retval;
}
-EXPORT_SYMBOL(lockref_get_not_zero);
+EXPORT_SYMBOL(lockref_get_active);
/**
* lockref_get_or_lock - Increments count unless the count is 0
--
2.0.4
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH 1/2] lockref: make lockref count signed
2014-08-05 19:52 [PATCH 1/2] lockref: make lockref count signed Steven Noonan
2014-08-05 19:52 ` [PATCH 2/2] lockref: replace lockref_get_not_zero with lockref_get_active Steven Noonan
@ 2014-08-05 21:44 ` NeilBrown
2014-08-05 21:46 ` Steven Noonan
1 sibling, 1 reply; 5+ messages in thread
From: NeilBrown @ 2014-08-05 21:44 UTC (permalink / raw)
To: Steven Noonan; +Cc: lkml, linux-fsdevel, Al Viro
[-- Attachment #1: Type: text/plain, Size: 3143 bytes --]
On Tue, 5 Aug 2014 12:52:27 -0700 Steven Noonan <steven@uplinklabs.net>
wrote:
> There are numerous places where this is casted to a signed value anyway, for
> comparisons checking that the value hasn't been set to the 'dead' value of
> -128. This change turns the count value into a signed integer, which is how
> it's already being treated anyway. This reduces the chance for developer errors
> when making those comparisons.
>
> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
> Cc: NeilBrown <neilb@suse.de>
> Cc: Al Viro <viro@ZenIV.linux.org.uk>
> Signed-off-by: Steven Noonan <steven@uplinklabs.net>
Thanks! But you missed one "(int)" removal :-)
fs/autofs4/root.c: if ((int) d_count(active) <= 0)
NeilBrown
> ---
> fs/dcache.c | 6 +++---
> include/linux/lockref.h | 2 +-
> lib/lockref.c | 4 ++--
> 3 files changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/fs/dcache.c b/fs/dcache.c
> index 06f6585..f7a592e 100644
> --- a/fs/dcache.c
> +++ b/fs/dcache.c
> @@ -479,7 +479,7 @@ static void __dentry_kill(struct dentry *dentry)
> * dentry_iput drops the locks, at which point nobody (except
> * transient RCU lookups) can reach this dentry.
> */
> - BUG_ON((int)dentry->d_lockref.count > 0);
> + BUG_ON(dentry->d_lockref.count > 0);
> this_cpu_dec(nr_dentry);
> if (dentry->d_op && dentry->d_op->d_release)
> dentry->d_op->d_release(dentry);
> @@ -532,7 +532,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
> struct dentry *parent = dentry->d_parent;
> if (IS_ROOT(dentry))
> return NULL;
> - if (unlikely((int)dentry->d_lockref.count < 0))
> + if (unlikely(dentry->d_lockref.count < 0))
> return NULL;
> if (likely(spin_trylock(&parent->d_lock)))
> return parent;
> @@ -848,7 +848,7 @@ static void shrink_dentry_list(struct list_head *list)
> * We found an inuse dentry which was not removed from
> * the LRU because of laziness during lookup. Do not free it.
> */
> - if ((int)dentry->d_lockref.count > 0) {
> + if (dentry->d_lockref.count > 0) {
> spin_unlock(&dentry->d_lock);
> if (parent)
> spin_unlock(&parent->d_lock);
> diff --git a/include/linux/lockref.h b/include/linux/lockref.h
> index 4bfde0e..8558ff1 100644
> --- a/include/linux/lockref.h
> +++ b/include/linux/lockref.h
> @@ -28,7 +28,7 @@ struct lockref {
> #endif
> struct {
> spinlock_t lock;
> - unsigned int count;
> + int count;
> };
> };
> };
> diff --git a/lib/lockref.c b/lib/lockref.c
> index d2233de..e4c4255 100644
> --- a/lib/lockref.c
> +++ b/lib/lockref.c
> @@ -158,7 +158,7 @@ int lockref_get_not_dead(struct lockref *lockref)
>
> CMPXCHG_LOOP(
> new.count++;
> - if ((int)old.count < 0)
> + if (old.count < 0)
> return 0;
> ,
> return 1;
> @@ -166,7 +166,7 @@ int lockref_get_not_dead(struct lockref *lockref)
>
> spin_lock(&lockref->lock);
> retval = 0;
> - if ((int) lockref->count >= 0) {
> + if (lockref->count >= 0) {
> lockref->count++;
> retval = 1;
> }
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 828 bytes --]
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH 1/2] lockref: make lockref count signed
2014-08-05 21:44 ` [PATCH 1/2] lockref: make lockref count signed NeilBrown
@ 2014-08-05 21:46 ` Steven Noonan
2014-08-05 21:54 ` [PATCH 1/2 v2] " Steven Noonan
0 siblings, 1 reply; 5+ messages in thread
From: Steven Noonan @ 2014-08-05 21:46 UTC (permalink / raw)
To: NeilBrown; +Cc: lkml, linux-fsdevel, Al Viro
On Tue, Aug 5, 2014 at 2:44 PM, NeilBrown <neilb@suse.de> wrote:
> On Tue, 5 Aug 2014 12:52:27 -0700 Steven Noonan <steven@uplinklabs.net>
> wrote:
>
>> There are numerous places where this is casted to a signed value anyway, for
>> comparisons checking that the value hasn't been set to the 'dead' value of
>> -128. This change turns the count value into a signed integer, which is how
>> it's already being treated anyway. This reduces the chance for developer errors
>> when making those comparisons.
>>
>> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
>> Cc: NeilBrown <neilb@suse.de>
>> Cc: Al Viro <viro@ZenIV.linux.org.uk>
>> Signed-off-by: Steven Noonan <steven@uplinklabs.net>
>
> Thanks! But you missed one "(int)" removal :-)
>
> fs/autofs4/root.c: if ((int) d_count(active) <= 0)
Ahh, yeah. The return type of d_count() also needs to be fixed up.
I'll send a new version in a few minutes...
> NeilBrown
>
>> ---
>> fs/dcache.c | 6 +++---
>> include/linux/lockref.h | 2 +-
>> lib/lockref.c | 4 ++--
>> 3 files changed, 6 insertions(+), 6 deletions(-)
>>
>> diff --git a/fs/dcache.c b/fs/dcache.c
>> index 06f6585..f7a592e 100644
>> --- a/fs/dcache.c
>> +++ b/fs/dcache.c
>> @@ -479,7 +479,7 @@ static void __dentry_kill(struct dentry *dentry)
>> * dentry_iput drops the locks, at which point nobody (except
>> * transient RCU lookups) can reach this dentry.
>> */
>> - BUG_ON((int)dentry->d_lockref.count > 0);
>> + BUG_ON(dentry->d_lockref.count > 0);
>> this_cpu_dec(nr_dentry);
>> if (dentry->d_op && dentry->d_op->d_release)
>> dentry->d_op->d_release(dentry);
>> @@ -532,7 +532,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
>> struct dentry *parent = dentry->d_parent;
>> if (IS_ROOT(dentry))
>> return NULL;
>> - if (unlikely((int)dentry->d_lockref.count < 0))
>> + if (unlikely(dentry->d_lockref.count < 0))
>> return NULL;
>> if (likely(spin_trylock(&parent->d_lock)))
>> return parent;
>> @@ -848,7 +848,7 @@ static void shrink_dentry_list(struct list_head *list)
>> * We found an inuse dentry which was not removed from
>> * the LRU because of laziness during lookup. Do not free it.
>> */
>> - if ((int)dentry->d_lockref.count > 0) {
>> + if (dentry->d_lockref.count > 0) {
>> spin_unlock(&dentry->d_lock);
>> if (parent)
>> spin_unlock(&parent->d_lock);
>> diff --git a/include/linux/lockref.h b/include/linux/lockref.h
>> index 4bfde0e..8558ff1 100644
>> --- a/include/linux/lockref.h
>> +++ b/include/linux/lockref.h
>> @@ -28,7 +28,7 @@ struct lockref {
>> #endif
>> struct {
>> spinlock_t lock;
>> - unsigned int count;
>> + int count;
>> };
>> };
>> };
>> diff --git a/lib/lockref.c b/lib/lockref.c
>> index d2233de..e4c4255 100644
>> --- a/lib/lockref.c
>> +++ b/lib/lockref.c
>> @@ -158,7 +158,7 @@ int lockref_get_not_dead(struct lockref *lockref)
>>
>> CMPXCHG_LOOP(
>> new.count++;
>> - if ((int)old.count < 0)
>> + if (old.count < 0)
>> return 0;
>> ,
>> return 1;
>> @@ -166,7 +166,7 @@ int lockref_get_not_dead(struct lockref *lockref)
>>
>> spin_lock(&lockref->lock);
>> retval = 0;
>> - if ((int) lockref->count >= 0) {
>> + if (lockref->count >= 0) {
>> lockref->count++;
>> retval = 1;
>> }
>
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH 1/2 v2] lockref: make lockref count signed
2014-08-05 21:46 ` Steven Noonan
@ 2014-08-05 21:54 ` Steven Noonan
0 siblings, 0 replies; 5+ messages in thread
From: Steven Noonan @ 2014-08-05 21:54 UTC (permalink / raw)
To: lkml, linux-fsdevel; +Cc: Steven Noonan, NeilBrown, Al Viro
There are numerous places where this is cast to a signed value anyway, for
comparisons checking that the value hasn't been set to the 'dead' value of
-128. This change turns the count value into a signed integer, which is how
it's already being treated anyway. This reduces the chance for developer errors
when making those comparisons.
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: NeilBrown <neilb@suse.de>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Steven Noonan <steven@uplinklabs.net>
---
v2: d_count() function was unsigned and there was another cast inside autofs4.
Fixed those as well.
fs/autofs4/root.c | 2 +-
fs/dcache.c | 6 +++---
include/linux/dcache.h | 2 +-
include/linux/lockref.h | 2 +-
lib/lockref.c | 4 ++--
5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index cc87c1a..c4583c8 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -179,7 +179,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
spin_lock(&active->d_lock);
/* Already gone? */
- if ((int) d_count(active) <= 0)
+ if (d_count(active) <= 0)
goto next;
qstr = &active->d_name;
diff --git a/fs/dcache.c b/fs/dcache.c
index 06f6585..f7a592e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -479,7 +479,7 @@ static void __dentry_kill(struct dentry *dentry)
* dentry_iput drops the locks, at which point nobody (except
* transient RCU lookups) can reach this dentry.
*/
- BUG_ON((int)dentry->d_lockref.count > 0);
+ BUG_ON(dentry->d_lockref.count > 0);
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
@@ -532,7 +532,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
struct dentry *parent = dentry->d_parent;
if (IS_ROOT(dentry))
return NULL;
- if (unlikely((int)dentry->d_lockref.count < 0))
+ if (unlikely(dentry->d_lockref.count < 0))
return NULL;
if (likely(spin_trylock(&parent->d_lock)))
return parent;
@@ -848,7 +848,7 @@ static void shrink_dentry_list(struct list_head *list)
* We found an inuse dentry which was not removed from
* the LRU because of laziness during lookup. Do not free it.
*/
- if ((int)dentry->d_lockref.count > 0) {
+ if (dentry->d_lockref.count > 0) {
spin_unlock(&dentry->d_lock);
if (parent)
spin_unlock(&parent->d_lock);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 3c7ec32..7531470 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -320,7 +320,7 @@ extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
const struct qstr *name, unsigned *seq);
-static inline unsigned d_count(const struct dentry *dentry)
+static inline int d_count(const struct dentry *dentry)
{
return dentry->d_lockref.count;
}
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 4bfde0e..8558ff1 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -28,7 +28,7 @@ struct lockref {
#endif
struct {
spinlock_t lock;
- unsigned int count;
+ int count;
};
};
};
diff --git a/lib/lockref.c b/lib/lockref.c
index d2233de..e4c4255 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -158,7 +158,7 @@ int lockref_get_not_dead(struct lockref *lockref)
CMPXCHG_LOOP(
new.count++;
- if ((int)old.count < 0)
+ if (old.count < 0)
return 0;
,
return 1;
@@ -166,7 +166,7 @@ int lockref_get_not_dead(struct lockref *lockref)
spin_lock(&lockref->lock);
retval = 0;
- if ((int) lockref->count >= 0) {
+ if (lockref->count >= 0) {
lockref->count++;
retval = 1;
}
--
2.0.4
^ permalink raw reply related [flat|nested] 5+ messages in thread
end of thread, other threads:[~2014-08-05 21:54 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-08-05 19:52 [PATCH 1/2] lockref: make lockref count signed Steven Noonan
2014-08-05 19:52 ` [PATCH 2/2] lockref: replace lockref_get_not_zero with lockref_get_active Steven Noonan
2014-08-05 21:44 ` [PATCH 1/2] lockref: make lockref count signed NeilBrown
2014-08-05 21:46 ` Steven Noonan
2014-08-05 21:54 ` [PATCH 1/2 v2] " Steven Noonan
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.