* [PATCH 1/2] locking/qrwlock: Change "queue rwlock" to "queued rwlock"
@ 2022-05-10 19:21 Waiman Long
  2022-05-10 19:21 ` [PATCH 2/2] locking/qrwlock: Reduce cacheline contention for rwlocks used in interrupt context Waiman Long
  2022-05-11 19:39 ` [tip: locking/core] locking/qrwlock: Change "queue rwlock" to "queued rwlock" tip-bot2 for Waiman Long
  0 siblings, 2 replies; 9+ messages in thread
From: Waiman Long @ 2022-05-10 19:21 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng, Arnd Bergmann
  Cc: linux-kernel, Waiman Long

Queued rwlock was originally named "queue rwlock", which wasn't quite
grammatically correct. However, there are still some "queue rwlock"
references in the code. Change those to "queued rwlock" for consistency.

Signed-off-by: Waiman Long <longman@redhat.com>
---
 include/asm-generic/qrwlock.h       | 28 ++++++++++++++--------------
 include/asm-generic/qrwlock_types.h |  2 +-
 kernel/locking/qrwlock.c            |  8 ++++----
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 7ae0ece07b4e..d4cd4cc4389c 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -33,8 +33,8 @@ extern void queued_read_lock_slowpath(struct qrwlock *lock);
 extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
 /**
- * queued_read_trylock - try to acquire read lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_read_trylock - try to acquire read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
 static inline int queued_read_trylock(struct qrwlock *lock)
@@ -52,8 +52,8 @@ static inline int queued_read_trylock(struct qrwlock *lock)
 }
 
 /**
- * queued_write_trylock - try to acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_trylock - try to acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
 static inline int queued_write_trylock(struct qrwlock *lock)
@@ -68,8 +68,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
 				_QW_LOCKED));
 }
 /**
- * queued_read_lock - acquire read lock of a queue rwlock
- * @lock: Pointer to queue rwlock structure
+ * queued_read_lock - acquire read lock of a queued rwlock
+ * @lock: Pointer to queued rwlock structure
  */
 static inline void queued_read_lock(struct qrwlock *lock)
 {
@@ -84,8 +84,8 @@ static inline void queued_read_lock(struct qrwlock *lock)
 }
 
 /**
- * queued_write_lock - acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_lock - acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
  */
 static inline void queued_write_lock(struct qrwlock *lock)
 {
@@ -98,8 +98,8 @@ static inline void queued_write_lock(struct qrwlock *lock)
 }
 
 /**
- * queued_read_unlock - release read lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_read_unlock - release read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
  */
 static inline void queued_read_unlock(struct qrwlock *lock)
 {
@@ -110,8 +110,8 @@ static inline void queued_read_unlock(struct qrwlock *lock)
 }
 
 /**
- * queued_write_unlock - release write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_unlock - release write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
  */
 static inline void queued_write_unlock(struct qrwlock *lock)
 {
@@ -120,7 +120,7 @@ static inline void queued_write_unlock(struct qrwlock *lock)
 
 /**
  * queued_rwlock_is_contended - check if the lock is contended
- * @lock : Pointer to queue rwlock structure
+ * @lock : Pointer to queued rwlock structure
  * Return: 1 if lock contended, 0 otherwise
  */
 static inline int queued_rwlock_is_contended(struct qrwlock *lock)
@@ -130,7 +130,7 @@ static inline int queued_rwlock_is_contended(struct qrwlock *lock)
 
 /*
  * Remapping rwlock architecture specific functions to the corresponding
- * queue rwlock functions.
+ * queued rwlock functions.
  */
 #define arch_read_lock(l)		queued_read_lock(l)
 #define arch_write_lock(l)		queued_write_lock(l)
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index c36f1d5a2572..12392c14c4d0 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -7,7 +7,7 @@
 #include <asm/spinlock_types.h>
 
 /*
- * The queue read/write lock data structure
+ * The queued read/write lock data structure
  */
 
 typedef struct qrwlock {
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 7f42e52a648f..2e1600906c9f 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -15,8 +15,8 @@
 #include <trace/events/lock.h>
 
 /**
- * queued_read_lock_slowpath - acquire read lock of a queue rwlock
- * @lock: Pointer to queue rwlock structure
+ * queued_read_lock_slowpath - acquire read lock of a queued rwlock
+ * @lock: Pointer to queued rwlock structure
  */
 void queued_read_lock_slowpath(struct qrwlock *lock)
 {
@@ -60,8 +60,8 @@ void queued_read_lock_slowpath(struct qrwlock *lock)
 EXPORT_SYMBOL(queued_read_lock_slowpath);
 
 /**
- * queued_write_lock_slowpath - acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_lock_slowpath - acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
  */
 void queued_write_lock_slowpath(struct qrwlock *lock)
 {
-- 
2.27.0



* [PATCH 2/2] locking/qrwlock: Reduce cacheline contention for rwlocks used in interrupt context
  2022-05-10 19:21 [PATCH 1/2] locking/qrwlock: Change "queue rwlock" to "queued rwlock" Waiman Long
@ 2022-05-10 19:21 ` Waiman Long
  2022-05-11  7:20   ` Arnd Bergmann
  2022-05-11  8:30   ` Peter Zijlstra
  2022-05-11 19:39 ` [tip: locking/core] locking/qrwlock: Change "queue rwlock" to "queued rwlock" tip-bot2 for Waiman Long
  1 sibling, 2 replies; 9+ messages in thread
From: Waiman Long @ 2022-05-10 19:21 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng, Arnd Bergmann
  Cc: linux-kernel, Waiman Long

Even though qrwlock is supposed to be a fair lock, it does allow readers
from interrupt context to spin on the lock until they can acquire it,
making it less fair. This exception was added to satisfy the requirement
of allowing recursive read locking in interrupt context. That requirement
can also be met by simply ignoring the writer-waiting bit instead of
spinning on the lock.

This change makes qrwlock a bit fairer and eliminates the cacheline
bouncing problem for rwlocks that are used heavily in interrupt context,
such as those in the networking stack. It should also reduce the chance
of lock starvation for those interrupt-context rwlocks.

Signed-off-by: Waiman Long <longman@redhat.com>
---
 include/asm-generic/qrwlock.h |  6 +++---
 kernel/locking/qrwlock.c      | 17 ++++++-----------
 2 files changed, 9 insertions(+), 14 deletions(-)
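
For context, here is a sketch of what the read-lock fast path looks like
with this change applied (paraphrased from include/asm-generic/qrwlock.h;
only the slowpath call actually changes in the hunk below, and the comments
are editorial):

static inline void queued_read_lock(struct qrwlock *lock)
{
	int cnts;

	/* Fast path: add a reader; succeeds if no writer is active or waiting. */
	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/*
	 * Pass the observed counter value down so the slowpath can tell a
	 * merely waiting writer (_QW_WAITING) from one that holds the lock
	 * (_QW_LOCKED) without re-reading lock->cnts.
	 */
	queued_read_lock_slowpath(lock, cnts);
}

With this in place, a reader in interrupt context that sees only the
writer-waiting bit set keeps the _QR_BIAS it has already added and returns
immediately instead of spinning.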

diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index d4cd4cc4389c..9d40cf016e0c 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -29,7 +29,7 @@
 /*
  * External function declarations
  */
-extern void queued_read_lock_slowpath(struct qrwlock *lock);
+extern void queued_read_lock_slowpath(struct qrwlock *lock, int cnts);
 extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
 /**
@@ -80,7 +80,7 @@ static inline void queued_read_lock(struct qrwlock *lock)
 		return;
 
 	/* The slowpath will decrement the reader count, if necessary. */
-	queued_read_lock_slowpath(lock);
+	queued_read_lock_slowpath(lock, cnts);
 }
 
 /**
@@ -90,7 +90,7 @@ static inline void queued_read_lock(struct qrwlock *lock)
 static inline void queued_write_lock(struct qrwlock *lock)
 {
 	int cnts = 0;
-	/* Optimize for the unfair lock case where the fair flag is 0. */
+
 	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
 		return;
 
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 2e1600906c9f..d52d13e95600 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -18,21 +18,16 @@
  * queued_read_lock_slowpath - acquire read lock of a queued rwlock
  * @lock: Pointer to queued rwlock structure
  */
-void queued_read_lock_slowpath(struct qrwlock *lock)
+void queued_read_lock_slowpath(struct qrwlock *lock, int cnts)
 {
 	/*
-	 * Readers come here when they cannot get the lock without waiting
+	 * Readers come here when they cannot get the lock without waiting.
+	 * Readers in interrupt context can steal the lock immediately
+	 * if the writer is just waiting (not holding the lock yet).
 	 */
-	if (unlikely(in_interrupt())) {
-		/*
-		 * Readers in interrupt context will get the lock immediately
-		 * if the writer is just waiting (not holding the lock yet),
-		 * so spin with ACQUIRE semantics until the lock is available
-		 * without waiting in the queue.
-		 */
-		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
+	if (unlikely(!(cnts & _QW_LOCKED) && in_interrupt()))
 		return;
-	}
+
 	atomic_sub(_QR_BIAS, &lock->cnts);
 
 	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ);
-- 
2.27.0



* Re: [PATCH 2/2] locking/qrwlock: Reduce cacheline contention for rwlocks used in interrupt context
  2022-05-10 19:21 ` [PATCH 2/2] locking/qrwlock: Reduce cacheline contention for rwlocks used in interrupt context Waiman Long
@ 2022-05-11  7:20   ` Arnd Bergmann
  2022-05-11 12:01     ` Waiman Long
  2022-05-11  8:30   ` Peter Zijlstra
  1 sibling, 1 reply; 9+ messages in thread
From: Arnd Bergmann @ 2022-05-11  7:20 UTC (permalink / raw)
  To: Waiman Long
  Cc: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng,
	Arnd Bergmann, Linux Kernel Mailing List

On Tue, May 10, 2022 at 9:21 PM Waiman Long <longman@redhat.com> wrote:
>
> Even though qrwlock is supposed to be a fair lock, it does allow readers
> from interrupt context to spin on the lock until it can acquire it making
> it not as fair. This exception was added due to the requirement to allow
> recursive read lock in interrupt context. This can also be achieved by
> just ignoring the writer waiting bit without spinning on the lock.
>
> By making this change, we make qrwlock a bit more fair and eliminating
> the problem of cacheline bouncing for rwlocks that are used heavily in
> interrupt context, like the networking stack. This should also reduce
> the chance of lock starvation for those interrupt context rwlocks.
>
> Signed-off-by: Waiman Long <longman@redhat.com>
> ---
>  include/asm-generic/qrwlock.h |  6 +++---
>  kernel/locking/qrwlock.c      | 17 ++++++-----------
>  2 files changed, 9 insertions(+), 14 deletions(-)

I have no opinion on the change itself, but I made sure this does
not conflict with the generic ticket spinlock changes that I merged
in the asm-generic tree, since those also touch the comments in
qrwlock.h [1].

I assume you are merging both patches through the tip tree, so

Acked-by: Arnd Bergmann <arnd@arndb.de> # for asm-generic

If you want me to pick them up in the asm-generic tree instead,
just let me know.

        Arnd

[1] https://lore.kernel.org/lkml/20220509222956.2886-4-palmer@rivosinc.com/


* Re: [PATCH 2/2] locking/qrwlock: Reduce cacheline contention for rwlocks used in interrupt context
  2022-05-10 19:21 ` [PATCH 2/2] locking/qrwlock: Reduce cacheline contention for rwlocks used in interrupt context Waiman Long
  2022-05-11  7:20   ` Arnd Bergmann
@ 2022-05-11  8:30   ` Peter Zijlstra
  2022-05-11 12:44     ` Waiman Long
  1 sibling, 1 reply; 9+ messages in thread
From: Peter Zijlstra @ 2022-05-11  8:30 UTC (permalink / raw)
  To: Waiman Long
  Cc: Ingo Molnar, Will Deacon, Boqun Feng, Arnd Bergmann, linux-kernel

On Tue, May 10, 2022 at 03:21:34PM -0400, Waiman Long wrote:
> Even though qrwlock is supposed to be a fair lock, it does allow readers
> from interrupt context to spin on the lock until it can acquire it making
> it not as fair. This exception was added due to the requirement to allow
> recursive read lock in interrupt context. This can also be achieved by
> just ignoring the writer waiting bit without spinning on the lock.
> 
> By making this change, we make qrwlock a bit more fair and eliminating
> the problem of cacheline bouncing for rwlocks that are used heavily in
> interrupt context, like the networking stack. This should also reduce
> the chance of lock starvation for those interrupt context rwlocks.

> diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
> index 2e1600906c9f..d52d13e95600 100644
> --- a/kernel/locking/qrwlock.c
> +++ b/kernel/locking/qrwlock.c
> @@ -18,21 +18,16 @@
>   * queued_read_lock_slowpath - acquire read lock of a queued rwlock
>   * @lock: Pointer to queued rwlock structure
>   */
> -void queued_read_lock_slowpath(struct qrwlock *lock)
> +void queued_read_lock_slowpath(struct qrwlock *lock, int cnts)
>  {
>  	/*
> -	 * Readers come here when they cannot get the lock without waiting
> +	 * Readers come here when they cannot get the lock without waiting.
> +	 * Readers in interrupt context can steal the lock immediately
> +	 * if the writer is just waiting (not holding the lock yet).
>  	 */
> -	if (unlikely(in_interrupt())) {
> -		/*
> -		 * Readers in interrupt context will get the lock immediately
> -		 * if the writer is just waiting (not holding the lock yet),
> -		 * so spin with ACQUIRE semantics until the lock is available
> -		 * without waiting in the queue.
> -		 */
> -		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
> +	if (unlikely(!(cnts & _QW_LOCKED) && in_interrupt()))
>  		return;
> -	}
> +
>  	atomic_sub(_QR_BIAS, &lock->cnts);
>  
>  	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ);

I'm confused; prior to this change:

	CPU0			CPU1

	write_lock_irq(&l)
				read_lock(&l)
				<INRQ>
				  read_lock(&l)
				  ...

was not deadlock, but now it would AFAICT.
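
To make the in-IRQ requirement concrete, here is a hypothetical driver
pattern (illustrative only, not code from this thread) that depends on the
interrupt-context reader never waiting in the queue, whether the writer is
merely waiting or, as in the scenario above, already holds the lock while
this CPU's outer reader sits in the queue holding the wait_lock:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_RWLOCK(dev_lock);

static void dev_poll(void)			/* CPU1, process context */
{
	read_lock(&dev_lock);		/* may queue behind a writer,
					 * holding lock->wait_lock */
	/* ... an interrupt can arrive here, or while queued above ... */
	read_unlock(&dev_lock);
}

static irqreturn_t dev_irq(int irq, void *data)	/* CPU1, hardirq context */
{
	read_lock(&dev_lock);		/* must not join the queue: this CPU
					 * may already hold wait_lock inside
					 * dev_poll() */
	/* ... read the shared state ... */
	read_unlock(&dev_lock);
	return IRQ_HANDLED;
}

static void dev_update(void)			/* CPU0, process context */
{
	write_lock_irq(&dev_lock);	/* the writer CPU1 contends with */
	/* ... update the shared state ... */
	write_unlock_irq(&dev_lock);
}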


* Re: [PATCH 2/2] locking/qrwlock: Reduce cacheline contention for rwlocks used in interrupt context
  2022-05-11  7:20   ` Arnd Bergmann
@ 2022-05-11 12:01     ` Waiman Long
  0 siblings, 0 replies; 9+ messages in thread
From: Waiman Long @ 2022-05-11 12:01 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng,
	Linux Kernel Mailing List

On 5/11/22 03:20, Arnd Bergmann wrote:
> On Tue, May 10, 2022 at 9:21 PM Waiman Long <longman@redhat.com> wrote:
>> Even though qrwlock is supposed to be a fair lock, it does allow readers
>> from interrupt context to spin on the lock until it can acquire it making
>> it not as fair. This exception was added due to the requirement to allow
>> recursive read lock in interrupt context. This can also be achieved by
>> just ignoring the writer waiting bit without spinning on the lock.
>>
>> By making this change, we make qrwlock a bit more fair and eliminating
>> the problem of cacheline bouncing for rwlocks that are used heavily in
>> interrupt context, like the networking stack. This should also reduce
>> the chance of lock starvation for those interrupt context rwlocks.
>>
>> Signed-off-by: Waiman Long <longman@redhat.com>
>> ---
>>   include/asm-generic/qrwlock.h |  6 +++---
>>   kernel/locking/qrwlock.c      | 17 ++++++-----------
>>   2 files changed, 9 insertions(+), 14 deletions(-)
> I have no opinion on the change itself, but I made sure this does
> not conflict with the generic ticket spinlock changes that I merged
> in the asm-generic tree, since those also touch the comments in
> qrwlock.h [1]
>
> I assume you are merging both patches through the tip tree, so

Yes, it is based on the latest tip tree.

Cheers,
Longman



* Re: [PATCH 2/2] locking/qrwlock: Reduce cacheline contention for rwlocks used in interrupt context
  2022-05-11  8:30   ` Peter Zijlstra
@ 2022-05-11 12:44     ` Waiman Long
  2022-05-11 13:34       ` Peter Zijlstra
  0 siblings, 1 reply; 9+ messages in thread
From: Waiman Long @ 2022-05-11 12:44 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: Ingo Molnar, Will Deacon, Boqun Feng, Arnd Bergmann, linux-kernel

On 5/11/22 04:30, Peter Zijlstra wrote:
> On Tue, May 10, 2022 at 03:21:34PM -0400, Waiman Long wrote:
>> Even though qrwlock is supposed to be a fair lock, it does allow readers
>> from interrupt context to spin on the lock until it can acquire it making
>> it not as fair. This exception was added due to the requirement to allow
>> recursive read lock in interrupt context. This can also be achieved by
>> just ignoring the writer waiting bit without spinning on the lock.
>>
>> By making this change, we make qrwlock a bit more fair and eliminating
>> the problem of cacheline bouncing for rwlocks that are used heavily in
>> interrupt context, like the networking stack. This should also reduce
>> the chance of lock starvation for those interrupt context rwlocks.
>> diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
>> index 2e1600906c9f..d52d13e95600 100644
>> --- a/kernel/locking/qrwlock.c
>> +++ b/kernel/locking/qrwlock.c
>> @@ -18,21 +18,16 @@
>>    * queued_read_lock_slowpath - acquire read lock of a queued rwlock
>>    * @lock: Pointer to queued rwlock structure
>>    */
>> -void queued_read_lock_slowpath(struct qrwlock *lock)
>> +void queued_read_lock_slowpath(struct qrwlock *lock, int cnts)
>>   {
>>   	/*
>> -	 * Readers come here when they cannot get the lock without waiting
>> +	 * Readers come here when they cannot get the lock without waiting.
>> +	 * Readers in interrupt context can steal the lock immediately
>> +	 * if the writer is just waiting (not holding the lock yet).
>>   	 */
>> -	if (unlikely(in_interrupt())) {
>> -		/*
>> -		 * Readers in interrupt context will get the lock immediately
>> -		 * if the writer is just waiting (not holding the lock yet),
>> -		 * so spin with ACQUIRE semantics until the lock is available
>> -		 * without waiting in the queue.
>> -		 */
>> -		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
>> +	if (unlikely(!(cnts & _QW_LOCKED) && in_interrupt()))
>>   		return;
>> -	}
>> +
>>   	atomic_sub(_QR_BIAS, &lock->cnts);
>>   
>>   	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ);
> I'm confused; prior to this change:
>
> 	CPU0			CPU1
>
> 	write_lock_irq(&l)
> 				read_lock(&l)
> 				<INRQ>
> 				  read_lock(&l)
> 				  ...
>
> was not deadlock, but now it would AFAICT.

Oh you are right. I missed that scenario in my analysis. My bad.

Please scrap this patch. Patch 1 is just an update to the comment and so 
is still applicable.

Thanks,
Longman



* Re: [PATCH 2/2] locking/qrwlock: Reduce cacheline contention for rwlocks used in interrupt context
  2022-05-11 12:44     ` Waiman Long
@ 2022-05-11 13:34       ` Peter Zijlstra
  2022-05-11 16:00         ` Waiman Long
  0 siblings, 1 reply; 9+ messages in thread
From: Peter Zijlstra @ 2022-05-11 13:34 UTC (permalink / raw)
  To: Waiman Long
  Cc: Ingo Molnar, Will Deacon, Boqun Feng, Arnd Bergmann, linux-kernel

On Wed, May 11, 2022 at 08:44:55AM -0400, Waiman Long wrote:

> > I'm confused; prior to this change:
> > 
> > 	CPU0			CPU1
> > 
> > 	write_lock_irq(&l)
> > 				read_lock(&l)
> > 				<INRQ>
> > 				  read_lock(&l)
> > 				  ...
> > 
> > was not deadlock, but now it would AFAICT.
> 
> Oh you are right. I missed that scenario in my analysis. My bad.

No worries; I suppose we can also still do something like:

void queued_read_lock_slowpath(struct qrwlock *lock, int cnts)
{
	/*
	 * the big comment
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * If not write-locked, insta-grant the reader
		 */
		if (!(cnts & _QW_LOCKED))
			return;

		/*
		 * otherwise, wait for the writer to go away.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}

	...
}

Which saves one load in some cases... not sure it's worth it though.



* Re: [PATCH 2/2] locking/qrwlock: Reduce cacheline contention for rwlocks used in interrupt context
  2022-05-11 13:34       ` Peter Zijlstra
@ 2022-05-11 16:00         ` Waiman Long
  0 siblings, 0 replies; 9+ messages in thread
From: Waiman Long @ 2022-05-11 16:00 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: Ingo Molnar, Will Deacon, Boqun Feng, Arnd Bergmann, linux-kernel

On 5/11/22 09:34, Peter Zijlstra wrote:
> On Wed, May 11, 2022 at 08:44:55AM -0400, Waiman Long wrote:
>
>>> I'm confused; prior to this change:
>>>
>>> 	CPU0			CPU1
>>>
>>> 	write_lock_irq(&l)
>>> 				read_lock(&l)
>>> 				<INRQ>
>>> 				  read_lock(&l)
>>> 				  ...
>>>
>>> was not deadlock, but now it would AFAICT.
>> Oh you are right. I missed that scenario in my analysis. My bad.
> No worries; I suppose we can also still do something like:
>
> void queued_read_lock_slowpath(struct qrwlock *lock, int cnts)
> {
> 	/*
> 	 * the big comment
> 	 */
> 	if (unlikely(in_interrupt())) {
> 		/*
> 		 * If not write-locked, insta-grant the reader
> 		 */
> 		if (!(cnts & _QW_LOCKED))
> 			return;
>
> 		/*
> 		 * otherwise, wait for the writer to go away.
> 		 */
> 		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
> 		return;
> 	}
>
> 	...
> }
>
> Which saves one load in some cases... not sure it's worth it though.

Yes, it is a micro-optimization that can be done. The gain, if any, 
should be minor though.

Cheers,
Longman



* [tip: locking/core] locking/qrwlock: Change "queue rwlock" to "queued rwlock"
  2022-05-10 19:21 [PATCH 1/2] locking/qrwlock: Change "queue rwlock" to "queued rwlock" Waiman Long
  2022-05-10 19:21 ` [PATCH 2/2] locking/qrwlock: Reduce cacheline contention for rwlocks used in interrupt context Waiman Long
@ 2022-05-11 19:39 ` tip-bot2 for Waiman Long
  1 sibling, 0 replies; 9+ messages in thread
From: tip-bot2 for Waiman Long @ 2022-05-11 19:39 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: Waiman Long, Peter Zijlstra (Intel), x86, linux-kernel

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     434e09e7575b02e014931bc5672289fabd7a825c
Gitweb:        https://git.kernel.org/tip/434e09e7575b02e014931bc5672289fabd7a825c
Author:        Waiman Long <longman@redhat.com>
AuthorDate:    Tue, 10 May 2022 15:21:33 -04:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 11 May 2022 16:27:04 +02:00

locking/qrwlock: Change "queue rwlock" to "queued rwlock"

Queued rwlock was originally named "queue rwlock", which wasn't quite
grammatically correct. However, there are still some "queue rwlock"
references in the code. Change those to "queued rwlock" for consistency.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220510192134.434753-1-longman@redhat.com
---
 include/asm-generic/qrwlock.h       | 28 ++++++++++++++--------------
 include/asm-generic/qrwlock_types.h |  2 +-
 kernel/locking/qrwlock.c            |  8 ++++----
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 7ae0ece..d4cd4cc 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -33,8 +33,8 @@ extern void queued_read_lock_slowpath(struct qrwlock *lock);
 extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
 /**
- * queued_read_trylock - try to acquire read lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_read_trylock - try to acquire read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
 static inline int queued_read_trylock(struct qrwlock *lock)
@@ -52,8 +52,8 @@ static inline int queued_read_trylock(struct qrwlock *lock)
 }
 
 /**
- * queued_write_trylock - try to acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_trylock - try to acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
 static inline int queued_write_trylock(struct qrwlock *lock)
@@ -68,8 +68,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
 				_QW_LOCKED));
 }
 /**
- * queued_read_lock - acquire read lock of a queue rwlock
- * @lock: Pointer to queue rwlock structure
+ * queued_read_lock - acquire read lock of a queued rwlock
+ * @lock: Pointer to queued rwlock structure
  */
 static inline void queued_read_lock(struct qrwlock *lock)
 {
@@ -84,8 +84,8 @@ static inline void queued_read_lock(struct qrwlock *lock)
 }
 
 /**
- * queued_write_lock - acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_lock - acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
  */
 static inline void queued_write_lock(struct qrwlock *lock)
 {
@@ -98,8 +98,8 @@ static inline void queued_write_lock(struct qrwlock *lock)
 }
 
 /**
- * queued_read_unlock - release read lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_read_unlock - release read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
  */
 static inline void queued_read_unlock(struct qrwlock *lock)
 {
@@ -110,8 +110,8 @@ static inline void queued_read_unlock(struct qrwlock *lock)
 }
 
 /**
- * queued_write_unlock - release write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_unlock - release write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
  */
 static inline void queued_write_unlock(struct qrwlock *lock)
 {
@@ -120,7 +120,7 @@ static inline void queued_write_unlock(struct qrwlock *lock)
 
 /**
  * queued_rwlock_is_contended - check if the lock is contended
- * @lock : Pointer to queue rwlock structure
+ * @lock : Pointer to queued rwlock structure
  * Return: 1 if lock contended, 0 otherwise
  */
 static inline int queued_rwlock_is_contended(struct qrwlock *lock)
@@ -130,7 +130,7 @@ static inline int queued_rwlock_is_contended(struct qrwlock *lock)
 
 /*
  * Remapping rwlock architecture specific functions to the corresponding
- * queue rwlock functions.
+ * queued rwlock functions.
  */
 #define arch_read_lock(l)		queued_read_lock(l)
 #define arch_write_lock(l)		queued_write_lock(l)
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index c36f1d5..12392c1 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -7,7 +7,7 @@
 #include <asm/spinlock_types.h>
 
 /*
- * The queue read/write lock data structure
+ * The queued read/write lock data structure
  */
 
 typedef struct qrwlock {
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 7f42e52..2e16009 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -15,8 +15,8 @@
 #include <trace/events/lock.h>
 
 /**
- * queued_read_lock_slowpath - acquire read lock of a queue rwlock
- * @lock: Pointer to queue rwlock structure
+ * queued_read_lock_slowpath - acquire read lock of a queued rwlock
+ * @lock: Pointer to queued rwlock structure
  */
 void queued_read_lock_slowpath(struct qrwlock *lock)
 {
@@ -60,8 +60,8 @@ void queued_read_lock_slowpath(struct qrwlock *lock)
 EXPORT_SYMBOL(queued_read_lock_slowpath);
 
 /**
- * queued_write_lock_slowpath - acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_lock_slowpath - acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
  */
 void queued_write_lock_slowpath(struct qrwlock *lock)
 {
