* [PATCH] kernel/locking: make __down_common use flags previously saved
@ 2021-04-24 14:28 Hyeonggon Yoo
  2021-05-11  2:23 ` Hyeonggon Yoo
  2021-05-12 19:09 ` Ingo Molnar
  0 siblings, 2 replies; 4+ messages in thread
From: Hyeonggon Yoo @ 2021-04-24 14:28 UTC (permalink / raw)
  To: peterz, mingo, will; +Cc: linux-kernel, Hyeonggon Yoo

down(), down_interruptible(), down_killable(), and down_timeout()
call raw_spin_lock_irqsave(), which saves the current interrupt state
into flags.

However, __down_common(), which is called by the functions above, uses
raw_spin_lock_irq() and raw_spin_unlock_irq() regardless of the flags
saved earlier.

This mismatch can potentially cause problems. Make __down_common()
use raw_spin_lock_irqsave() and raw_spin_unlock_irqrestore() instead.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 kernel/locking/semaphore.c | 42 +++++++++++++++++++++-----------------
 1 file changed, 23 insertions(+), 19 deletions(-)

diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c
index 9aa855a96c4a..0ea174223441 100644
--- a/kernel/locking/semaphore.c
+++ b/kernel/locking/semaphore.c
@@ -33,10 +33,10 @@
 #include <linux/spinlock.h>
 #include <linux/ftrace.h>
 
-static noinline void __down(struct semaphore *sem);
-static noinline int __down_interruptible(struct semaphore *sem);
-static noinline int __down_killable(struct semaphore *sem);
-static noinline int __down_timeout(struct semaphore *sem, long timeout);
+static noinline void __down(struct semaphore *sem, unsigned long flags);
+static noinline int __down_interruptible(struct semaphore *sem, unsigned long flags);
+static noinline int __down_killable(struct semaphore *sem, unsigned long flags);
+static noinline int __down_timeout(struct semaphore *sem, long timeout, unsigned long flags);
 static noinline void __up(struct semaphore *sem);
 
 /**
@@ -58,7 +58,7 @@ void down(struct semaphore *sem)
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
-		__down(sem);
+		__down(sem, flags);
 	raw_spin_unlock_irqrestore(&sem->lock, flags);
 }
 EXPORT_SYMBOL(down);
@@ -81,7 +81,7 @@ int down_interruptible(struct semaphore *sem)
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
-		result = __down_interruptible(sem);
+		result = __down_interruptible(sem, flags);
 	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
@@ -107,7 +107,7 @@ int down_killable(struct semaphore *sem)
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
-		result = __down_killable(sem);
+		result = __down_killable(sem, flags);
 	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
@@ -161,7 +161,7 @@ int down_timeout(struct semaphore *sem, long timeout)
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
-		result = __down_timeout(sem, timeout);
+		result = __down_timeout(sem, timeout, flags);
 	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
@@ -202,7 +202,7 @@ struct semaphore_waiter {
  * 'timeout' parameter for the cases without timeouts.
  */
 static inline int __sched __down_common(struct semaphore *sem, long state,
-								long timeout)
+					long timeout, unsigned long flags)
 {
 	struct semaphore_waiter waiter;
 
@@ -216,9 +216,9 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 		if (unlikely(timeout <= 0))
 			goto timed_out;
 		__set_current_state(state);
-		raw_spin_unlock_irq(&sem->lock);
+		raw_spin_unlock_irqrestore(&sem->lock, flags);
 		timeout = schedule_timeout(timeout);
-		raw_spin_lock_irq(&sem->lock);
+		raw_spin_lock_irqsave(&sem->lock, flags);
 		if (waiter.up)
 			return 0;
 	}
@@ -232,24 +232,28 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 	return -EINTR;
 }
 
-static noinline void __sched __down(struct semaphore *sem)
+static noinline void __sched __down(struct semaphore *sem, unsigned long flags)
 {
-	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT, flags);
 }
 
-static noinline int __sched __down_interruptible(struct semaphore *sem)
+static noinline int __sched __down_interruptible(struct semaphore *sem,
+							unsigned long flags)
 {
-	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT,
+									flags);
 }
 
-static noinline int __sched __down_killable(struct semaphore *sem)
+static noinline int __sched __down_killable(struct semaphore *sem,
+							unsigned long flags)
 {
-	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
+	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT, flags);
 }
 
-static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
+static noinline int __sched __down_timeout(struct semaphore *sem,
+					long timeout, unsigned long flags)
 {
-	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
+	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout, flags);
 }
 
 static noinline void __sched __up(struct semaphore *sem)
-- 
2.25.1



* Re: [PATCH] kernel/locking: make __down_common use flags previously saved
  2021-04-24 14:28 [PATCH] kernel/locking: make __down_common use flags previously saved Hyeonggon Yoo
@ 2021-05-11  2:23 ` Hyeonggon Yoo
  2021-05-12 19:09 ` Ingo Molnar
  1 sibling, 0 replies; 4+ messages in thread
From: Hyeonggon Yoo @ 2021-05-11  2:23 UTC (permalink / raw)
  To: peterz, mingo, will; +Cc: linux-kernel

Hello, is anyone there?

Could you tell me whether my approach is wrong, or whether there is
anything I can improve? It seemed odd to me that __down_common()
enables and disables interrupts unconditionally, regardless of the
flags that were saved earlier.


* Re: [PATCH] kernel/locking: make __down_common use flags previously saved
  2021-04-24 14:28 [PATCH] kernel/locking: make __down_common use flags previously saved Hyeonggon Yoo
  2021-05-11  2:23 ` Hyeonggon Yoo
@ 2021-05-12 19:09 ` Ingo Molnar
  2021-05-17 14:36   ` Hyeonggon Yoo
  1 sibling, 1 reply; 4+ messages in thread
From: Ingo Molnar @ 2021-05-12 19:09 UTC (permalink / raw)
  To: Hyeonggon Yoo; +Cc: peterz, mingo, will, linux-kernel, Thomas Gleixner


* Hyeonggon Yoo <42.hyeyoo@gmail.com> wrote:

> down(), down_interruptible(), down_killable(), and down_timeout()
> call raw_spin_lock_irqsave(), which saves the current interrupt state
> into flags.
> 
> However, __down_common(), which is called by the functions above, uses
> raw_spin_lock_irq() and raw_spin_unlock_irq() regardless of the flags
> saved earlier.

Yes, this is intentional, because if we get into __down_common() we have to 
schedule, so we have to enable interrupts.
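
For reference, the slow-path loop in the unpatched __down_common()
(abridged to the lines the patch touches, signal handling omitted)
looks roughly like this:

	for (;;) {
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);	/* IRQs back on before sleeping */
		timeout = schedule_timeout(timeout);	/* may sleep; needs IRQs enabled */
		raw_spin_lock_irq(&sem->lock);		/* re-disable before touching sem state */
		if (waiter.up)
			return 0;
	}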

> This mismatch can potentially cause problems.

What problems?

Thanks,

	Ingo


* Re: [PATCH] kernel/locking: make __down_common use flags previously saved
  2021-05-12 19:09 ` Ingo Molnar
@ 2021-05-17 14:36   ` Hyeonggon Yoo
  0 siblings, 0 replies; 4+ messages in thread
From: Hyeonggon Yoo @ 2021-05-17 14:36 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: peterz, mingo, will, linux-kernel, Thomas Gleixner

On Wed, May 12, 2021 at 09:09:34PM +0200, Ingo Molnar wrote:
> Yes, this is intentional, because if we get into __down_common() we have to 
> schedule, so we have to enable interrupts.

When I sent this patch I thought raw_spin_lock_irqsave() always had to
be paired with raw_spin_unlock_irqrestore(), but that was my
misunderstanding. I'm sorry for sending a patch claiming the code was
wrong without clear evidence.

Now I understand why the current code is correct, and I won't do that
again.
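
For anyone reading this thread later, here is the point as I now
understand it, sketched against the current (unpatched) code; this is
an abridged illustration, not a proposed change:

	/* down(): the caller's IRQ state is saved and restored around the lock */
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);			/* slow path, ends up in __down_common() */
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	/*
	 * __down_common(): if we get here the task is about to sleep, which
	 * is only valid when the caller had IRQs enabled to begin with, so
	 * the plain _irq variants are fine here and the final irqrestore in
	 * down() still restores the correct state.
	 */
	raw_spin_unlock_irq(&sem->lock);
	timeout = schedule_timeout(timeout);
	raw_spin_lock_irq(&sem->lock);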

thanks,
Hyeonggon

