From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@kernel.org>, Will Deacon <will@kernel.org>,
	Waiman Long <longman@redhat.com>,
	Boqun Feng <boqun.feng@gmail.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Subject: [patch V2 12/15] locking/rtmutex: Consolidate the fast/slowpath invocation
Date: Fri, 26 Mar 2021 16:29:41 +0100
Message-ID: <20210326153944.247927548@linutronix.de>
In-Reply-To: 20210326152929.709289883@linutronix.de

The indirection via a function pointer (which the compiler at least optimizes
into a tail call) makes the code hard to read.

Clean it up and move the futex-related trylock functions down to the futex
section.

Move the wake_q wakeup into rt_mutex_slowunlock(). There is no point in
handing it to the caller; the futex code uses a different function anyway.
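
For illustration, a condensed sketch of the resulting shape of the lock and
unlock paths (condensed from the diff below; the failure-path lockdep release
and the wait_lock/waiter handling in the slow unlock are trimmed for brevity):

static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, long state,
					   unsigned int subclass)
{
	might_sleep();
	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);

	/* Fast path: uncontended acquire via cmpxchg, no indirection */
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 0;

	/* Contended: invoke the slow path directly */
	return rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
}

static void __sched rt_mutex_slowunlock(struct rt_mutex *lock)
{
	DEFINE_WAKE_Q(wake_q);

	/* ... wait_lock handling and marking the top waiter elided ... */

	/* The wakeup now happens here instead of in the caller */
	rt_mutex_postunlock(&wake_q);
}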

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V2: Make lockdep work by design and not just by chance
---
 kernel/locking/rtmutex.c |  144 +++++++++++++++++++----------------------------
 1 file changed, 59 insertions(+), 85 deletions(-)

--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1299,13 +1299,24 @@ static int __sched rt_mutex_slowtrylock(
 }
 
 /*
+ * Performs the wakeup of the top-waiter and re-enables preemption.
+ */
+void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
+{
+	wake_up_q(wake_q);
+
+	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
+	preempt_enable();
+}
+
+/*
  * Slow path to release a rt-mutex.
  *
  * Return whether the current task needs to call rt_mutex_postunlock().
  */
-static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
-					struct wake_q_head *wake_q)
+static void __sched rt_mutex_slowunlock(struct rt_mutex *lock)
 {
+	DEFINE_WAKE_Q(wake_q);
 	unsigned long flags;
 
 	/* irqsave required to support early boot calls */
@@ -1347,7 +1358,7 @@ static bool __sched rt_mutex_slowunlock(
 	while (!rt_mutex_has_waiters(lock)) {
 		/* Drops lock->wait_lock ! */
 		if (unlock_rt_mutex_safe(lock, flags) == true)
-			return false;
+			return;
 		/* Relock the rtmutex and try again */
 		raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	}
@@ -1358,10 +1369,10 @@ static bool __sched rt_mutex_slowunlock(
 	 *
 	 * Queue the next waiter for wakeup once we release the wait_lock.
 	 */
-	mark_wakeup_next_waiter(wake_q, lock);
+	mark_wakeup_next_waiter(&wake_q, lock);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
-	return true; /* call rt_mutex_postunlock() */
+	rt_mutex_postunlock(&wake_q);
 }
 
 /*
@@ -1370,60 +1381,21 @@ static bool __sched rt_mutex_slowunlock(
  * The atomic acquire/release ops are compiled away, when either the
  * architecture does not support cmpxchg or when debugging is enabled.
  */
-static __always_inline int
-rt_mutex_fastlock(struct rt_mutex *lock, int state,
-		  int (*slowfn)(struct rt_mutex *lock, int state,
-				struct hrtimer_sleeper *timeout,
-				enum rtmutex_chainwalk chwalk))
+static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, long state,
+					   unsigned int subclass)
 {
-	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-		return 0;
+	int ret;
 
-	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
-}
+	might_sleep();
+	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
 
-static __always_inline int
-rt_mutex_fasttrylock(struct rt_mutex *lock,
-		     int (*slowfn)(struct rt_mutex *lock))
-{
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-		return 1;
-
-	return slowfn(lock);
-}
-
-/*
- * Performs the wakeup of the top-waiter and re-enables preemption.
- */
-void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
-{
-	wake_up_q(wake_q);
-
-	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
-	preempt_enable();
-}
-
-static __always_inline void
-rt_mutex_fastunlock(struct rt_mutex *lock,
-		    bool (*slowfn)(struct rt_mutex *lock,
-				   struct wake_q_head *wqh))
-{
-	DEFINE_WAKE_Q(wake_q);
-
-	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
-		return;
-
-	if (slowfn(lock, &wake_q))
-		rt_mutex_postunlock(&wake_q);
-}
-
-static __always_inline void __rt_mutex_lock(struct rt_mutex *lock,
-					    unsigned int subclass)
-{
-	might_sleep();
+		return 0;
 
-	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	ret = rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+	if (ret)
+		mutex_release(&lock->dep_map, _RET_IP_);
+	return ret;
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1435,7 +1407,7 @@ static __always_inline void __rt_mutex_l
  */
 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
 {
-	__rt_mutex_lock(lock, subclass);
+	__rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
 
@@ -1448,7 +1420,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
  */
 void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
-	__rt_mutex_lock(lock, 0);
+	__rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 #endif
@@ -1464,42 +1436,21 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
-	int ret;
-
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
-	if (ret)
-		mutex_release(&lock->dep_map, _RET_IP_);
-
-	return ret;
+	return __rt_mutex_lock(lock, TASK_INTERRUPTIBLE, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
-/*
- * Futex variant, must not use fastpath.
- */
-int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
-{
-	return rt_mutex_slowtrylock(lock);
-}
-
-int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
-{
-	return __rt_mutex_slowtrylock(lock);
-}
-
 /**
  * rt_mutex_trylock - try to lock a rt_mutex
  *
  * @lock:	the rt_mutex to be locked
  *
- * This function can only be called in thread context. It's safe to
- * call it from atomic regions, but not from hard interrupt or soft
- * interrupt context.
+ * This function can only be called in thread context. It's safe to call it
+ * from atomic regions, but not from hard or soft interrupt context.
  *
- * Returns 1 on success and 0 on contention
+ * Returns:
+ *  1 on success
+ *  0 on contention
  */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
@@ -1508,7 +1459,14 @@ int __sched rt_mutex_trylock(struct rt_m
 	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
 		return 0;
 
-	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	/*
+	 * No lockdep annotation required because lockdep disables the fast
+	 * path.
+	 */
+	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+		return 1;
+
+	ret = rt_mutex_slowtrylock(lock);
 	if (ret)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 
@@ -1524,10 +1482,26 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
 	mutex_release(&lock->dep_map, _RET_IP_);
-	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
+		return;
+
+	rt_mutex_slowunlock(lock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
+/*
+ * Futex variants, must not use fastpath.
+ */
+int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+	return rt_mutex_slowtrylock(lock);
+}
+
+int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+	return __rt_mutex_slowtrylock(lock);
+}
+
 /**
  * __rt_mutex_futex_unlock - Futex variant, that since futex variants
  * do not use the fast-path, can be simple and will not need to retry.


