From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@kernel.org>,
	Juri Lelli <juri.lelli@redhat.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	Will Deacon <will@kernel.org>, Waiman Long <longman@redhat.com>,
	Boqun Feng <boqun.feng@gmail.com>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Davidlohr Bueso <dave@stgolabs.net>
Subject: [patch 36/50] locking/mutex: Replace struct mutex in core code
Date: Tue, 13 Jul 2021 17:11:30 +0200	[thread overview]
Message-ID: <20210713160749.452934858@linutronix.de> (raw)
In-Reply-To: <20210713151054.700719949@linutronix.de>

From: Thomas Gleixner <tglx@linutronix.de>

PREEMPT_RT replaces 'struct mutex' with an rtmutex based variant so that
all mutex operations are included in the priority inheritance scheme, but
it wants to utilize the ww_mutex specific part of the regular mutex
implementation as is.

As the regular mutex and ww_mutex implementations are tightly coupled
(ww_mutex has a 'struct mutex' inside) and share a lot of code (ww_mutex
is mostly an extension), a simple replacement of 'struct mutex' does not
work.

'struct mutex' has an associated typedef '_mutex_t'. Replace all 'struct
mutex' references in the mutex core code with '_mutex_t', which allows an
RT specific 'struct mutex' to be introduced in the final step.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/locking/mutex-debug.c |   12 ++++-----
 kernel/locking/mutex.c       |   52 +++++++++++++++++++++----------------------
 kernel/locking/mutex.h       |   14 +++++------
 3 files changed, 39 insertions(+), 39 deletions(-)
---
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -25,14 +25,14 @@
 /*
  * Must be called with lock->wait_lock held.
  */
-void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
+void debug_mutex_lock_common(_mutex_t *lock, struct mutex_waiter *waiter)
 {
 	memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
 	waiter->magic = waiter;
 	INIT_LIST_HEAD(&waiter->list);
 }
 
-void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+void debug_mutex_wake_waiter(_mutex_t *lock, struct mutex_waiter *waiter)
 {
 	lockdep_assert_held(&lock->wait_lock);
 	DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
@@ -46,7 +46,7 @@ void debug_mutex_free_waiter(struct mute
 	memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
 }
 
-void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+void debug_mutex_add_waiter(_mutex_t *lock, struct mutex_waiter *waiter,
 			    struct task_struct *task)
 {
 	lockdep_assert_held(&lock->wait_lock);
@@ -55,7 +55,7 @@ void debug_mutex_add_waiter(struct mutex
 	task->blocked_on = waiter;
 }
 
-void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+void debug_mutex_remove_waiter(_mutex_t *lock, struct mutex_waiter *waiter,
 			 struct task_struct *task)
 {
 	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -67,7 +67,7 @@ void debug_mutex_remove_waiter(struct mu
 	waiter->task = NULL;
 }
 
-void debug_mutex_unlock(struct mutex *lock)
+void debug_mutex_unlock(_mutex_t *lock)
 {
 	if (likely(debug_locks)) {
 		DEBUG_LOCKS_WARN_ON(lock->magic != lock);
@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
 	}
 }
 
-void debug_mutex_init(struct mutex *lock, const char *name,
+void debug_mutex_init(_mutex_t *lock, const char *name,
 		      struct lock_class_key *key)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -66,7 +66,7 @@ EXPORT_SYMBOL(__mutex_t_init);
  *
  * DO NOT USE (outside of mutex code).
  */
-static inline struct task_struct *__mutex_owner(struct mutex *lock)
+static inline struct task_struct *__mutex_owner(_mutex_t *lock)
 {
 	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
 }
@@ -90,7 +90,7 @@ static inline unsigned long __owner_flag
 /*
  * Trylock variant that returns the owning task on failure.
  */
-static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
+static inline struct task_struct *__mutex_trylock_or_owner(_mutex_t *lock)
 {
 	unsigned long owner, curr = (unsigned long)current;
 
@@ -133,7 +133,7 @@ static inline struct task_struct *__mute
 /*
  * Actual trylock that will work on any unlocked state.
  */
-static inline bool __mutex_trylock(struct mutex *lock)
+static inline bool __mutex_trylock(_mutex_t *lock)
 {
 	return !__mutex_trylock_or_owner(lock);
 }
@@ -149,7 +149,7 @@ static inline bool __mutex_trylock(struc
  * Optimistic trylock that only works in the uncontended case. Make sure to
  * follow with a __mutex_trylock() before failing.
  */
-static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
+static __always_inline bool __mutex_trylock_fast(_mutex_t *lock)
 {
 	unsigned long curr = (unsigned long)current;
 	unsigned long zero = 0UL;
@@ -160,7 +160,7 @@ static __always_inline bool __mutex_tryl
 	return false;
 }
 
-static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
+static __always_inline bool __mutex_unlock_fast(_mutex_t *lock)
 {
 	unsigned long curr = (unsigned long)current;
 
@@ -171,17 +171,17 @@ static __always_inline bool __mutex_unlo
 }
 #endif
 
-static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
+static inline void __mutex_set_flag(_mutex_t *lock, unsigned long flag)
 {
 	atomic_long_or(flag, &lock->owner);
 }
 
-static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
+static inline void __mutex_clear_flag(_mutex_t *lock, unsigned long flag)
 {
 	atomic_long_andnot(flag, &lock->owner);
 }
 
-static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
+static inline bool __mutex_waiter_is_first(_mutex_t *lock, struct mutex_waiter *waiter)
 {
 	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
 }
@@ -191,7 +191,7 @@ static inline bool __mutex_waiter_is_fir
  * FLAG_WAITERS flag if it's the first waiter.
  */
 static void
-__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+__mutex_add_waiter(_mutex_t *lock, struct mutex_waiter *waiter,
 		   struct list_head *list)
 {
 	debug_mutex_add_waiter(lock, waiter, current);
@@ -202,7 +202,7 @@ static void
 }
 
 static void
-__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+__mutex_remove_waiter(_mutex_t *lock, struct mutex_waiter *waiter)
 {
 	list_del(&waiter->list);
 	if (likely(list_empty(&lock->wait_list)))
@@ -217,7 +217,7 @@ static void
  * WAITERS. Provides RELEASE semantics like a regular unlock, the
  * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
  */
-static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
+static void __mutex_handoff(_mutex_t *lock, struct task_struct *task)
 {
 	unsigned long owner = atomic_long_read(&lock->owner);
 
@@ -360,7 +360,7 @@ static inline bool __sched
  * __ww_mutex_check_kill() wake any but the earliest context.
  */
 static bool __sched
-__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
+__ww_mutex_die(_mutex_t *lock, struct mutex_waiter *waiter,
 	       struct ww_acquire_ctx *ww_ctx)
 {
 	if (!ww_ctx->is_wait_die)
@@ -382,7 +382,7 @@ static bool __sched
  * the lock holders. Even if multiple waiters may wound the lock holder,
  * it's sufficient that only one does.
  */
-static bool __ww_mutex_wound(struct mutex *lock,
+static bool __ww_mutex_wound(_mutex_t *lock,
 			     struct ww_acquire_ctx *ww_ctx,
 			     struct ww_acquire_ctx *hold_ctx)
 {
@@ -437,7 +437,7 @@ static bool __ww_mutex_wound(struct mute
  * The current task must not be on the wait list.
  */
 static void __sched
-__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+__ww_mutex_check_waiters(_mutex_t *lock, struct ww_acquire_ctx *ww_ctx)
 {
 	struct mutex_waiter *cur;
 
@@ -495,7 +495,7 @@ ww_mutex_set_context_fastpath(struct ww_
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 
 static inline
-bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+bool ww_mutex_spin_on_owner(_mutex_t *lock, struct ww_acquire_ctx *ww_ctx,
 			    struct mutex_waiter *waiter)
 {
 	struct ww_mutex *ww;
@@ -543,7 +543,7 @@ bool ww_mutex_spin_on_owner(struct mutex
  * "noinline" so that this function shows up on perf profiles.
  */
 static noinline
-bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
+bool mutex_spin_on_owner(_mutex_t *lock, struct task_struct *owner,
 			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
 {
 	bool ret = true;
@@ -582,7 +582,7 @@ bool mutex_spin_on_owner(struct mutex *l
 /*
  * Initial check for entering the mutex spinning loop
  */
-static inline int mutex_can_spin_on_owner(struct mutex *lock)
+static inline int mutex_can_spin_on_owner(_mutex_t *lock)
 {
 	struct task_struct *owner;
 	int retval = 1;
@@ -631,7 +631,7 @@ static inline int mutex_can_spin_on_owne
  * changed to itself.
  */
 static __always_inline bool
-mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+mutex_optimistic_spin(_mutex_t *lock, struct ww_acquire_ctx *ww_ctx,
 		      struct mutex_waiter *waiter)
 {
 	if (!waiter) {
@@ -707,14 +707,14 @@ mutex_optimistic_spin(struct mutex *lock
 }
 #else
 static __always_inline bool
-mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+mutex_optimistic_spin(_mutex_t *lock, struct ww_acquire_ctx *ww_ctx,
 		      struct mutex_waiter *waiter)
 {
 	return false;
 }
 #endif
 
-static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
+static noinline void __sched __mutex_unlock_slowpath(_mutex_t *lock, unsigned long ip);
 
 /**
  * mutex_unlock - release the mutex
@@ -769,7 +769,7 @@ EXPORT_SYMBOL(ww_mutex_unlock);
 
 
 static __always_inline int __sched
-__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+__ww_mutex_kill(_mutex_t *lock, struct ww_acquire_ctx *ww_ctx)
 {
 	if (ww_ctx->acquired > 0) {
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -798,7 +798,7 @@ static __always_inline int __sched
  * look at waiters before us in the wait-list.
  */
 static inline int __sched
-__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
+__ww_mutex_check_kill(_mutex_t *lock, struct mutex_waiter *waiter,
 		      struct ww_acquire_ctx *ctx)
 {
 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
@@ -846,7 +846,7 @@ static inline int __sched
  */
 static inline int __sched
 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
-		      struct mutex *lock,
+		      _mutex_t *lock,
 		      struct ww_acquire_ctx *ww_ctx)
 {
 	struct mutex_waiter *cur;
@@ -919,7 +919,7 @@ static inline int __sched
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static __always_inline int __sched
-__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
+__mutex_lock_common(_mutex_t *lock, unsigned int state, unsigned int subclass,
 		    struct lockdep_map *nest_lock, unsigned long ip,
 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
@@ -1101,7 +1101,7 @@ static int __sched
 }
 
 static int __sched
-__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
+__ww_mutex_lock(_mutex_t *lock, unsigned int state, unsigned int subclass,
 		struct lockdep_map *nest_lock, unsigned long ip,
 		struct ww_acquire_ctx *ww_ctx)
 {
@@ -1216,7 +1216,7 @@ EXPORT_SYMBOL_GPL(ww_mutex_lock_interrup
 /*
  * Release the lock, slowpath:
  */
-static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
+static noinline void __sched __mutex_unlock_slowpath(_mutex_t *lock, unsigned long ip)
 {
 	struct task_struct *next = NULL;
 	DEFINE_WAKE_Q(wake_q);
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -21,18 +21,18 @@ struct mutex_waiter {
 };
 
 #ifdef CONFIG_DEBUG_MUTEXES
-extern void debug_mutex_lock_common(struct mutex *lock,
+extern void debug_mutex_lock_common(_mutex_t *lock,
 				    struct mutex_waiter *waiter);
-extern void debug_mutex_wake_waiter(struct mutex *lock,
+extern void debug_mutex_wake_waiter(_mutex_t *lock,
 				    struct mutex_waiter *waiter);
 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
-extern void debug_mutex_add_waiter(struct mutex *lock,
+extern void debug_mutex_add_waiter(_mutex_t *lock,
 				   struct mutex_waiter *waiter,
 				   struct task_struct *task);
-extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+extern void debug_mutex_remove_waiter(_mutex_t *lock, struct mutex_waiter *waiter,
 				      struct task_struct *task);
-extern void debug_mutex_unlock(struct mutex *lock);
-extern void debug_mutex_init(struct mutex *lock, const char *name,
+extern void debug_mutex_unlock(_mutex_t *lock);
+extern void debug_mutex_init(_mutex_t *lock, const char *name,
 			     struct lock_class_key *key);
 #else /* CONFIG_DEBUG_MUTEXES */
 
@@ -44,7 +44,7 @@ extern void debug_mutex_init(struct mute
 #define debug_mutex_init(lock, name, key)		do { } while (0)
 
 static inline void
-debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
+debug_mutex_lock_common(_mutex_t *lock, struct mutex_waiter *waiter)
 {
 }
 #endif /* !CONFIG_DEBUG_MUTEXES */


Thread overview: 77+ messages
2021-07-13 15:10 [patch 00/50] locking, sched: The PREEMPT-RT locking infrastructure Thomas Gleixner
2021-07-13 15:10 ` [patch 01/50] sched: Split out the wakeup state check Thomas Gleixner
2021-07-13 15:10 ` [patch 02/50] sched: Introduce TASK_RTLOCK_WAIT Thomas Gleixner
2021-07-13 15:10 ` [patch 03/50] sched: Prepare for RT sleeping spin/rwlocks Thomas Gleixner
2021-07-13 19:52   ` Waiman Long
2021-07-14 23:20   ` Valentin Schneider
2021-07-15  9:27     ` Peter Zijlstra
2021-07-15 14:08       ` Valentin Schneider
2021-07-13 15:10 ` [patch 04/50] sched: Rework the __schedule() preempt argument Thomas Gleixner
2021-07-13 20:04   ` Waiman Long
2021-07-13 15:10 ` [patch 05/50] sched: Provide schedule point for RT locks Thomas Gleixner
2021-07-14  8:28   ` Peter Zijlstra
2021-07-14  9:49     ` Thomas Gleixner
2021-07-14 10:17       ` Peter Zijlstra
2021-07-14 11:32         ` Thomas Gleixner
2021-07-13 15:11 ` [patch 06/50] sched/wake_q: Provide WAKE_Q_HEAD_INITIALIZER Thomas Gleixner
2021-07-13 15:11 ` [patch 07/50] rtmutex: Convert macros to inlines Thomas Gleixner
2021-07-13 15:11 ` [patch 08/50] rtmutex: Switch to try_cmpxchg() Thomas Gleixner
2021-07-13 15:11 ` [patch 09/50] rtmutex: Split API and implementation Thomas Gleixner
2021-07-13 15:11 ` [patch 10/50] locking/rtmutex: Provide rt_mutex_slowlock_locked() Thomas Gleixner
2021-07-27 17:20   ` Valentin Schneider
2021-07-27 19:02     ` Thomas Gleixner
2021-07-13 15:11 ` [patch 11/50] locking/rtmutex: Provide lockdep less variants of rtmutex interfaces Thomas Gleixner
2021-07-13 15:11 ` [patch 12/50] locking: Add base code for RT rw_semaphore and rwlock Thomas Gleixner
2021-07-13 15:11 ` [patch 13/50] locking/rwsem: Add rtmutex based R/W semaphore implementation Thomas Gleixner
2021-07-13 15:11 ` [patch 14/50] locking/rtmutex: Add wake_state to rt_mutex_waiter Thomas Gleixner
2021-07-14 10:18   ` Peter Zijlstra
2021-07-14 11:33     ` Thomas Gleixner
2021-07-13 15:11 ` [patch 15/50] locking/rtmutex: Provide rt_mutex_wake_q and helpers Thomas Gleixner
2021-07-13 15:11 ` [patch 16/50] locking/rtmutex: Use rt_mutex_wake_q_head Thomas Gleixner
2021-07-14  8:55   ` Peter Zijlstra
2021-07-14  9:51     ` Thomas Gleixner
2021-07-13 15:11 ` [patch 17/50] locking/rtmutex: Prepare RT rt_mutex_wake_q for RT locks Thomas Gleixner
2021-07-14  8:55   ` Peter Zijlstra
2021-07-14  9:52     ` Thomas Gleixner
2021-07-13 15:11 ` [patch 18/50] locking/rtmutex: Guard regular sleeping locks specific functions Thomas Gleixner
2021-07-13 15:11 ` [patch 19/50] locking/spinlock: Split the lock types header Thomas Gleixner
2021-07-13 15:11 ` [patch 20/50] locking/rtmutex: Prevent future include recursion hell Thomas Gleixner
2021-07-13 15:11 ` [patch 21/50] locking/lockdep: Reduce includes in debug_locks.h Thomas Gleixner
2021-07-13 15:11 ` [patch 22/50] rbtree: Split out the rbtree type definitions Thomas Gleixner
2021-07-14  9:24   ` Peter Zijlstra
2021-07-14  9:31     ` Arnaldo
2021-07-13 15:11 ` [patch 23/50] locking/rtmutex: Include only rbtree types Thomas Gleixner
2021-07-13 15:11 ` [patch 24/50] locking/spinlock: Provide RT specific spinlock type Thomas Gleixner
2021-07-14  9:54   ` Peter Zijlstra
2021-07-14 10:07     ` [PATCH] media/atomisp: Use lockdep instead of *mutex_is_locked() Peter Zijlstra
2021-07-13 15:11 ` [patch 25/50] locking/spinlock: Provide RT variant header Thomas Gleixner
2021-07-13 15:11 ` [patch 26/50] locking/rtmutex: Provide the spin/rwlock core lock function Thomas Gleixner
2021-07-14 10:59   ` Peter Zijlstra
2021-07-13 15:11 ` [patch 27/50] locking/spinlock: Provide RT variant Thomas Gleixner
2021-07-14 11:25   ` Peter Zijlstra
2021-07-13 15:11 ` [patch 28/50] locking/rwlock: " Thomas Gleixner
2021-07-13 15:11 ` [patch 29/50] locking/mutex: Consolidate core headers Thomas Gleixner
2021-07-13 15:11 ` [patch 30/50] locking/mutex: Move waiter to core header Thomas Gleixner
2021-07-13 15:11 ` [patch 31/50] locking/ww_mutex: Move ww_mutex declarations into ww_mutex.h Thomas Gleixner
2021-07-13 15:11 ` [patch 32/50] locking/mutex: Make mutex::wait_lock raw Thomas Gleixner
2021-07-13 15:11 ` [patch 33/50] locking/mutex: Introduce _mutex_t Thomas Gleixner
2021-07-15 17:31   ` Peter Zijlstra
2021-07-13 15:11 ` [patch 34/50] locking/mutex: Rename the ww_mutex relevant functions Thomas Gleixner
2021-07-13 15:11 ` [patch 35/50] locking/ww_mutex: Switch to _mutex_t Thomas Gleixner
2021-07-13 15:11 ` Thomas Gleixner [this message]
2021-07-13 15:11 ` [patch 37/50] locking/mutex: Rearrange items in mutex.h Thomas Gleixner
2021-07-13 15:11 ` [patch 38/50] locking/mutex: Exclude non-ww_mutex API for RT Thomas Gleixner
2021-07-13 15:11 ` [patch 39/50] locking/rtmutex: Add mutex variant " Thomas Gleixner
2021-07-13 15:11 ` [patch 40/50] lib/test_lockup: Adapt to changed variables Thomas Gleixner
2021-07-13 15:11 ` [patch 41/50] futex: Validate waiter correctly in futex_proxy_trylock_atomic() Thomas Gleixner
2021-07-13 15:11 ` [patch 42/50] futex: Cleanup stale comments Thomas Gleixner
2021-07-28 23:28   ` André Almeida
2021-07-13 15:11 ` [patch 43/50] futex: Correct the number of requeued waiters for PI Thomas Gleixner
2021-07-13 15:11 ` [patch 44/50] futex: Restructure futex_requeue() Thomas Gleixner
2021-07-13 15:11 ` [patch 45/50] futex: Clarify comment in futex_requeue() Thomas Gleixner
2021-07-13 15:11 ` [patch 46/50] futex: Prevent requeue_pi() lock nesting issue on RT Thomas Gleixner
2021-07-13 15:11 ` [patch 47/50] rtmutex: Prevent lockdep false positive with PI futexes Thomas Gleixner
2021-07-13 15:11 ` [patch 48/50] preempt: Adjust PREEMPT_LOCK_OFFSET for RT Thomas Gleixner
2021-07-13 15:11 ` [patch 49/50] locking/rtmutex: Implement equal priority lock stealing Thomas Gleixner
2021-07-13 15:11 ` [patch 50/50] locking/rtmutex: Add adaptive spinwait mechanism Thomas Gleixner
2021-07-27 17:19 ` [patch 00/50] locking, sched: The PREEMPT-RT locking infrastructure Valentin Schneider
