From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@kernel.org>,
	Juri Lelli <juri.lelli@redhat.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	Will Deacon <will@kernel.org>, Waiman Long <longman@redhat.com>,
	Boqun Feng <boqun.feng@gmail.com>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Davidlohr Bueso <dave@stgolabs.net>,
	Mike Galbraith <efault@gmx.de>
Subject: [patch V4 04/68] sched: Prepare for RT sleeping spin/rwlocks
Date: Wed, 11 Aug 2021 14:22:35 +0200 (CEST)
Message-ID: <20210811121414.362712953@linutronix.de>
In-Reply-To: 20210811120348.855823694@linutronix.de

From: Thomas Gleixner <tglx@linutronix.de>

Waiting for spinlocks and rwlocks on non-RT enabled kernels is task::state
preserving. Any wakeup which matches the state is valid.

RT enabled kernels substitute them with 'sleeping' spinlocks. This creates
an issue vs. task::state.

In order to block on the lock, the task has to overwrite task::state and a
subsequent wakeup issued by the unlocker sets the state back to
TASK_RUNNING. As a consequence the task loses the state which was set
before the lock acquisition and also any regular wakeup targeted at the
task while it is blocked on the lock.

To handle this gracefully, add a 'saved_state' member to task_struct which
is used in the following way:

 1) When a task blocks on a 'sleeping' spinlock, the current state is saved
    in task::saved_state before it is set to TASK_RTLOCK_WAIT.

 2) When the task unblocks and has acquired the lock, it restores the
    saved state.

 3) When a regular wakeup happens for a task while it is blocked on the
    lock, the state change of that wakeup is redirected to operate on
    task::saved_state (see the sketch below).

    This is also required when task::state is already TASK_RUNNING because
    the task might have been woken up from the lock wait and has not yet
    restored the saved state.
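
As a condensed illustration (mirroring the ttwu_state_match() change in
kernel/sched/core.c below; debug checks omitted), the redirection done by
a regular wakeup on PREEMPT_RT boils down to:

	/* Caller holds p::pi_lock, so saved_state access is serialized */
	if (READ_ONCE(p->__state) & state) {
		*success = 1;
		return true;		/* normal wakeup */
	}
	/* Blocked on an RT lock: redirect the wakeup to saved_state */
	if (p->saved_state & state) {
		p->saved_state = TASK_RUNNING;
		*success = 1;		/* report success, but do not wake */
	}
	return false;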

To make it complete, provide the necessary helpers to save and restore the
saved state along with the necessary documentation of how the RT lock
blocking is supposed to work.
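
A condensed sketch of the intended lock-side usage (this mirrors the
documentation comment added to include/linux/sched.h below; try_lock()
and lock->wait_lock stand in for the actual lock implementation):

	/* Save task::state and switch to TASK_RTLOCK_WAIT */
	current_save_and_set_rtlock_wait_state();
	for (;;) {
		if (try_lock())
			break;
		raw_spin_unlock_irq(&lock->wait_lock);
		/* Block until the lock wakeup (TASK_RTLOCK_WAIT) arrives */
		schedule_rtlock();
		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(TASK_RTLOCK_WAIT);
	}
	/* Restore the saved state and any redirected regular wakeup */
	current_restore_rtlock_saved_state();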

For non-RT kernels there is no functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/sched.h |   66 ++++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/core.c   |   33 +++++++++++++++++++++++++
 2 files changed, 99 insertions(+)
---
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -143,9 +143,22 @@ struct task_group;
 		current->task_state_change = _THIS_IP_;			\
 	} while (0)
 
+# define debug_rtlock_wait_set_state()					\
+	do {								 \
+		current->saved_state_change = current->task_state_change;\
+		current->task_state_change = _THIS_IP_;			 \
+	} while (0)
+
+# define debug_rtlock_wait_restore_state()				\
+	do {								 \
+		current->task_state_change = current->saved_state_change;\
+	} while (0)
+
 #else
 # define debug_normal_state_change(cond)	do { } while (0)
 # define debug_special_state_change(cond)	do { } while (0)
+# define debug_rtlock_wait_set_state()		do { } while (0)
+# define debug_rtlock_wait_restore_state()	do { } while (0)
 #endif
 
 /*
@@ -213,6 +226,51 @@ struct task_group;
 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
 	} while (0)
 
+/*
+ * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
+ *
+ * RT's spin/rwlock substitutions are state preserving. The state of the
+ * task when blocking on the lock is saved in task_struct::saved_state and
+ * restored after the lock has been acquired.  These operations are
+ * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
+ * lock related wakeups while the task is blocked on the lock are
+ * redirected to operate on task_struct::saved_state to ensure that these
+ * are not dropped. On restore task_struct::saved_state is set to
+ * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
+ *
+ * The lock operation looks like this:
+ *
+ *	current_save_and_set_rtlock_wait_state();
+ *	for (;;) {
+ *		if (try_lock())
+ *			break;
+ *		raw_spin_unlock_irq(&lock->wait_lock);
+ *		schedule_rtlock();
+ *		raw_spin_lock_irq(&lock->wait_lock);
+ *		set_current_state(TASK_RTLOCK_WAIT);
+ *	}
+ *	current_restore_rtlock_saved_state();
+ */
+#define current_save_and_set_rtlock_wait_state()			\
+	do {								\
+		lockdep_assert_irqs_disabled();				\
+		raw_spin_lock(&current->pi_lock);			\
+		current->saved_state = current->__state;		\
+		debug_rtlock_wait_set_state();				\
+		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
+		raw_spin_unlock(&current->pi_lock);			\
+	} while (0);
+
+#define current_restore_rtlock_saved_state()				\
+	do {								\
+		lockdep_assert_irqs_disabled();				\
+		raw_spin_lock(&current->pi_lock);			\
+		debug_rtlock_wait_restore_state();			\
+		WRITE_ONCE(current->__state, current->saved_state);	\
+		current->saved_state = TASK_RUNNING;			\
+		raw_spin_unlock(&current->pi_lock);			\
+	} while (0);
+
 #define get_current_state()	READ_ONCE(current->__state)
 
 /* Task command name length: */
@@ -668,6 +726,11 @@ struct task_struct {
 #endif
 	unsigned int			__state;
 
+#ifdef CONFIG_PREEMPT_RT
+	/* saved state for "spinlock sleepers" */
+	unsigned int			saved_state;
+#endif
+
 	/*
 	 * This begins the randomizable portion of task_struct. Only
 	 * scheduling-critical items should be added above here.
@@ -1357,6 +1420,9 @@ struct task_struct {
 	struct kmap_ctrl		kmap_ctrl;
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	unsigned long			task_state_change;
+# ifdef CONFIG_PREEMPT_RT
+	unsigned long			saved_state_change;
+# endif
 #endif
 	int				pagefault_disabled;
 #ifdef CONFIG_MMU
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3566,14 +3566,47 @@ static void ttwu_queue(struct task_struc
  *
  * The caller holds p::pi_lock if p != current or has preemption
  * disabled when p == current.
+ *
+ * The rules of PREEMPT_RT saved_state:
+ *
+ *   The related locking code always holds p::pi_lock when updating
+ *   p::saved_state, which means the code is fully serialized in both cases.
+ *
+ *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
+ *   bits set. This allows to distinguish all wakeup scenarios.
  */
 static __always_inline
 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
 {
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
+		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
+			     state != TASK_RTLOCK_WAIT);
+	}
+
 	if (READ_ONCE(p->__state) & state) {
 		*success = 1;
 		return true;
 	}
+
+#ifdef CONFIG_PREEMPT_RT
+	/*
+	 * Saved state preserves the task state across blocking on
+	 * a RT lock.  If the state matches, set p::saved_state to
+	 * TASK_RUNNING, but do not wake the task because it waits
+	 * for a lock wakeup. Also indicate success because from
+	 * the regular waker's point of view this has succeeded.
+	 *
+	 * After acquiring the lock the task will restore p::state
+	 * from p::saved_state which ensures that the regular
+	 * wakeup is not lost. The restore will also set
+	 * p::saved_state to TASK_RUNNING so any further tests will
+	 * not result in false positives vs. @success
+	 */
+	if (p->saved_state & state) {
+		p->saved_state = TASK_RUNNING;
+		*success = 1;
+	}
+#endif
 	return false;
 }
 


Thread overview: 70+ messages
2021-08-11 12:22 [patch V4 00/68] locking, sched: The PREEMPT-RT locking infrastructure Thomas Gleixner
2021-08-11 12:22 ` [patch V4 01/68] sched: Split out the wakeup state check Thomas Gleixner
2021-08-11 12:22 ` [patch V4 02/68] sched: Introduce TASK_RTLOCK_WAIT Thomas Gleixner
2021-08-11 12:22 ` [patch V4 03/68] sched: Reorganize current::__state helpers Thomas Gleixner
2021-08-11 12:22 ` [patch V4 04/68] sched: Prepare for RT sleeping spin/rwlocks Thomas Gleixner [this message]
2021-08-11 12:22 ` [patch V4 05/68] sched: Rework the __schedule() preempt argument Thomas Gleixner
2021-08-11 12:22 ` [patch V4 06/68] sched: Provide schedule point for RT locks Thomas Gleixner
2021-08-11 12:22 ` [patch V4 07/68] sched/wake_q: Provide WAKE_Q_HEAD_INITIALIZER Thomas Gleixner
2021-08-11 12:22 ` [patch V4 08/68] media/atomisp: Use lockdep instead of *mutex_is_locked() Thomas Gleixner
2021-08-11 12:22 ` [patch V4 09/68] rtmutex: Remove rt_mutex_is_locked() Thomas Gleixner
2021-08-11 12:22 ` [patch V4 10/68] rtmutex: Convert macros to inlines Thomas Gleixner
2021-08-11 12:22 ` [patch V4 11/68] rtmutex: Switch to try_cmpxchg() Thomas Gleixner
2021-08-11 12:22 ` [patch V4 12/68] rtmutex: Split API and implementation Thomas Gleixner
2021-08-11 12:22 ` [patch V4 13/68] rtmutex: Split out the inner parts of struct rtmutex Thomas Gleixner
2021-08-11 12:22 ` [patch V4 14/68] locking/rtmutex: Provide rt_mutex_slowlock_locked() Thomas Gleixner
2021-08-11 12:22 ` [patch V4 15/68] rtmutex: Provide rt_mutex_base_is_locked() Thomas Gleixner
2021-08-11 12:22 ` [patch V4 16/68] locking: Add base code for RT rw_semaphore and rwlock Thomas Gleixner
2021-08-11 12:22 ` [patch V4 17/68] locking/rwsem: Add rtmutex based R/W semaphore implementation Thomas Gleixner
2021-08-11 12:22 ` [patch V4 18/68] locking/rtmutex: Add wake_state to rt_mutex_waiter Thomas Gleixner
2021-08-11 12:22 ` [patch V4 19/68] locking/rtmutex: Provide rt_wake_q and helpers Thomas Gleixner
2021-08-11 12:22 ` [patch V4 20/68] locking/rtmutex: Use rt_mutex_wake_q_head Thomas Gleixner
2021-08-11 12:22 ` [patch V4 21/68] locking/rtmutex: Prepare RT rt_mutex_wake_q for RT locks Thomas Gleixner
2021-08-11 12:22 ` [patch V4 22/68] locking/rtmutex: Guard regular sleeping locks specific functions Thomas Gleixner
2021-08-11 12:22 ` [patch V4 23/68] locking/spinlock: Split the lock types header Thomas Gleixner
2021-08-11 12:23 ` [patch V4 24/68] locking/rtmutex: Prevent future include recursion hell Thomas Gleixner
2021-08-11 12:23 ` [patch V4 25/68] locking/lockdep: Reduce includes in debug_locks.h Thomas Gleixner
2021-08-11 12:23 ` [patch V4 26/68] rbtree: Split out the rbtree type definitions Thomas Gleixner
2021-08-11 12:23 ` [patch V4 27/68] locking/rtmutex: Include only rbtree types Thomas Gleixner
2021-08-11 12:23 ` [patch V4 28/68] locking/spinlock: Provide RT specific spinlock type Thomas Gleixner
2021-08-11 12:23 ` [patch V4 29/68] locking/spinlock: Provide RT variant header Thomas Gleixner
2021-08-11 12:23 ` [patch V4 30/68] locking/rtmutex: Provide the spin/rwlock core lock function Thomas Gleixner
2021-08-11 12:23 ` [patch V4 31/68] locking/spinlock: Provide RT variant Thomas Gleixner
2021-08-11 12:23 ` [patch V4 32/68] locking/rwlock: " Thomas Gleixner
2021-08-11 12:23 ` [patch V4 33/68] locking/rtmutex: Squash !RT tasks to DEFAULT_PRIO Thomas Gleixner
2021-08-11 12:23 ` [patch V4 34/68] locking/mutex: Consolidate core headers Thomas Gleixner
2021-08-11 12:23 ` [patch V4 35/68] locking/mutex: Move waiter to core header Thomas Gleixner
2021-08-11 12:23 ` [patch V4 36/68] locking/ww_mutex: Move ww_mutex declarations into ww_mutex.h Thomas Gleixner
2021-08-11 12:23 ` [patch V4 37/68] locking/mutex: Make mutex::wait_lock raw Thomas Gleixner
2021-08-11 12:23 ` [patch V4 38/68] locking/ww_mutex: Simplify lockdep annotation Thomas Gleixner
2021-08-11 12:23 ` [patch V4 39/68] locking/ww_mutex: Gather mutex_waiter initialization Thomas Gleixner
2021-08-11 12:23 ` [patch V4 40/68] locking/ww_mutex: Split up ww_mutex_unlock() Thomas Gleixner
2021-08-11 12:23 ` [patch V4 41/68] locking/ww_mutex: Split W/W implementation logic Thomas Gleixner
2021-08-11 12:23 ` [patch V4 42/68] locking/ww_mutex: Remove __sched annotation Thomas Gleixner
2021-08-11 12:23 ` [patch V4 43/68] locking/ww_mutex: Abstract waiter iteration Thomas Gleixner
2021-08-11 12:23 ` [patch V4 44/68] locking/ww_mutex: Abstract waiter enqueueing Thomas Gleixner
2021-08-11 12:23 ` [patch V4 45/68] locking/ww_mutex: Abstract mutex accessors Thomas Gleixner
2021-08-11 12:23 ` [patch V4 46/68] locking/ww_mutex: Abstract mutex types Thomas Gleixner
2021-08-11 12:23 ` [patch V4 47/68] locking/ww_mutex: Abstract internal lock access Thomas Gleixner
2021-08-11 12:23 ` [patch V4 48/68] locking/ww_mutex: Implement rt_mutex accessors Thomas Gleixner
2021-08-11 12:23 ` [patch V4 49/68] locking/ww_mutex: Add RT priority to W/W order Thomas Gleixner
2021-08-11 12:23 ` [patch V4 50/68] locking/ww_mutex: Add rt_mutex based lock type and accessors Thomas Gleixner
2021-08-11 12:23 ` [patch V4 51/68] locking/rtmutex: Extend the rtmutex core to support ww_mutex Thomas Gleixner
2021-08-11 12:23 ` [patch V4 52/68] locking/ww_mutex: Implement rtmutex based ww_mutex API functions Thomas Gleixner
2021-08-11 12:23 ` [patch V4 53/68] locking/rtmutex: Add mutex variant for RT Thomas Gleixner
2021-08-11 12:23 ` [patch V4 54/68] lib/test_lockup: Adapt to changed variables Thomas Gleixner
2021-08-11 12:23 ` [patch V4 55/68] futex: Validate waiter correctly in futex_proxy_trylock_atomic() Thomas Gleixner
2021-08-11 12:23 ` [patch V4 56/68] futex: Cleanup stale comments Thomas Gleixner
2021-08-11 12:23 ` [patch V4 57/68] futex: Clarify futex_requeue() PI handling Thomas Gleixner
2021-08-11 12:23 ` [patch V4 58/68] futex: Remove bogus condition for requeue PI Thomas Gleixner
2021-08-11 12:23 ` [patch V4 59/68] futex: Correct the number of requeued waiters for PI Thomas Gleixner
2021-08-11 12:23 ` [patch V4 60/68] futex: Restructure futex_requeue() Thomas Gleixner
2021-08-11 12:23 ` [patch V4 61/68] futex: Clarify comment in futex_requeue() Thomas Gleixner
2021-08-11 12:23 ` [patch V4 62/68] futex: Reorder sanity checks " Thomas Gleixner
2021-08-11 12:23 ` [patch V4 63/68] futex: Simplify handle_early_requeue_pi_wakeup() Thomas Gleixner
2021-08-11 12:23 ` [patch V4 64/68] futex: Prevent requeue_pi() lock nesting issue on RT Thomas Gleixner
2021-08-11 12:23 ` [patch V4 65/68] rtmutex: Prevent lockdep false positive with PI futexes Thomas Gleixner
2021-08-11 12:23 ` [patch V4 66/68] preempt: Adjust PREEMPT_LOCK_OFFSET for RT Thomas Gleixner
2021-08-11 12:23 ` [patch V4 67/68] locking/rtmutex: Implement equal priority lock stealing Thomas Gleixner
2021-08-11 12:23 ` [patch V4 68/68] locking/rtmutex: Add adaptive spinwait mechanism Thomas Gleixner
2021-08-13  8:05 ` [patch V4 69/68] locking/rt: Add missing __might_sleep() to spin/rwlocks Thomas Gleixner
