From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@kernel.org>,
	Juri Lelli <juri.lelli@redhat.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	Will Deacon <will@kernel.org>, Waiman Long <longman@redhat.com>,
	Boqun Feng <boqun.feng@gmail.com>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Davidlohr Bueso <dave@stgolabs.net>,
	Mike Galbraith <efault@gmx.de>
Subject: [patch V3 04/64] sched: Prepare for RT sleeping spin/rwlocks
Date: Thu, 05 Aug 2021 17:13:04 +0200
Message-ID: <20210805153953.034726466@linutronix.de>
In-Reply-To: <20210805151300.330412127@linutronix.de>

From: Thomas Gleixner <tglx@linutronix.de>

Waiting for spinlocks and rwlocks on non-RT enabled kernels is task::state
preserving. Any wakeup which matches the state is valid.

RT enabled kernels substitute them with 'sleeping' spinlocks. This creates
an issue vs. task::state.

In order to block on the lock, the task has to overwrite task::state, and a
subsequent wakeup issued by the unlocker sets the state back to
TASK_RUNNING. As a consequence the task loses both the state which was set
before the lock acquisition and any regular wakeup targeted at the task
while it is blocked on the lock.
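
A minimal sketch of the lost state and the lost wakeup (hypothetical
caller and lock, for illustration only; not taken from the patch):

	set_current_state(TASK_INTERRUPTIBLE);	/* caller's intended state */
	spin_lock(&lock);	/* RT: may block; blocking overwrites
				 * task::state and the unlocker's wakeup
				 * leaves TASK_RUNNING behind. A regular
				 * wakeup arriving meanwhile does not match
				 * the lock wait state and is dropped. */
	spin_unlock(&lock);
	schedule();		/* returns immediately instead of sleeping
				 * interruptibly */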

To handle this gracefully, add a 'saved_state' member to task_struct which
is used in the following way:

 1) When a task blocks on a 'sleeping' spinlock, the current state is
    saved in task::saved_state before task::state is set to
    TASK_RTLOCK_WAIT.

 2) When the task unblocks and has acquired the lock, it restores the
    saved state.

 3) When a regular wakeup happens for a task while it is blocked on the
    lock, the state change of that wakeup is redirected to operate on
    task::saved_state.

    This is also required when task::state is TASK_RUNNING because the
    task might already have been woken up from the lock wait and might
    not yet have restored the saved state. See the sketch below.
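
With saved_state in place, the same sequence as in the sketch above
behaves like on a non-RT kernel (again hypothetical caller and lock; the
numbers refer to the list items):

	set_current_state(TASK_INTERRUPTIBLE);
	spin_lock(&lock);	/* 1) TASK_INTERRUPTIBLE is saved in
				 *    task::saved_state and task::state is
				 *    set to TASK_RTLOCK_WAIT
				 * 3) a regular wakeup arriving now is
				 *    redirected to task::saved_state and
				 *    sets it to TASK_RUNNING
				 * 2) on acquisition task::saved_state is
				 *    restored into task::state */
	spin_unlock(&lock);
	schedule();		/* sleeps interruptibly, or returns
				 * immediately when a regular wakeup was
				 * redirected while the task was blocked */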

To complete this, provide the necessary helpers to save and restore the
saved state along with documentation of how the RT lock blocking is
supposed to work.

For non-RT kernels there is no functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/sched.h |   66 ++++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/core.c   |   33 +++++++++++++++++++++++++
 2 files changed, 99 insertions(+)
---
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -143,9 +143,22 @@ struct task_group;
 		current->task_state_change = _THIS_IP_;			\
 	} while (0)
 
+# define debug_rtlock_wait_set_state()					\
+	do {								 \
+		current->saved_state_change = current->task_state_change;\
+		current->task_state_change = _THIS_IP_;			 \
+	} while (0)
+
+# define debug_rtlock_wait_restore_state()				\
+	do {								 \
+		current->task_state_change = current->saved_state_change;\
+	} while (0)
+
 #else
 # define debug_normal_state_change(cond)	do { } while (0)
 # define debug_special_state_change(cond)	do { } while (0)
+# define debug_rtlock_wait_set_state()		do { } while (0)
+# define debug_rtlock_wait_restore_state()	do { } while (0)
 #endif
 
 /*
@@ -213,6 +226,51 @@ struct task_group;
 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
 	} while (0)
 
+/*
+ * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
+ *
+ * RT's spin/rwlock substitutions are state preserving. The state of the
+ * task when blocking on the lock is saved in task_struct::saved_state and
+ * restored after the lock has been acquired.  These operations are
+ * serialized by task_struct::pi_lock against try_to_wake_up(). Any wakeup
+ * unrelated to the RT lock while the task is blocked on the lock is
+ * redirected to operate on task_struct::saved_state to ensure that it
+ * is not dropped. On restore, task_struct::saved_state is set to
+ * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
+ *
+ * The lock operation looks like this:
+ *
+ *	current_save_and_set_rtlock_wait_state();
+ *	for (;;) {
+ *		if (try_lock())
+ *			break;
+ *		raw_spin_unlock_irq(&lock->wait_lock);
+ *		schedule_rtlock();
+ *		raw_spin_lock_irq(&lock->wait_lock);
+ *		set_current_state(TASK_RTLOCK_WAIT);
+ *	}
+ *	current_restore_rtlock_saved_state();
+ */
+#define current_save_and_set_rtlock_wait_state()			\
+	do {								\
+		lockdep_assert_irqs_disabled();				\
+		raw_spin_lock(&current->pi_lock);			\
+		current->saved_state = current->__state;		\
+		debug_rtlock_wait_set_state();				\
+		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
+		raw_spin_unlock(&current->pi_lock);			\
+	} while (0)
+
+#define current_restore_rtlock_saved_state()				\
+	do {								\
+		lockdep_assert_irqs_disabled();				\
+		raw_spin_lock(&current->pi_lock);			\
+		debug_rtlock_wait_restore_state();			\
+		WRITE_ONCE(current->__state, current->saved_state);	\
+		current->saved_state = TASK_RUNNING;			\
+		raw_spin_unlock(&current->pi_lock);			\
+	} while (0)
+
 #define get_current_state()	READ_ONCE(current->__state)
 
 /* Task command name length: */
@@ -668,6 +726,11 @@ struct task_struct {
 #endif
 	unsigned int			__state;
 
+#ifdef CONFIG_PREEMPT_RT
+	/* saved state for "spinlock sleepers" */
+	unsigned int			saved_state;
+#endif
+
 	/*
 	 * This begins the randomizable portion of task_struct. Only
 	 * scheduling-critical items should be added above here.
@@ -1357,6 +1420,9 @@ struct task_struct {
 	struct kmap_ctrl		kmap_ctrl;
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	unsigned long			task_state_change;
+# ifdef CONFIG_PREEMPT_RT
+	unsigned long			saved_state_change;
+# endif
 #endif
 	int				pagefault_disabled;
 #ifdef CONFIG_MMU
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3568,14 +3568,47 @@ static void ttwu_queue(struct task_struc
  *
  * The caller holds p::pi_lock if p != current or has preemption
  * disabled when p == current.
+ *
+ * The rules of PREEMPT_RT saved_state:
+ *
+ *   The related locking code always holds p::pi_lock when updating
+ *   p::saved_state, which means the code is fully serialized in both cases.
+ *
+ *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT with no
+ *   other bits set. This allows all wakeup scenarios to be distinguished.
  */
 static __always_inline
 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
 {
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
+		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
+			     state != TASK_RTLOCK_WAIT);
+	}
+
 	if (READ_ONCE(p->__state) & state) {
 		*success = 1;
 		return true;
 	}
+
+#ifdef CONFIG_PREEMPT_RT
+	/*
+	 * Saved state preserves the task state across blocking on
+	 * a RT lock.  If the state matches, set p::saved_state to
+	 * TASK_RUNNING, but do not wake the task because it waits
+	 * for a lock wakeup. Also indicate success because from
+	 * the regular waker's point of view this has succeeded.
+	 *
+	 * After acquiring the lock the task will restore p::state
+	 * from p::saved_state which ensures that the regular
+	 * wakeup is not lost. The restore will also set
+	 * p::saved_state to TASK_RUNNING so any further tests will
+	 * not result in false positives vs. @success.
+	 */
+	if (p->saved_state & state) {
+		p->saved_state = TASK_RUNNING;
+		*success = 1;
+	}
+#endif
 	return false;
 }
 


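For illustration, one possible sequence under the rules above (a sketch,
not part of the patch): a task is blocked on an RT lock with task::state
== TASK_RTLOCK_WAIT and task::saved_state == TASK_INTERRUPTIBLE when a
signal wakeup arrives:

	try_to_wake_up(p, TASK_INTERRUPTIBLE, ...)
	  ttwu_state_match():
	    p->__state (TASK_RTLOCK_WAIT) does not match the wakeup state
	    p->saved_state & TASK_INTERRUPTIBLE matches:
	      p->saved_state = TASK_RUNNING; *success = 1;
	    return false;	/* the task is not woken here; the lock
				 * wakeup will do that */

	/* later, after acquiring the lock */
	current_restore_rtlock_saved_state():
	  p->__state = p->saved_state;	/* TASK_RUNNING */
	  p->saved_state = TASK_RUNNING;

The subsequent schedule() in the caller returns immediately, so the
regular wakeup is not lost and the waker's success report stays correct.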

Thread overview: 79+ messages
2021-08-05 15:13 [patch V3 00/64] locking, sched: The PREEMPT-RT locking infrastructure Thomas Gleixner
2021-08-05 15:13 ` [patch V3 01/64] sched: Split out the wakeup state check Thomas Gleixner
2021-08-05 15:13 ` [patch V3 02/64] sched: Introduce TASK_RTLOCK_WAIT Thomas Gleixner
2021-08-05 15:13 ` [patch V3 03/64] sched: Reorganize current::__state helpers Thomas Gleixner
2021-08-05 15:13 ` Thomas Gleixner [this message]
2021-08-05 15:13 ` [patch V3 05/64] sched: Rework the __schedule() preempt argument Thomas Gleixner
2021-08-05 15:13 ` [patch V3 06/64] sched: Provide schedule point for RT locks Thomas Gleixner
2021-08-05 15:13 ` [patch V3 07/64] sched/wake_q: Provide WAKE_Q_HEAD_INITIALIZER Thomas Gleixner
2021-08-05 15:13 ` [patch V3 08/64] media/atomisp: Use lockdep instead of *mutex_is_locked() Thomas Gleixner
2021-08-05 15:13 ` [patch V3 09/64] rtmutex: Remove rt_mutex_is_locked() Thomas Gleixner
2021-08-05 15:13 ` [patch V3 10/64] rtmutex: Convert macros to inlines Thomas Gleixner
2021-08-05 15:13 ` [patch V3 11/64] rtmutex: Switch to try_cmpxchg() Thomas Gleixner
2021-08-05 15:13 ` [patch V3 12/64] rtmutex: Split API and implementation Thomas Gleixner
2021-08-05 15:13 ` [patch V3 13/64] rtmutex: Split out the inner parts of struct rtmutex Thomas Gleixner
2021-08-05 15:13 ` [patch V3 14/64] locking/rtmutex: Provide rt_mutex_slowlock_locked() Thomas Gleixner
2021-08-05 15:13 ` [patch V3 15/64] rtmutex: Provide rt_mutex_base_is_locked() Thomas Gleixner
2021-08-08 20:41   ` Davidlohr Bueso
2021-08-09 10:18     ` Thomas Gleixner
2021-08-05 15:13 ` [patch V3 16/64] locking: Add base code for RT rw_semaphore and rwlock Thomas Gleixner
2021-08-05 15:13 ` [patch V3 17/64] locking/rwsem: Add rtmutex based R/W semaphore implementation Thomas Gleixner
2021-08-05 15:13 ` [patch V3 18/64] locking/rtmutex: Add wake_state to rt_mutex_waiter Thomas Gleixner
2021-08-05 15:13 ` [patch V3 19/64] locking/rtmutex: Provide rt_wake_q and helpers Thomas Gleixner
2021-08-05 15:13 ` [patch V3 20/64] locking/rtmutex: Use rt_mutex_wake_q_head Thomas Gleixner
2021-08-05 15:13 ` [patch V3 21/64] locking/rtmutex: Prepare RT rt_mutex_wake_q for RT locks Thomas Gleixner
2021-08-05 15:13 ` [patch V3 22/64] locking/rtmutex: Guard regular sleeping locks specific functions Thomas Gleixner
2021-08-05 15:13 ` [patch V3 23/64] locking/spinlock: Split the lock types header Thomas Gleixner
2021-08-05 15:13 ` [patch V3 24/64] locking/rtmutex: Prevent future include recursion hell Thomas Gleixner
2021-08-05 15:13 ` [patch V3 25/64] locking/lockdep: Reduce includes in debug_locks.h Thomas Gleixner
2021-08-05 15:13 ` [patch V3 26/64] rbtree: Split out the rbtree type definitions Thomas Gleixner
2021-08-05 15:13 ` [patch V3 27/64] locking/rtmutex: Include only rbtree types Thomas Gleixner
2021-08-05 15:13 ` [patch V3 28/64] locking/spinlock: Provide RT specific spinlock type Thomas Gleixner
2021-08-05 15:13 ` [patch V3 29/64] locking/spinlock: Provide RT variant header Thomas Gleixner
2021-08-05 15:13 ` [patch V3 30/64] locking/rtmutex: Provide the spin/rwlock core lock function Thomas Gleixner
2021-08-05 15:13 ` [patch V3 31/64] locking/spinlock: Provide RT variant Thomas Gleixner
2021-08-05 15:13 ` [patch V3 32/64] locking/rwlock: " Thomas Gleixner
2021-08-05 15:13 ` [patch V3 33/64] locking/mutex: Consolidate core headers Thomas Gleixner
2021-08-05 15:13 ` [patch V3 34/64] locking/mutex: Move waiter to core header Thomas Gleixner
2021-08-05 15:13 ` [patch V3 35/64] locking/ww_mutex: Move ww_mutex declarations into ww_mutex.h Thomas Gleixner
2021-08-05 15:13 ` [patch V3 36/64] locking/mutex: Make mutex::wait_lock raw Thomas Gleixner
2021-08-05 15:13 ` [patch V3 37/64] locking/ww_mutex: Simplify lockdep annotation Thomas Gleixner
2021-08-05 15:13 ` [patch V3 38/64] locking/ww_mutex: Gather mutex_waiter initialization Thomas Gleixner
2021-08-05 15:13 ` [patch V3 39/64] locking/ww_mutex: Split up ww_mutex_unlock() Thomas Gleixner
2021-08-05 15:13 ` [patch V3 40/64] locking/ww_mutex: Split W/W implementation logic Thomas Gleixner
2021-08-05 15:13 ` [patch V3 41/64] locking/ww_mutex: Remove __sched annotation Thomas Gleixner
2021-08-05 15:13 ` [patch V3 42/64] locking/ww_mutex: Abstract waiter iteration Thomas Gleixner
2021-08-05 15:13 ` [patch V3 43/64] locking/ww_mutex: Abstract waiter enqueueing Thomas Gleixner
2021-08-05 15:13 ` [patch V3 44/64] locking/ww_mutex: Abstract mutex accessors Thomas Gleixner
2021-08-05 15:13 ` [patch V3 45/64] locking/ww_mutex: Abstract mutex types Thomas Gleixner
2021-08-05 15:13 ` [patch V3 46/64] locking/ww_mutex: Abstract internal lock access Thomas Gleixner
2021-08-05 15:13 ` [patch V3 47/64] locking/ww_mutex: Implement rt_mutex accessors Thomas Gleixner
2021-08-05 15:13 ` [patch V3 48/64] locking/ww_mutex: Add RT priority to W/W order Thomas Gleixner
2021-08-06 10:48   ` Peter Zijlstra
2021-08-06 11:50     ` Thomas Gleixner
2021-08-05 15:13 ` [patch V3 49/64] locking/ww_mutex: Add rt_mutex based lock type and accessors Thomas Gleixner
2021-08-05 15:13 ` [patch V3 50/64] locking/rtmutex: Extend the rtmutex core to support ww_mutex Thomas Gleixner
2021-08-06 11:00   ` Peter Zijlstra
2021-08-06 11:19     ` Thomas Gleixner
2021-08-06 13:48     ` Peter Zijlstra
2021-08-07 20:07       ` Thomas Gleixner
2021-08-05 15:13 ` [patch V3 51/64] locking/ww_mutex: Implement rtmutex based ww_mutex API functions Thomas Gleixner
2021-08-05 15:13 ` [patch V3 52/64] locking/rtmutex: Add mutex variant for RT Thomas Gleixner
2021-08-05 15:13 ` [patch V3 53/64] lib/test_lockup: Adapt to changed variables Thomas Gleixner
2021-08-05 15:13 ` [patch V3 54/64] futex: Validate waiter correctly in futex_proxy_trylock_atomic() Thomas Gleixner
2021-08-05 15:13 ` [patch V3 55/64] futex: Cleanup stale comments Thomas Gleixner
2021-08-05 15:13 ` [patch V3 56/64] futex: Correct the number of requeued waiters for PI Thomas Gleixner
2021-08-08 17:05   ` Davidlohr Bueso
2021-08-09  8:18     ` Thomas Gleixner
2021-08-09 10:52       ` Thomas Gleixner
2021-08-05 15:13 ` [patch V3 57/64] futex: Restructure futex_requeue() Thomas Gleixner
2021-08-05 15:13 ` [patch V3 58/64] futex: Clarify comment in futex_requeue() Thomas Gleixner
2021-08-08 18:43   ` Davidlohr Bueso
2021-08-09  8:18     ` Thomas Gleixner
2021-08-05 15:13 ` [patch V3 59/64] futex: Simplify handle_early_requeue_pi_wakeup() Thomas Gleixner
2021-08-05 15:14 ` [patch V3 60/64] futex: Prevent requeue_pi() lock nesting issue on RT Thomas Gleixner
2021-08-05 15:14 ` [patch V3 61/64] rtmutex: Prevent lockdep false positive with PI futexes Thomas Gleixner
2021-08-05 15:14 ` [patch V3 62/64] preempt: Adjust PREEMPT_LOCK_OFFSET for RT Thomas Gleixner
2021-08-05 15:14 ` [patch V3 63/64] locking/rtmutex: Implement equal priority lock stealing Thomas Gleixner
2021-08-05 15:14 ` [patch V3 64/64] locking/rtmutex: Add adaptive spinwait mechanism Thomas Gleixner
2021-08-09 20:28   ` Davidlohr Bueso
