From: Juri Lelli <juri.lelli@redhat.com>
To: peterz@infradead.org, mingo@redhat.com
Cc: rostedt@goodmis.org, tglx@linutronix.de,
	linux-kernel@vger.kernel.org, luca.abeni@santannapisa.it,
	claudio@evidence.eu.com, tommaso.cucinotta@santannapisa.it,
	alessio.balsini@gmail.com, bristot@redhat.com,
	will.deacon@arm.com, andrea.parri@amarulasolutions.com,
	dietmar.eggemann@arm.com, patrick.bellasi@arm.com,
	henrik@austad.us, linux-rt-users@vger.kernel.org,
	Juri Lelli <juri.lelli@redhat.com>
Subject: [RFD/RFC PATCH 2/8] locking/mutex: Remove wakeups from under mutex::wait_lock
Date: Tue,  9 Oct 2018 11:24:28 +0200
Message-ID: <20181009092434.26221-3-juri.lelli@redhat.com>
In-Reply-To: <20181009092434.26221-1-juri.lelli@redhat.com>

From: Peter Zijlstra <peterz@infradead.org>

In preparation for nesting mutex::wait_lock under rq::lock, we need to
remove wakeups from under it: wake_up_process() takes rq::lock, so
issuing wakeups while holding wait_lock would invert the new nesting
order. Collect the wakeups on a wake_q instead and issue them once
wait_lock has been dropped.
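
The conversion follows the usual wake_q idiom: queue wakeups while
wait_lock is held, issue them only after it is dropped. A minimal
sketch of the pattern (illustrative, not a literal excerpt from this
patch; "waiter" stands for the pending mutex_waiter):

	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock(&lock->wait_lock);
	/* Queue the wakeup; wake_q_add() does not take rq::lock. */
	wake_q_add(&wake_q, waiter->task);
	raw_spin_unlock(&lock->wait_lock);

	/* Drain the deferred wakeups; rq::lock is taken only here. */
	wake_up_q(&wake_q);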

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
[Heavily changed after 55f036ca7e74 ("locking: WW mutex cleanup") and
08295b3b5bee ("locking: Implement an algorithm choice for Wound-Wait
mutexes")]
Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
---
 kernel/locking/mutex.c | 43 +++++++++++++++++++++++++++---------------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index df34ce70fcde..f37402cd8496 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -338,7 +338,7 @@ __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
  */
 static bool __sched
 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
-	       struct ww_acquire_ctx *ww_ctx)
+	       struct ww_acquire_ctx *ww_ctx, struct wake_q_head *wake_q)
 {
 	if (!ww_ctx->is_wait_die)
 		return false;
@@ -346,7 +346,7 @@ __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
 	if (waiter->ww_ctx->acquired > 0 &&
 			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
 		debug_mutex_wake_waiter(lock, waiter);
-		wake_up_process(waiter->task);
+		wake_q_add(wake_q, waiter->task); // XXX
 	}
 
 	return true;
@@ -361,7 +361,8 @@ __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
  */
 static bool __ww_mutex_wound(struct mutex *lock,
 			     struct ww_acquire_ctx *ww_ctx,
-			     struct ww_acquire_ctx *hold_ctx)
+			     struct ww_acquire_ctx *hold_ctx,
+			     struct wake_q_head *wake_q)
 {
 	struct task_struct *owner = __mutex_owner(lock);
 
@@ -393,7 +394,7 @@ static bool __ww_mutex_wound(struct mutex *lock,
 		 * wakeup pending to re-read the wounded state.
 		 */
 		if (owner != current)
-			wake_up_process(owner);
+			wake_q_add(wake_q, owner); // XXX
 
 		return true;
 	}
@@ -414,7 +415,9 @@ static bool __ww_mutex_wound(struct mutex *lock,
  * The current task must not be on the wait list.
  */
 static void __sched
-__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+__ww_mutex_check_waiters(struct mutex *lock,
+			 struct ww_acquire_ctx *ww_ctx,
+			 struct wake_q_head *wake_q)
 {
 	struct mutex_waiter *cur;
 
@@ -424,8 +427,8 @@ __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
 		if (!cur->ww_ctx)
 			continue;
 
-		if (__ww_mutex_die(lock, cur, ww_ctx) ||
-		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
+		if (__ww_mutex_die(lock, cur, ww_ctx, wake_q) ||
+		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx, wake_q))
 			break;
 	}
 }
@@ -437,6 +440,8 @@ __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
 static __always_inline void
 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
+	DEFINE_WAKE_Q(wake_q);
+
 	ww_mutex_lock_acquired(lock, ctx);
 
 	/*
@@ -465,8 +470,10 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	 * die or wound us.
 	 */
 	raw_spin_lock(&lock->base.wait_lock);
-	__ww_mutex_check_waiters(&lock->base, ctx);
+	__ww_mutex_check_waiters(&lock->base, ctx, &wake_q);
 	raw_spin_unlock(&lock->base.wait_lock);
+
+	wake_up_q(&wake_q);
 }
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -824,7 +831,8 @@ __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
 static inline int __sched
 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
 		      struct mutex *lock,
-		      struct ww_acquire_ctx *ww_ctx)
+		      struct ww_acquire_ctx *ww_ctx,
+		      struct wake_q_head *wake_q)
 {
 	struct mutex_waiter *cur;
 	struct list_head *pos;
@@ -868,7 +876,7 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
 		pos = &cur->list;
 
 		/* Wait-Die: ensure younger waiters die. */
-		__ww_mutex_die(lock, cur, ww_ctx);
+		__ww_mutex_die(lock, cur, ww_ctx, wake_q);
 	}
 
 	__mutex_add_waiter(lock, waiter, pos);
@@ -886,7 +894,7 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
 		 * such that either we or the fastpath will wound @ww->ctx.
 		 */
 		smp_mb();
-		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
+		__ww_mutex_wound(lock, ww_ctx, ww->ctx, wake_q);
 	}
 
 	return 0;
@@ -900,6 +908,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		    struct lockdep_map *nest_lock, unsigned long ip,
 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
+	DEFINE_WAKE_Q(wake_q);
 	struct mutex_waiter waiter;
 	bool first = false;
 	struct ww_mutex *ww;
@@ -940,7 +949,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 */
 	if (__mutex_trylock(lock)) {
 		if (use_ww_ctx && ww_ctx)
-			__ww_mutex_check_waiters(lock, ww_ctx);
+			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
 
 		goto skip_wait;
 	}
@@ -962,7 +971,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * Add in stamp order, waking up waiters that must kill
 		 * themselves.
 		 */
-		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
+		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx, &wake_q);
 		if (ret)
 			goto err_early_kill;
 
@@ -1034,7 +1043,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 */
 		if (!ww_ctx->is_wait_die &&
 		    !__mutex_waiter_is_first(lock, &waiter))
-			__ww_mutex_check_waiters(lock, ww_ctx);
+			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
 	}
 
 	mutex_remove_waiter(lock, &waiter, current);
@@ -1051,6 +1060,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		ww_mutex_lock_acquired(ww, ww_ctx);
 
 	raw_spin_unlock(&lock->wait_lock);
+	wake_up_q(&wake_q);
 	preempt_enable();
 	return 0;
 
@@ -1061,6 +1071,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	raw_spin_unlock(&lock->wait_lock);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, 1, ip);
+	wake_up_q(&wake_q);
 	preempt_enable();
 	return ret;
 }
@@ -1244,9 +1255,11 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 	if (owner & MUTEX_FLAG_HANDOFF)
 		__mutex_handoff(lock, next);
 
+	preempt_disable(); // XXX unlock->wakeup inversion like
 	raw_spin_unlock(&lock->wait_lock);
 
-	wake_up_q(&wake_q);
+	wake_up_q(&wake_q); // XXX must force resched on proxy
+	preempt_enable();
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
-- 
2.17.1
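
A note on the preempt_disable()/preempt_enable() pair added to
__mutex_unlock_slowpath() (flagged XXX above): with the wakeups now
deferred, the releasing task could be preempted right after dropping
wait_lock and before draining the wake_q, delaying the wakeup of the
intended next owner. Disabling preemption across that window keeps
unlock and wakeup together. The interleaving being avoided looks
roughly like this (a hedged sketch of one reading of the XXX comment,
not code from the patch):

	raw_spin_unlock(&lock->wait_lock);
	/* <-- preemption here would run some other task before the
	 *     queued waiters are made runnable, i.e. the
	 *     unlock->wakeup inversion the XXX comment refers to. */
	wake_up_q(&wake_q);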


Thread overview: 32+ messages
2018-10-09  9:24 [RFD/RFC PATCH 0/8] Towards implementing proxy execution Juri Lelli
2018-10-09  9:24 ` [RFD/RFC PATCH 1/8] locking/mutex: Convert mutex::wait_lock to raw_spinlock_t Juri Lelli
2018-10-09  9:24 ` Juri Lelli [this message]
2018-10-09  9:24 ` [RFD/RFC PATCH 3/8] locking/mutex: Rework task_struct::blocked_on Juri Lelli
2018-10-10 10:43   ` luca abeni
2018-10-10 11:06     ` Juri Lelli
2018-10-09  9:24 ` [RFD/RFC PATCH 4/8] sched: Split scheduler execution context Juri Lelli
2019-05-06 11:06   ` Claudio Scordino
2018-10-09  9:24 ` [RFD/RFC PATCH 5/8] sched: Add proxy execution Juri Lelli
2018-10-10 11:10   ` luca abeni
2018-10-11 12:34     ` Juri Lelli
2018-10-11 12:53       ` Peter Zijlstra
2018-10-11 13:42         ` Juri Lelli
2018-10-12  7:22         ` luca abeni
2018-10-12  8:30           ` Juri Lelli
2018-10-09  9:24 ` [RFD/RFC PATCH 6/8] locking/mutex: make mutex::wait_lock irq safe Juri Lelli
2018-10-09  9:24 ` [RFD/RFC PATCH 7/8] sched: Ensure blocked_on is always guarded by blocked_lock Juri Lelli
2018-10-09  9:24 ` [RFD/RFC PATCH 8/8] sched: Fixup task CPUs for potential proxies Juri Lelli
2018-10-09  9:44 ` [RFD/RFC PATCH 0/8] Towards implementing proxy execution Peter Zijlstra
2018-10-09  9:58   ` Juri Lelli
2018-10-09 10:51 ` Sebastian Andrzej Siewior
2018-10-09 11:56   ` Daniel Bristot de Oliveira
2018-10-09 12:35     ` Juri Lelli
2018-10-10 10:34 ` luca abeni
2018-10-10 10:57   ` Peter Zijlstra
2018-10-10 11:16     ` luca abeni
2018-10-10 11:23       ` Peter Zijlstra
2018-10-10 12:27         ` Juri Lelli
2018-10-10 11:56 ` Henrik Austad
2018-10-10 12:24   ` Peter Zijlstra
2018-10-10 13:48     ` Daniel Bristot de Oliveira
2018-10-10 12:36   ` Juri Lelli
