From: Nicholas Piggin <npiggin@gmail.com>
To: Peter Zijlstra <peterz@infradead.org>
Cc: Nicholas Piggin <npiggin@gmail.com>,
	Ingo Molnar <mingo@redhat.com>, Will Deacon <will@kernel.org>,
	Waiman Long <longman@redhat.com>,
	Boqun Feng <boqun.feng@gmail.com>,
	"linux-kernel @ vger . kernel . org"
	<linux-kernel@vger.kernel.org>
Subject: [PATCH v2 05/12] locking/qspinlock: be less clever with the preprocessor
Date: Wed, 13 Jul 2022 17:06:57 +1000
Message-ID: <20220713070704.308394-6-npiggin@gmail.com>
In-Reply-To: <20220713070704.308394-1-npiggin@gmail.com>

Stop qspinlock.c from including itself, and avoid most of the
preprocessor-based function renaming.

This is mostly done by having the common slowpath code take a 'bool
paravirt' argument and branching on it. __always_inline keeps the
paravirt and non-paravirt cases separate, and because 'paravirt' is a
compile-time constant at each call site, the compiler can constant-fold
the 'paravirt' tests and discard the dead branches.
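
A minimal userspace sketch of the pattern (an illustration with
made-up names, not the kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	#define __always_inline inline __attribute__((__always_inline__))

	static __always_inline void do_lock(bool paravirt)
	{
		/*
		 * 'paravirt' is a literal constant at every call site,
		 * so this test folds away and the dead branch is
		 * dropped from each inlined copy.
		 */
		if (paravirt)
			puts("pv hooks run");
		else
			puts("native path");
	}

	void native_lock(void) { do_lock(false); }
	void pv_lock(void)     { do_lock(true); }

	int main(void)
	{
		native_lock();	/* prints "native path" */
		pv_lock();	/* prints "pv hooks run" */
		return 0;
	}

The same constant-folding produces two specialised copies of
queued_spin_lock_mcs_queue() without any preprocessor renaming.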

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 kernel/locking/qspinlock.c          | 116 ++++++++++++----------------
 kernel/locking/qspinlock_paravirt.h |  10 +--
 2 files changed, 52 insertions(+), 74 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 95bf24d276c3..037bd5440cfd 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -11,8 +11,6 @@
  *          Peter Zijlstra <peterz@infradead.org>
  */
 
-#ifndef _GEN_PV_LOCK_SLOWPATH
-
 #include <linux/smp.h>
 #include <linux/bug.h>
 #include <linux/cpumask.h>
@@ -287,35 +285,21 @@ static __always_inline void set_locked(struct qspinlock *lock)
 	WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
 }
 
-
-/*
- * Generate the native code for queued_spin_unlock_slowpath(); provide NOPs for
- * all the PV callbacks.
- */
-
-static __always_inline void __pv_init_node(struct qnode *node) { }
-static __always_inline void __pv_wait_node(struct qnode *node,
-					   struct qnode *prev) { }
-static __always_inline void __pv_kick_node(struct qspinlock *lock,
-					   struct qnode *node) { }
-static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
-						   struct qnode *node)
-						   { return 0; }
-
-#define pv_enabled()		false
-
-#define pv_init_node		__pv_init_node
-#define pv_wait_node		__pv_wait_node
-#define pv_kick_node		__pv_kick_node
-#define pv_wait_head_or_lock	__pv_wait_head_or_lock
-
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
-#endif
-
-#endif /* _GEN_PV_LOCK_SLOWPATH */
+#include "qspinlock_paravirt.h"
+#else /* CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void pv_init_node(struct qnode *node) { }
+static __always_inline void pv_wait_node(struct qnode *node,
+					 struct qnode *prev) { }
+static __always_inline void pv_kick_node(struct qspinlock *lock,
+					 struct qnode *node) { }
+static __always_inline u32  pv_wait_head_or_lock(struct qspinlock *lock,
+						 struct qnode *node)
+						 { return 0; }
+static __always_inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock) { BUILD_BUG(); }
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
 
-static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
+static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)
 {
 	struct qnode *prev, *next, *node;
 	u32 val, old, tail;
@@ -340,8 +324,13 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
 	 */
 	if (unlikely(idx >= MAX_NODES)) {
 		lockevent_inc(lock_no_node);
-		while (!queued_spin_trylock(lock))
-			cpu_relax();
+		if (paravirt) {
+			while (!pv_hybrid_queued_unfair_trylock(lock))
+				cpu_relax();
+		} else {
+			while (!queued_spin_trylock(lock))
+				cpu_relax();
+		}
 		goto release;
 	}
 
@@ -361,15 +350,21 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
 
 	node->locked = 0;
 	node->next = NULL;
-	pv_init_node(node);
+	if (paravirt)
+		pv_init_node(node);
 
 	/*
 	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
 	 * attempt the trylock once more in the hope someone let go while we
 	 * weren't watching.
 	 */
-	if (queued_spin_trylock(lock))
-		goto release;
+	if (paravirt) {
+		if (pv_hybrid_queued_unfair_trylock(lock))
+			goto release;
+	} else {
+		if (queued_spin_trylock(lock))
+			goto release;
+	}
 
 	/*
 	 * Ensure that the initialisation of @node is complete before we
@@ -398,7 +393,8 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
 		/* Link @node into the waitqueue. */
 		WRITE_ONCE(prev->next, node);
 
-		pv_wait_node(node, prev);
+		if (paravirt)
+			pv_wait_node(node, prev);
 		/* Wait for mcs node lock to be released */
 		smp_cond_load_acquire(&node->locked, VAL);
 
@@ -434,8 +430,10 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
 	 * If PV isn't active, 0 will be returned instead.
 	 *
 	 */
-	if ((val = pv_wait_head_or_lock(lock, node)))
-		goto locked;
+	if (paravirt) {
+		if ((val = pv_wait_head_or_lock(lock, node)))
+			goto locked;
+	}
 
 	val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));
 
@@ -480,7 +478,8 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
 		next = smp_cond_load_relaxed(&node->next, (VAL));
 
 	smp_store_release(&next->locked, 1); /* unlock the mcs node lock */
-	pv_kick_node(lock, next);
+	if (paravirt)
+		pv_kick_node(lock, next);
 
 release:
 	trace_contention_end(lock, 0);
@@ -512,13 +511,12 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
  * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
  *   queue               :         ^--'                             :
  */
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
+#endif
+
 void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
-	if (pv_enabled()) {
-		queued_spin_lock_mcs_queue(lock);
-		return;
-	}
-
 	if (virt_spin_lock(lock))
 		return;
 
@@ -592,31 +590,17 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 */
 queue:
 	lockevent_inc(lock_slowpath);
-	queued_spin_lock_mcs_queue(lock);
+	queued_spin_lock_mcs_queue(lock, false);
 }
 EXPORT_SYMBOL(queued_spin_lock_slowpath);
 
-/*
- * Generate the paravirt code for queued_spin_unlock_slowpath().
- */
-#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-#define _GEN_PV_LOCK_SLOWPATH
-
-#undef  pv_enabled
-#define pv_enabled()	true
-
-#undef pv_init_node
-#undef pv_wait_node
-#undef pv_kick_node
-#undef pv_wait_head_or_lock
-
-#define queued_spin_lock_mcs_queue	__pv_queued_spin_lock_mcs_queue
-
-#undef  queued_spin_lock_slowpath
-#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath
-
-#include "qspinlock_paravirt.h"
-#include "qspinlock.c"
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#undef queued_spin_lock_slowpath
+void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	queued_spin_lock_mcs_queue(lock, true);
+}
+EXPORT_SYMBOL(__pv_queued_spin_lock_slowpath);
 
 bool nopvspin __initdata;
 static __init int parse_nopvspin(char *arg)
@@ -625,4 +609,4 @@ static __init int parse_nopvspin(char *arg)
 	return 0;
 }
 early_param("nopvspin", parse_nopvspin);
-#endif
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 97385861adc2..f1922e3a0f7d 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -1,8 +1,4 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GEN_PV_LOCK_SLOWPATH
-#error "do not include this file"
-#endif
-
 #include <linux/hash.h>
 #include <linux/memblock.h>
 #include <linux/debug_locks.h>
@@ -50,9 +46,8 @@ enum vcpu_state {
 /*
  * Hybrid PV queued/unfair lock
  *
- * By replacing the regular queued_spin_trylock() with the function below,
- * it will be called once when a lock waiter enter the PV slowpath before
- * being queued.
+ * This function is called once when a lock waiter enters the PV slowpath
+ * before being queued.
  *
  * The pending bit is set by the queue head vCPU of the MCS wait queue in
  * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
@@ -71,7 +66,6 @@ enum vcpu_state {
  * queued lock (no lock starvation) and an unfair lock (good performance
  * on not heavily contended locks).
  */
-#define queued_spin_trylock(l)	pv_hybrid_queued_unfair_trylock(l)
 static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 {
 	/*
-- 
2.35.1
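
To make the hybrid queued/unfair comment in qspinlock_paravirt.h above
concrete, here is a minimal userspace sketch of the idea (a hypothetical
simplification with made-up names, not the kernel implementation):

	#include <stdatomic.h>
	#include <stdbool.h>

	#define LOCKED	0x1	/* stand-in for _Q_LOCKED_VAL */
	#define PENDING	0x2	/* stand-in for _Q_PENDING_VAL */

	static bool hybrid_trylock(atomic_int *val)
	{
		int v = atomic_load_explicit(val, memory_order_relaxed);

		/*
		 * Steal only while the lock is free and the queue head
		 * has not raised the pending bit; the acquire CAS takes
		 * the lock on success.
		 */
		return (v & (LOCKED | PENDING)) == 0 &&
		       atomic_compare_exchange_strong_explicit(val, &v,
				v | LOCKED, memory_order_acquire,
				memory_order_relaxed);
	}

Only un-queued waiters can steal, and only until the queue head sets
pending, so the lock keeps queued-lock fairness under contention while
retaining unfair-lock performance when lightly contended.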

