From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	George Dunlap <george.dunlap@citrix.com>,
	Jan Beulich <jbeulich@suse.com>, Julien Grall <julien@xen.org>,
	Stefano Stabellini <sstabellini@kernel.org>, Wei Liu <wl@xen.org>
Subject: [PATCH v4 08/12] xen/spinlock: add another function level
Date: Tue, 12 Dec 2023 10:47:21 +0100
Message-ID: <20231212094725.22184-9-jgross@suse.com>
In-Reply-To: <20231212094725.22184-1-jgross@suse.com>

Add another function level in spinlock.c, hiding the spinlock_t layout
from the low-level locking code.

This is done in preparation for introducing rspinlock_t for recursive
locks without having to duplicate all of the locking code.
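
As an illustration of the new layering (a simplified sketch taken from
the patch below, not additional code): the common helpers now take the
individual lock components as parameters, so only the thin wrappers
need to know the spinlock_t layout:

    /* Common code: operates on the components, not on spinlock_t. */
    static void always_inline spin_lock_common(spinlock_tickets_t *t,
                                               union lock_debug *debug,
                                               struct lock_profile *profile,
                                               void (*cb)(void *data),
                                               void *data);

    /* Thin wrapper: decomposes spinlock_t and calls the common code. */
    void _spin_lock(spinlock_t *lock)
    {
        spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR,
                         NULL, NULL);
    }

A later rspinlock_t can then add its own wrappers passing its own
members, reusing the same common code.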

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- new patch
---
 xen/common/spinlock.c      | 104 +++++++++++++++++++++++--------------
 xen/include/xen/spinlock.h |   1 +
 2 files changed, 65 insertions(+), 40 deletions(-)

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 7d611d3d7d..31d12b1006 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -261,29 +261,31 @@ void spin_debug_disable(void)
 
 #ifdef CONFIG_DEBUG_LOCK_PROFILE
 
+#define LOCK_PROFILE_PAR lock->profile
 #define LOCK_PROFILE_REL                                                     \
-    if ( lock->profile )                                                     \
+    if ( profile )                                                           \
     {                                                                        \
-        lock->profile->time_hold += NOW() - lock->profile->time_locked;      \
-        lock->profile->lock_cnt++;                                           \
+        profile->time_hold += NOW() - profile->time_locked;                  \
+        profile->lock_cnt++;                                                 \
     }
 #define LOCK_PROFILE_VAR(var, val)    s_time_t var = (val)
 #define LOCK_PROFILE_BLOCK(var)       var = var ? : NOW()
 #define LOCK_PROFILE_BLKACC(tst, val)                                        \
     if ( tst )                                                               \
     {                                                                        \
-        lock->profile->time_block += lock->profile->time_locked - (val);     \
-        lock->profile->block_cnt++;                                          \
+        profile->time_block += profile->time_locked - (val);                 \
+        profile->block_cnt++;                                                \
     }
 #define LOCK_PROFILE_GOT(val)                                                \
-    if ( lock->profile )                                                     \
+    if ( profile )                                                           \
     {                                                                        \
-        lock->profile->time_locked = NOW();                                  \
+        profile->time_locked = NOW();                                        \
         LOCK_PROFILE_BLKACC(val, val);                                       \
     }
 
 #else
 
+#define LOCK_PROFILE_PAR NULL
 #define LOCK_PROFILE_REL
 #define LOCK_PROFILE_VAR(var, val)
 #define LOCK_PROFILE_BLOCK(var)
@@ -307,17 +309,18 @@ static always_inline uint16_t observe_head(const spinlock_tickets_t *t)
     return read_atomic(&t->head);
 }
 
-static void always_inline spin_lock_common(spinlock_t *lock,
+static void always_inline spin_lock_common(spinlock_tickets_t *t,
+                                           union lock_debug *debug,
+                                           struct lock_profile *profile,
                                            void (*cb)(void *data), void *data)
 {
     spinlock_tickets_t tickets = SPINLOCK_TICKET_INC;
     LOCK_PROFILE_VAR(block, 0);
 
-    check_lock(&lock->debug, false);
+    check_lock(debug, false);
     preempt_disable();
-    tickets.head_tail = arch_fetch_and_add(&lock->tickets.head_tail,
-                                           tickets.head_tail);
-    while ( tickets.tail != observe_head(&lock->tickets) )
+    tickets.head_tail = arch_fetch_and_add(&t->head_tail, tickets.head_tail);
+    while ( tickets.tail != observe_head(t) )
     {
         LOCK_PROFILE_BLOCK(block);
         if ( cb )
@@ -325,18 +328,19 @@ static void always_inline spin_lock_common(spinlock_t *lock,
         arch_lock_relax();
     }
     arch_lock_acquire_barrier();
-    got_lock(&lock->debug);
+    got_lock(debug);
     LOCK_PROFILE_GOT(block);
 }
 
 void _spin_lock(spinlock_t *lock)
 {
-    spin_lock_common(lock, NULL, NULL);
+    spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, NULL,
+                     NULL);
 }
 
 void _spin_lock_cb(spinlock_t *lock, void (*cb)(void *data), void *data)
 {
-    spin_lock_common(lock, cb, data);
+    spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, cb, data);
 }
 
 void _spin_lock_irq(spinlock_t *lock)
@@ -355,16 +359,23 @@ unsigned long _spin_lock_irqsave(spinlock_t *lock)
     return flags;
 }
 
-void _spin_unlock(spinlock_t *lock)
+static void always_inline spin_unlock_common(spinlock_tickets_t *t,
+                                             union lock_debug *debug,
+                                             struct lock_profile *profile)
 {
     LOCK_PROFILE_REL;
-    rel_lock(&lock->debug);
+    rel_lock(debug);
     arch_lock_release_barrier();
-    add_sized(&lock->tickets.head, 1);
+    add_sized(&t->head, 1);
     arch_lock_signal();
     preempt_enable();
 }
 
+void _spin_unlock(spinlock_t *lock)
+{
+    spin_unlock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
 void _spin_unlock_irq(spinlock_t *lock)
 {
     _spin_unlock(lock);
@@ -377,25 +388,25 @@ void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
     local_irq_restore(flags);
 }
 
+static int always_inline spin_is_locked_common(const spinlock_tickets_t *t)
+{
+    return t->head != t->tail;
+}
+
 int _spin_is_locked(const spinlock_t *lock)
 {
-    /*
-     * Recursive locks may be locked by another CPU, yet we return
-     * "false" here, making this function suitable only for use in
-     * ASSERT()s and alike.
-     */
-    return lock->recurse_cpu == SPINLOCK_NO_CPU
-           ? lock->tickets.head != lock->tickets.tail
-           : lock->recurse_cpu == smp_processor_id();
+    return spin_is_locked_common(&lock->tickets);
 }
 
-int _spin_trylock(spinlock_t *lock)
+static int always_inline spin_trylock_common(spinlock_tickets_t *t,
+                                             union lock_debug *debug,
+                                             struct lock_profile *profile)
 {
     spinlock_tickets_t old, new;
 
     preempt_disable();
-    check_lock(&lock->debug, true);
-    old = observe_lock(&lock->tickets);
+    check_lock(debug, true);
+    old = observe_lock(t);
     if ( old.head != old.tail )
     {
         preempt_enable();
@@ -403,8 +414,7 @@ int _spin_trylock(spinlock_t *lock)
     }
     new = old;
     new.tail++;
-    if ( cmpxchg(&lock->tickets.head_tail,
-                 old.head_tail, new.head_tail) != old.head_tail )
+    if ( cmpxchg(&t->head_tail, old.head_tail, new.head_tail) != old.head_tail )
     {
         preempt_enable();
         return 0;
@@ -413,29 +423,41 @@ int _spin_trylock(spinlock_t *lock)
      * cmpxchg() is a full barrier so no need for an
      * arch_lock_acquire_barrier().
      */
-    got_lock(&lock->debug);
+    got_lock(debug);
     LOCK_PROFILE_GOT(0);
 
     return 1;
 }
 
-void _spin_barrier(spinlock_t *lock)
+int _spin_trylock(spinlock_t *lock)
+{
+    return spin_trylock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
+static void always_inline spin_barrier_common(spinlock_tickets_t *t,
+                                              union lock_debug *debug,
+                                              struct lock_profile *profile)
 {
     spinlock_tickets_t sample;
     LOCK_PROFILE_VAR(block, NOW());
 
-    check_barrier(&lock->debug);
+    check_barrier(debug);
     smp_mb();
-    sample = observe_lock(&lock->tickets);
+    sample = observe_lock(t);
     if ( sample.head != sample.tail )
     {
-        while ( observe_head(&lock->tickets) == sample.head )
+        while ( observe_head(t) == sample.head )
             arch_lock_relax();
-        LOCK_PROFILE_BLKACC(lock->profile, block);
+        LOCK_PROFILE_BLKACC(profile, block);
     }
     smp_mb();
 }
 
+void _spin_barrier(spinlock_t *lock)
+{
+    spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
 int rspin_trylock(rspinlock_t *lock)
 {
     unsigned int cpu = smp_processor_id();
@@ -448,7 +470,8 @@ int rspin_trylock(rspinlock_t *lock)
 
     if ( likely(lock->recurse_cpu != cpu) )
     {
-        if ( !spin_trylock(lock) )
+        if ( !spin_trylock_common(&lock->tickets, &lock->debug,
+                                  LOCK_PROFILE_PAR) )
             return 0;
         lock->recurse_cpu = cpu;
     }
@@ -466,7 +489,8 @@ void rspin_lock(rspinlock_t *lock)
 
     if ( likely(lock->recurse_cpu != cpu) )
     {
-        _spin_lock(lock);
+        spin_lock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR, NULL,
+                         NULL);
         lock->recurse_cpu = cpu;
     }
 
@@ -490,7 +514,7 @@ void rspin_unlock(rspinlock_t *lock)
     if ( likely(--lock->recurse_cnt == 0) )
     {
         lock->recurse_cpu = SPINLOCK_NO_CPU;
-        spin_unlock(lock);
+        spin_unlock_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
     }
 }
 
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 82ef99d3b6..d6f4b66613 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -163,6 +163,7 @@ extern void cf_check spinlock_profile_reset(unsigned char key);
 #else
 
 struct lock_profile_qhead { };
+struct lock_profile { };
 
 #define SPIN_LOCK_UNLOCKED {                                                  \
     .recurse_cpu = SPINLOCK_NO_CPU,                                           \
-- 
2.35.3


