* [PATCHv2 0/4] spinlock: queue read-write locks
@ 2016-01-26 16:25 David Vrabel
  2016-01-26 16:25 ` [PATCHv2 1/4] atomic: replace atomic_compareandswap() with atomic_cmpxchg() David Vrabel
                   ` (3 more replies)
  0 siblings, 4 replies; 6+ messages in thread
From: David Vrabel @ 2016-01-26 16:25 UTC (permalink / raw)
  To: xen-devel
  Cc: Andrew Cooper, Jennifer Herbert, David Vrabel, Jan Beulich, Ian Campbell

This series replaces the current read-write lock implementation with
the queue read-write locks from Linux.  These are fair; under
contention both readers and writers will be queued and obtain the lock
in FIFO order (due to the fairness of the internal ticket lock).

The implementation is all in C and thus architecture independent.
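
For readers new to the design, here is a minimal sketch of the two
fast paths, using the names patch 4 introduces (everything else below
is illustrative only).  Both slow paths queue on the internal ticket
lock, which is what gives the FIFO ordering:

    /* Reader fast path: add a reader, defer to the slow path if a
     * writer is active or waiting. */
    static inline void read_lock_sketch(rwlock_t *l)
    {
        u32 cnts = atomic_add_return(_QR_BIAS, &l->cnts);

        if ( cnts & _QW_WMASK )
            queue_read_lock_slowpath(l);
    }

    /* Writer fast path: take the lock outright only if it is idle. */
    static inline void write_lock_sketch(rwlock_t *l)
    {
        if ( atomic_cmpxchg(&l->cnts, 0, _QW_LOCKED) != 0 )
            queue_write_lock_slowpath(l);
    }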

Compared to the Linux implementation some of the memory barrier
primitives were changed.  The ARM maintainers may want to give this
area a careful review.

Significant changes in v2:

- atomic_cmpxchg() is now arch-dependent as arm/arm64 already provided
  their own.
- Moved rwlocks into their own file.
- Removed the special casing of in_irq() in the slow read path.

David

* [PATCHv2 1/4] atomic: replace atomic_compareandswap() with atomic_cmpxchg()
  2016-01-26 16:25 [PATCHv2 0/4] spinlock: queue read-write locks David Vrabel
@ 2016-01-26 16:25 ` David Vrabel
  2016-01-26 17:09   ` Ian Campbell
  2016-01-26 16:25 ` [PATCHv2 2/4] spinlock: shrink struct lock_debug David Vrabel
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 6+ messages in thread
From: David Vrabel @ 2016-01-26 16:25 UTC (permalink / raw)
  To: xen-devel
  Cc: Andrew Cooper, Jennifer Herbert, David Vrabel, Jan Beulich, Ian Campbell

atomic_compareandswap() used atomic_t for the new, old and returned
values, which is less convenient than using plain int.
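
As a sketch of the new interface (the helper below is illustrative
only and not part of the patch): atomic_cmpxchg(v, old, new) returns
the value *v held before the operation, so the exchange succeeded iff
the return value equals old.

    /* Illustrative only; needs <asm/atomic.h> and <xen/sched.h> for
     * DOMAIN_DESTROYED.  Succeeds only if the refcount was zero. */
    static inline bool_t mark_destroyed(atomic_t *refcnt)
    {
        return atomic_cmpxchg(refcnt, 0, DOMAIN_DESTROYED) == 0;
    }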

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
---
v2:
- arm/arm64 already provided atomic_cmpxchg()
---
 xen/common/domain.c          |  5 +----
 xen/include/asm-arm/atomic.h |  8 --------
 xen/include/asm-x86/atomic.h | 24 ++++++++++++++++--------
 xen/include/xen/sched.h      |  9 ++++-----
 4 files changed, 21 insertions(+), 25 deletions(-)

diff --git a/xen/common/domain.c b/xen/common/domain.c
index 2979c1b..93e77f5 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -857,14 +857,11 @@ static void complete_domain_destroy(struct rcu_head *head)
 void domain_destroy(struct domain *d)
 {
     struct domain **pd;
-    atomic_t old = ATOMIC_INIT(0);
-    atomic_t new = ATOMIC_INIT(DOMAIN_DESTROYED);
 
     BUG_ON(!d->is_dying);
 
     /* May be already destroyed, or get_domain() can race us. */
-    old = atomic_compareandswap(old, new, &d->refcnt);
-    if ( _atomic_read(old) != 0 )
+    if ( atomic_cmpxchg(&d->refcnt, 0, DOMAIN_DESTROYED) != 0 )
         return;
 
     cpupool_rm_domain(d);
diff --git a/xen/include/asm-arm/atomic.h b/xen/include/asm-arm/atomic.h
index 5a38c67..29ab265 100644
--- a/xen/include/asm-arm/atomic.h
+++ b/xen/include/asm-arm/atomic.h
@@ -138,14 +138,6 @@ static inline void _atomic_set(atomic_t *v, int i)
 # error "unknown ARM variant"
 #endif
 
-static inline atomic_t atomic_compareandswap(
-    atomic_t old, atomic_t new, atomic_t *v)
-{
-    atomic_t rc;
-    rc.counter = __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));
-    return rc;
-}
-
 #endif /* __ARCH_ARM_ATOMIC__ */
 /*
  * Local variables:
diff --git a/xen/include/asm-x86/atomic.h b/xen/include/asm-x86/atomic.h
index 2b8c877..d246b70 100644
--- a/xen/include/asm-x86/atomic.h
+++ b/xen/include/asm-x86/atomic.h
@@ -135,6 +135,10 @@ static inline void _atomic_set(atomic_t *v, int i)
     v->counter = i;
 }
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+    return cmpxchg(&v->counter, old, new);
+}
 
 /**
  * atomic_add - add integer to atomic variable
@@ -152,6 +156,18 @@ static inline void atomic_add(int i, atomic_t *v)
 }
 
 /**
+ * atomic_add_return - add integer and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+    return i + arch_fetch_and_add(&v->counter, i);
+}
+
+/**
  * atomic_sub - subtract the atomic variable
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
@@ -272,12 +288,4 @@ static inline int atomic_add_negative(int i, atomic_t *v)
     return c;
 }
 
-static inline atomic_t atomic_compareandswap(
-    atomic_t old, atomic_t new, atomic_t *v)
-{
-    atomic_t rc;
-    rc.counter = __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));
-    return rc;
-}
-
 #endif /* __ARCH_X86_ATOMIC__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 82b6dd1..5870745 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -483,16 +483,15 @@ extern struct vcpu *idle_vcpu[NR_CPUS];
  */
 static always_inline int get_domain(struct domain *d)
 {
-    atomic_t old, new, seen = d->refcnt;
+    int old, seen = atomic_read(&d->refcnt);
     do
     {
         old = seen;
-        if ( unlikely(_atomic_read(old) & DOMAIN_DESTROYED) )
+        if ( unlikely(old & DOMAIN_DESTROYED) )
             return 0;
-        _atomic_set(&new, _atomic_read(old) + 1);
-        seen = atomic_compareandswap(old, new, &d->refcnt);
+        seen = atomic_cmpxchg(&d->refcnt, old, old + 1);
     }
-    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
+    while ( unlikely(seen != old) );
     return 1;
 }
 
-- 
2.1.4

* [PATCHv2 2/4] spinlock: shrink struct lock_debug
  2016-01-26 16:25 [PATCHv2 0/4] spinlock: queue read-write locks David Vrabel
  2016-01-26 16:25 ` [PATCHv2 1/4] atomic: replace atomic_compareandswap() with atomic_cmpxchg() David Vrabel
@ 2016-01-26 16:25 ` David Vrabel
  2016-01-26 16:25 ` [PATCHv2 3/4] spinlock: move rwlock API and per-cpu rwlocks into their own files David Vrabel
  2016-01-26 16:25 ` [PATCHv2 4/4] spinlock: fair read-write locks David Vrabel
  3 siblings, 0 replies; 6+ messages in thread
From: David Vrabel @ 2016-01-26 16:25 UTC (permalink / raw)
  To: xen-devel
  Cc: Andrew Cooper, Jennifer Herbert, David Vrabel, Jan Beulich, Ian Campbell

From: Jennifer Herbert <jennifer.herbert@citrix.com>

Reduce the size of struct lock_debug so that growth in other lock
structures does not increase the size of struct domain too much.
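
A brief note on why s16 is sufficient (sketch, not from the patch):
the field only ever holds -1, 0 or +1, so any signed type works; s16
merely trims the per-lock debug overhead that the larger rwlock_t in
patch 4 would otherwise amplify across the many locks in struct
domain.

    /* Illustrative helper, not part of the patch: the three states
     * fit comfortably in an s16. */
    static inline void set_irq_safe(struct lock_debug *d, bool_t irq_safe)
    {
        d->irq_safe = irq_safe ? 1 : 0;   /* -1 means "don't know yet" */
    }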

Signed-off-by: Jennifer Herbert <jennifer.herbert@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
---
v2:
- Continue to use int for local variable.
---
 xen/include/xen/spinlock.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 8b2590f..22c4fc2 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -8,7 +8,7 @@
 
 #ifndef NDEBUG
 struct lock_debug {
-    int irq_safe; /* +1: IRQ-safe; 0: not IRQ-safe; -1: don't know yet */
+    s16 irq_safe; /* +1: IRQ-safe; 0: not IRQ-safe; -1: don't know yet */
 };
 #define _LOCK_DEBUG { -1 }
 void spin_debug_enable(void);
-- 
2.1.4

* [PATCHv2 3/4] spinlock: move rwlock API and per-cpu rwlocks into their own files
  2016-01-26 16:25 [PATCHv2 0/4] spinlock: queue read-write locks David Vrabel
  2016-01-26 16:25 ` [PATCHv2 1/4] atomic: replace atomic_compareandswap() with atomic_cmpxchg() David Vrabel
  2016-01-26 16:25 ` [PATCHv2 2/4] spinlock: shrink struct lock_debug David Vrabel
@ 2016-01-26 16:25 ` David Vrabel
  2016-01-26 16:25 ` [PATCHv2 4/4] spinlock: fair read-write locks David Vrabel
  3 siblings, 0 replies; 6+ messages in thread
From: David Vrabel @ 2016-01-26 16:25 UTC (permalink / raw)
  To: xen-devel
  Cc: Andrew Cooper, Jennifer Herbert, David Vrabel, Jan Beulich, Ian Campbell

From: Jennifer Herbert <jennifer.herbert@citrix.com>

In preparation for a replacement read-write lock implementation, move
the API and the per-cpu read-write locks into their own files.
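
For reference, a minimal usage sketch of the per-cpu rwlock API being
moved (the API names are taken from the code below; the demo_*
identifiers are hypothetical):

    #include <xen/rwlock.h>

    /* One per-cpu owner pointer shared by all locks of this class. */
    DEFINE_PERCPU_RWLOCK_GLOBAL(demo_percpu_rwlock);
    static percpu_rwlock_t demo_lock;

    static void demo_init(void)
    {
        percpu_rwlock_resource_init(&demo_lock, demo_percpu_rwlock);
    }

    static void demo_read_side(void)
    {
        percpu_read_lock(demo_percpu_rwlock, &demo_lock);
        /* ... read-mostly critical section ... */
        percpu_read_unlock(demo_percpu_rwlock, &demo_lock);
    }

    static void demo_write_side(void)
    {
        percpu_write_lock(demo_percpu_rwlock, &demo_lock);
        /* ... rare write-side critical section ... */
        percpu_write_unlock(demo_percpu_rwlock, &demo_lock);
    }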

Signed-off-by: Jennifer Herbert <jennifer.herbert@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
---
v2:
- new
---
 xen/arch/x86/mm/mem_sharing.c |   1 +
 xen/common/Makefile           |   1 +
 xen/common/rwlock.c           |  47 +++++++++++++
 xen/common/spinlock.c         |  45 -------------
 xen/include/asm-x86/mm.h      |   1 +
 xen/include/xen/grant_table.h |   1 +
 xen/include/xen/rwlock.h      | 150 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/sched.h       |   1 +
 xen/include/xen/spinlock.h    | 143 ----------------------------------------
 9 files changed, 202 insertions(+), 188 deletions(-)
 create mode 100644 xen/common/rwlock.c
 create mode 100644 xen/include/xen/rwlock.h

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index a95e105..a522423 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -23,6 +23,7 @@
 #include <xen/types.h>
 #include <xen/domain_page.h>
 #include <xen/spinlock.h>
+#include <xen/rwlock.h>
 #include <xen/mm.h>
 #include <xen/grant_table.h>
 #include <xen/sched.h>
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 4df71ee..6e82b33 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -30,6 +30,7 @@ obj-y += rangeset.o
 obj-y += radix-tree.o
 obj-y += rbtree.o
 obj-y += rcupdate.o
+obj-y += rwlock.o
 obj-$(CONFIG_SCHED_ARINC653) += sched_arinc653.o
 obj-$(CONFIG_SCHED_CREDIT) += sched_credit.o
 obj-$(CONFIG_SCHED_CREDIT2) += sched_credit2.o
diff --git a/xen/common/rwlock.c b/xen/common/rwlock.c
new file mode 100644
index 0000000..410d4dc
--- /dev/null
+++ b/xen/common/rwlock.c
@@ -0,0 +1,47 @@
+#include <xen/rwlock.h>
+#include <xen/irq.h>
+
+static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);
+
+void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
+                percpu_rwlock_t *percpu_rwlock)
+{
+    unsigned int cpu;
+    cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);
+
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    /*
+     * First take the write lock to protect against other writers or slow
+     * path readers.
+     */
+    write_lock(&percpu_rwlock->rwlock);
+
+    /* Now set the global variable so that readers start using read_lock. */
+    percpu_rwlock->writer_activating = 1;
+    smp_mb();
+
+    /* Using a per cpu cpumask is only safe if there is no nesting. */
+    ASSERT(!in_irq());
+    cpumask_copy(rwlock_readers, &cpu_online_map);
+
+    /* Check if there are any percpu readers in progress on this rwlock. */
+    for ( ; ; )
+    {
+        for_each_cpu(cpu, rwlock_readers)
+        {
+            /*
+             * Remove any percpu readers not contending on this rwlock
+             * from our check mask.
+             */
+            if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
+                __cpumask_clear_cpu(cpu, rwlock_readers);
+        }
+        /* Check if we've cleared all percpu readers from check mask. */
+        if ( cpumask_empty(rwlock_readers) )
+            break;
+        /* Give the coherency fabric a break. */
+        cpu_relax();
+    };
+}
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index bab1f95..7b0cf6c 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -10,8 +10,6 @@
 #include <asm/processor.h>
 #include <asm/atomic.h>
 
-static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);
-
 #ifndef NDEBUG
 
 static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
@@ -494,49 +492,6 @@ int _rw_is_write_locked(rwlock_t *lock)
     return (lock->lock == RW_WRITE_FLAG); /* writer in critical section? */
 }
 
-void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
-                percpu_rwlock_t *percpu_rwlock)
-{
-    unsigned int cpu;
-    cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);
-
-    /* Validate the correct per_cpudata variable has been provided. */
-    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
-    /* 
-     * First take the write lock to protect against other writers or slow 
-     * path readers.
-     */
-    write_lock(&percpu_rwlock->rwlock);
-
-    /* Now set the global variable so that readers start using read_lock. */
-    percpu_rwlock->writer_activating = 1;
-    smp_mb();
-
-    /* Using a per cpu cpumask is only safe if there is no nesting. */
-    ASSERT(!in_irq());
-    cpumask_copy(rwlock_readers, &cpu_online_map);
-
-    /* Check if there are any percpu readers in progress on this rwlock. */
-    for ( ; ; )
-    {
-        for_each_cpu(cpu, rwlock_readers)
-        {
-            /* 
-             * Remove any percpu readers not contending on this rwlock
-             * from our check mask.
-             */
-            if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
-                __cpumask_clear_cpu(cpu, rwlock_readers);
-        }
-        /* Check if we've cleared all percpu readers from check mask. */
-        if ( cpumask_empty(rwlock_readers) )
-            break;
-        /* Give the coherency fabric a break. */
-        cpu_relax();
-    };
-}
-
 #ifdef LOCK_PROFILE
 
 struct lock_profile_anc {
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 7598414..4560deb 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -5,6 +5,7 @@
 #include <xen/config.h>
 #include <xen/list.h>
 #include <xen/spinlock.h>
+#include <xen/rwlock.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/x86_emulate.h>
diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
index b4f064e..32b0ecd 100644
--- a/xen/include/xen/grant_table.h
+++ b/xen/include/xen/grant_table.h
@@ -23,6 +23,7 @@
 #ifndef __XEN_GRANT_TABLE_H__
 #define __XEN_GRANT_TABLE_H__
 
+#include <xen/rwlock.h>
 #include <public/grant_table.h>
 #include <asm/page.h>
 #include <asm/grant_table.h>
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
new file mode 100644
index 0000000..9d87783
--- /dev/null
+++ b/xen/include/xen/rwlock.h
@@ -0,0 +1,150 @@
+#ifndef __RWLOCK_H__
+#define __RWLOCK_H__
+
+#include <xen/spinlock.h>
+
+#define read_lock(l)                  _read_lock(l)
+#define read_lock_irq(l)              _read_lock_irq(l)
+#define read_lock_irqsave(l, f)                                 \
+    ({                                                          \
+        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
+        ((f) = _read_lock_irqsave(l));                          \
+    })
+
+#define read_unlock(l)                _read_unlock(l)
+#define read_unlock_irq(l)            _read_unlock_irq(l)
+#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
+#define read_trylock(l)               _read_trylock(l)
+
+#define write_lock(l)                 _write_lock(l)
+#define write_lock_irq(l)             _write_lock_irq(l)
+#define write_lock_irqsave(l, f)                                \
+    ({                                                          \
+        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
+        ((f) = _write_lock_irqsave(l));                         \
+    })
+#define write_trylock(l)              _write_trylock(l)
+
+#define write_unlock(l)               _write_unlock(l)
+#define write_unlock_irq(l)           _write_unlock_irq(l)
+#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
+
+#define rw_is_locked(l)               _rw_is_locked(l)
+#define rw_is_write_locked(l)         _rw_is_write_locked(l)
+
+
+typedef struct percpu_rwlock percpu_rwlock_t;
+
+struct percpu_rwlock {
+    rwlock_t            rwlock;
+    bool_t              writer_activating;
+#ifndef NDEBUG
+    percpu_rwlock_t     **percpu_owner;
+#endif
+};
+
+#ifndef NDEBUG
+#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0, owner }
+static inline void _percpu_rwlock_owner_check(percpu_rwlock_t **per_cpudata,
+                                         percpu_rwlock_t *percpu_rwlock)
+{
+    ASSERT(per_cpudata == percpu_rwlock->percpu_owner);
+}
+#else
+#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0 }
+#define _percpu_rwlock_owner_check(data, lock) ((void)0)
+#endif
+
+#define DEFINE_PERCPU_RWLOCK_RESOURCE(l, owner) \
+    percpu_rwlock_t l = PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner))
+#define percpu_rwlock_resource_init(l, owner) \
+    (*(l) = (percpu_rwlock_t)PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner)))
+
+static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
+                                         percpu_rwlock_t *percpu_rwlock)
+{
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    /* We cannot support recursion on the same lock. */
+    ASSERT(this_cpu_ptr(per_cpudata) != percpu_rwlock);
+    /*
+     * Detect using a second percpu_rwlock_t simultaneously and fall back
+     * to standard read_lock.
+     */
+    if ( unlikely(this_cpu_ptr(per_cpudata) != NULL ) )
+    {
+        read_lock(&percpu_rwlock->rwlock);
+        return;
+    }
+
+    /* Indicate this cpu is reading. */
+    this_cpu_ptr(per_cpudata) = percpu_rwlock;
+    smp_mb();
+    /* Check if a writer is waiting. */
+    if ( unlikely(percpu_rwlock->writer_activating) )
+    {
+        /* Let the waiting writer know we aren't holding the lock. */
+        this_cpu_ptr(per_cpudata) = NULL;
+        /* Wait using the read lock to keep the lock fair. */
+        read_lock(&percpu_rwlock->rwlock);
+        /* Set the per CPU data again and continue. */
+        this_cpu_ptr(per_cpudata) = percpu_rwlock;
+        /* Drop the read lock because we don't need it anymore. */
+        read_unlock(&percpu_rwlock->rwlock);
+    }
+}
+
+static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
+                percpu_rwlock_t *percpu_rwlock)
+{
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    /* Verify the read lock was taken for this lock */
+    ASSERT(this_cpu_ptr(per_cpudata) != NULL);
+    /*
+     * Detect using a second percpu_rwlock_t simultaneously and fall back
+     * to standard read_unlock.
+     */
+    if ( unlikely(this_cpu_ptr(per_cpudata) != percpu_rwlock ) )
+    {
+        read_unlock(&percpu_rwlock->rwlock);
+        return;
+    }
+    this_cpu_ptr(per_cpudata) = NULL;
+    smp_wmb();
+}
+
+/* Don't inline percpu write lock as it's a complex function. */
+void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
+                        percpu_rwlock_t *percpu_rwlock);
+
+static inline void _percpu_write_unlock(percpu_rwlock_t **per_cpudata,
+                percpu_rwlock_t *percpu_rwlock)
+{
+    /* Validate the correct per_cpudata variable has been provided. */
+    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    ASSERT(percpu_rwlock->writer_activating);
+    percpu_rwlock->writer_activating = 0;
+    write_unlock(&percpu_rwlock->rwlock);
+}
+
+#define percpu_rw_is_write_locked(l)         _rw_is_write_locked(&((l)->rwlock))
+
+#define percpu_read_lock(percpu, lock) \
+    _percpu_read_lock(&get_per_cpu_var(percpu), lock)
+#define percpu_read_unlock(percpu, lock) \
+    _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
+#define percpu_write_lock(percpu, lock) \
+    _percpu_write_lock(&get_per_cpu_var(percpu), lock)
+#define percpu_write_unlock(percpu, lock) \
+    _percpu_write_unlock(&get_per_cpu_var(percpu), lock)
+
+#define DEFINE_PERCPU_RWLOCK_GLOBAL(name) DEFINE_PER_CPU(percpu_rwlock_t *, \
+                                                         name)
+#define DECLARE_PERCPU_RWLOCK_GLOBAL(name) DECLARE_PER_CPU(percpu_rwlock_t *, \
+                                                           name)
+
+#endif /* __RWLOCK_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 5870745..b47a3fe 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -4,6 +4,7 @@
 
 #include <xen/types.h>
 #include <xen/spinlock.h>
+#include <xen/rwlock.h>
 #include <xen/shared.h>
 #include <xen/timer.h>
 #include <xen/rangeset.h>
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 22c4fc2..765db51 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -236,147 +236,4 @@ int _rw_is_write_locked(rwlock_t *lock);
 #define spin_lock_recursive(l)        _spin_lock_recursive(l)
 #define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
 
-#define read_lock(l)                  _read_lock(l)
-#define read_lock_irq(l)              _read_lock_irq(l)
-#define read_lock_irqsave(l, f)                                 \
-    ({                                                          \
-        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
-        ((f) = _read_lock_irqsave(l));                          \
-    })
-
-#define read_unlock(l)                _read_unlock(l)
-#define read_unlock_irq(l)            _read_unlock_irq(l)
-#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
-#define read_trylock(l)               _read_trylock(l)
-
-#define write_lock(l)                 _write_lock(l)
-#define write_lock_irq(l)             _write_lock_irq(l)
-#define write_lock_irqsave(l, f)                                \
-    ({                                                          \
-        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
-        ((f) = _write_lock_irqsave(l));                         \
-    })
-#define write_trylock(l)              _write_trylock(l)
-
-#define write_unlock(l)               _write_unlock(l)
-#define write_unlock_irq(l)           _write_unlock_irq(l)
-#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
-
-#define rw_is_locked(l)               _rw_is_locked(l)
-#define rw_is_write_locked(l)         _rw_is_write_locked(l)
-
-typedef struct percpu_rwlock percpu_rwlock_t;
-
-struct percpu_rwlock {
-    rwlock_t            rwlock;
-    bool_t              writer_activating;
-#ifndef NDEBUG
-    percpu_rwlock_t     **percpu_owner;
-#endif
-};
-
-#ifndef NDEBUG
-#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0, owner }
-static inline void _percpu_rwlock_owner_check(percpu_rwlock_t **per_cpudata,
-                                         percpu_rwlock_t *percpu_rwlock)
-{
-    ASSERT(per_cpudata == percpu_rwlock->percpu_owner);
-}
-#else
-#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0 }
-#define _percpu_rwlock_owner_check(data, lock) ((void)0)
-#endif
-
-#define DEFINE_PERCPU_RWLOCK_RESOURCE(l, owner) \
-    percpu_rwlock_t l = PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner))
-#define percpu_rwlock_resource_init(l, owner) \
-    (*(l) = (percpu_rwlock_t)PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner)))
-
-static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
-                                         percpu_rwlock_t *percpu_rwlock)
-{
-    /* Validate the correct per_cpudata variable has been provided. */
-    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
-    /* We cannot support recursion on the same lock. */
-    ASSERT(this_cpu_ptr(per_cpudata) != percpu_rwlock);
-    /* 
-     * Detect using a second percpu_rwlock_t simulatenously and fallback
-     * to standard read_lock.
-     */
-    if ( unlikely(this_cpu_ptr(per_cpudata) != NULL ) )
-    {
-        read_lock(&percpu_rwlock->rwlock);
-        return;
-    }
-
-    /* Indicate this cpu is reading. */
-    this_cpu_ptr(per_cpudata) = percpu_rwlock;
-    smp_mb();
-    /* Check if a writer is waiting. */
-    if ( unlikely(percpu_rwlock->writer_activating) )
-    {
-        /* Let the waiting writer know we aren't holding the lock. */
-        this_cpu_ptr(per_cpudata) = NULL;
-        /* Wait using the read lock to keep the lock fair. */
-        read_lock(&percpu_rwlock->rwlock);
-        /* Set the per CPU data again and continue. */
-        this_cpu_ptr(per_cpudata) = percpu_rwlock;
-        /* Drop the read lock because we don't need it anymore. */
-        read_unlock(&percpu_rwlock->rwlock);
-    }
-}
-
-static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
-                percpu_rwlock_t *percpu_rwlock)
-{
-    /* Validate the correct per_cpudata variable has been provided. */
-    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
-    /* Verify the read lock was taken for this lock */
-    ASSERT(this_cpu_ptr(per_cpudata) != NULL);
-    /* 
-     * Detect using a second percpu_rwlock_t simulatenously and fallback
-     * to standard read_unlock.
-     */
-    if ( unlikely(this_cpu_ptr(per_cpudata) != percpu_rwlock ) )
-    {
-        read_unlock(&percpu_rwlock->rwlock);
-        return;
-    }
-    this_cpu_ptr(per_cpudata) = NULL;
-    smp_wmb();
-}
-
-/* Don't inline percpu write lock as it's a complex function. */
-void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
-                        percpu_rwlock_t *percpu_rwlock);
-
-static inline void _percpu_write_unlock(percpu_rwlock_t **per_cpudata,
-                percpu_rwlock_t *percpu_rwlock)
-{
-    /* Validate the correct per_cpudata variable has been provided. */
-    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
-    ASSERT(percpu_rwlock->writer_activating);
-    percpu_rwlock->writer_activating = 0;
-    write_unlock(&percpu_rwlock->rwlock);
-}
-
-#define percpu_rw_is_write_locked(l)         _rw_is_write_locked(&((l)->rwlock))
-
-#define percpu_read_lock(percpu, lock) \
-    _percpu_read_lock(&get_per_cpu_var(percpu), lock)
-#define percpu_read_unlock(percpu, lock) \
-    _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
-#define percpu_write_lock(percpu, lock) \
-    _percpu_write_lock(&get_per_cpu_var(percpu), lock)
-#define percpu_write_unlock(percpu, lock) \
-    _percpu_write_unlock(&get_per_cpu_var(percpu), lock)
-
-#define DEFINE_PERCPU_RWLOCK_GLOBAL(name) DEFINE_PER_CPU(percpu_rwlock_t *, \
-                                                         name)
-#define DECLARE_PERCPU_RWLOCK_GLOBAL(name) DECLARE_PER_CPU(percpu_rwlock_t *, \
-                                                         name)
-
 #endif /* __SPINLOCK_H__ */
-- 
2.1.4

* [PATCHv2 4/4] spinlock: fair read-write locks
  2016-01-26 16:25 [PATCHv2 0/4] spinlock: queue read-write locks David Vrabel
                   ` (2 preceding siblings ...)
  2016-01-26 16:25 ` [PATCHv2 3/4] spinlock: move rwlock API and per-cpu rwlocks into their own files David Vrabel
@ 2016-01-26 16:25 ` David Vrabel
  3 siblings, 0 replies; 6+ messages in thread
From: David Vrabel @ 2016-01-26 16:25 UTC (permalink / raw)
  To: xen-devel
  Cc: Andrew Cooper, Jennifer Herbert, David Vrabel, Jan Beulich, Ian Campbell

From: Jennifer Herbert <jennifer.herbert@citrix.com>

The current rwlocks are write-biased and unfair.  This allows writers
to starve readers in situations where there are many writers (e.g.,
p2m type changes from log dirty updates during domain save).

Replace the current implementation with queued read-write locks, which
use a fair spinlock (a ticket lock in this case) to ensure fairness
between readers and writers when the lock is contended.

This implementation is from the Linux commit 70af2f8a4f48 by Waiman
Long and Peter Zijlstra.

    locking/rwlocks: Introduce 'qrwlocks' - fair, queued rwlocks

    This rwlock uses the arch_spin_lock_t as a waitqueue, and assuming
    the arch_spin_lock_t is a fair lock (ticket,mcs etc..) the
    resulting rwlock is a fair lock.

    It fits in the same 8 bytes as the regular rwlock_t by folding the
    reader and writer count into a single integer, using the remaining
    4 bytes for the arch_spinlock_t.

    Architectures that can single-copy address bytes can optimize
    queue_write_unlock() with a 0 write to the LSB (the write count).

We do not yet make use of the architecture-specific optimization noted
above.
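
For clarity, a sketch of the difference (not part of this series): the
generic unlock below is what these patches use; the byte-store variant
shows what the optimisation would look like, assuming single-copy
atomic byte stores and a little-endian layout where the writer byte is
the least significant byte (barriers omitted for brevity).

    /* Generic form, as in this patch. */
    static inline void write_unlock_generic(rwlock_t *lock)
    {
        atomic_sub(_QW_LOCKED, &lock->cnts);
    }

    /* Hypothetical optimised form: clear only the writer byte. */
    static inline void write_unlock_bytestore(rwlock_t *lock)
    {
        *(volatile uint8_t *)&lock->cnts = 0;
    }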

Signed-off-by: Jennifer Herbert <jennifer.herbert@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
---
v2:
- Remove in_irq() special case from read slow path.
- Comment on why 8-bit write mask.
- Style.
- Use 0 in various cmpxchg() calls where appropriate.
---
 xen/common/rwlock.c        | 102 +++++++++++++++++++++++
 xen/common/spinlock.c      | 204 ---------------------------------------------
 xen/include/xen/rwlock.h   | 182 ++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/spinlock.h |  32 -------
 4 files changed, 284 insertions(+), 236 deletions(-)

diff --git a/xen/common/rwlock.c b/xen/common/rwlock.c
index 410d4dc..1fd8ea0 100644
--- a/xen/common/rwlock.c
+++ b/xen/common/rwlock.c
@@ -1,6 +1,108 @@
 #include <xen/rwlock.h>
 #include <xen/irq.h>
 
+/*
+ * rspin_until_writer_unlock - spin until writer is gone.
+ * @lock  : Pointer to queue rwlock structure.
+ * @cnts: Current queue rwlock writer status byte.
+ *
+ * At the head of the wait queue, the reader will just increment the
+ * reader count and wait until the writer releases the lock.
+ */
+static inline void rspin_until_writer_unlock(rwlock_t *lock, u32 cnts)
+{
+    while ( (cnts & _QW_WMASK) == _QW_LOCKED )
+    {
+        cpu_relax();
+        smp_rmb();
+        cnts = atomic_read(&lock->cnts);
+    }
+}
+
+/*
+ * queue_read_lock_slowpath - acquire read lock of a queue rwlock.
+ * @lock: Pointer to queue rwlock structure.
+ */
+void queue_read_lock_slowpath(rwlock_t *lock)
+{
+    u32 cnts;
+
+    /*
+     * Readers come here when they cannot get the lock without waiting.
+     */
+    atomic_sub(_QR_BIAS, &lock->cnts);
+
+    /*
+     * Put the reader into the wait queue.
+     */
+    spin_lock(&lock->lock);
+
+    /*
+     * At the head of the wait queue now, wait until the writer state
+     * goes to 0 and then try to increment the reader count and get
+     * the lock. It is possible that an incoming writer may steal the
+     * lock in the interim, so it is necessary to check the writer byte
+     * to make sure that the write lock isn't taken.
+     */
+    while ( atomic_read(&lock->cnts) & _QW_WMASK )
+        cpu_relax();
+
+    cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
+    rspin_until_writer_unlock(lock, cnts);
+
+    /*
+     * Signal the next one in queue to become queue head.
+     */
+    spin_unlock(&lock->lock);
+}
+
+/*
+ * queue_write_lock_slowpath - acquire write lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure.
+ */
+void queue_write_lock_slowpath(rwlock_t *lock)
+{
+    u32 cnts;
+
+    /* Put the writer into the wait queue. */
+    spin_lock(&lock->lock);
+
+    /* Try to acquire the lock directly if no reader is present. */
+    if ( !atomic_read(&lock->cnts) &&
+         (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0) )
+        goto unlock;
+
+    /*
+     * Set the waiting flag to notify readers that a writer is pending,
+     * or wait for a previous writer to go away.
+     */
+    for (;;)
+    {
+        cnts = atomic_read(&lock->cnts);
+        if ( !(cnts & _QW_WMASK) &&
+             (atomic_cmpxchg(&lock->cnts, cnts,
+                             cnts | _QW_WAITING) == cnts) )
+            break;
+
+        cpu_relax();
+    }
+
+    /* When no more readers, set the locked flag. */
+    for (;;)
+    {
+        cnts = atomic_read(&lock->cnts);
+        if ( (cnts == _QW_WAITING) &&
+             (atomic_cmpxchg(&lock->cnts, _QW_WAITING,
+                             _QW_LOCKED) == _QW_WAITING) )
+            break;
+
+        cpu_relax();
+    }
+ unlock:
+    spin_unlock(&lock->lock);
+}
+
+
 static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);
 
 void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 7b0cf6c..a43fa84 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -288,210 +288,6 @@ void _spin_unlock_recursive(spinlock_t *lock)
     }
 }
 
-void _read_lock(rwlock_t *lock)
-{
-    uint32_t x;
-
-    check_lock(&lock->debug);
-    do {
-        while ( (x = lock->lock) & RW_WRITE_FLAG )
-            cpu_relax();
-    } while ( cmpxchg(&lock->lock, x, x+1) != x );
-    preempt_disable();
-}
-
-void _read_lock_irq(rwlock_t *lock)
-{
-    uint32_t x;
-
-    ASSERT(local_irq_is_enabled());
-    local_irq_disable();
-    check_lock(&lock->debug);
-    do {
-        if ( (x = lock->lock) & RW_WRITE_FLAG )
-        {
-            local_irq_enable();
-            while ( (x = lock->lock) & RW_WRITE_FLAG )
-                cpu_relax();
-            local_irq_disable();
-        }
-    } while ( cmpxchg(&lock->lock, x, x+1) != x );
-    preempt_disable();
-}
-
-unsigned long _read_lock_irqsave(rwlock_t *lock)
-{
-    uint32_t x;
-    unsigned long flags;
-
-    local_irq_save(flags);
-    check_lock(&lock->debug);
-    do {
-        if ( (x = lock->lock) & RW_WRITE_FLAG )
-        {
-            local_irq_restore(flags);
-            while ( (x = lock->lock) & RW_WRITE_FLAG )
-                cpu_relax();
-            local_irq_disable();
-        }
-    } while ( cmpxchg(&lock->lock, x, x+1) != x );
-    preempt_disable();
-    return flags;
-}
-
-int _read_trylock(rwlock_t *lock)
-{
-    uint32_t x;
-
-    check_lock(&lock->debug);
-    do {
-        if ( (x = lock->lock) & RW_WRITE_FLAG )
-            return 0;
-    } while ( cmpxchg(&lock->lock, x, x+1) != x );
-    preempt_disable();
-    return 1;
-}
-
-#ifndef _raw_read_unlock
-# define _raw_read_unlock(l) do {                      \
-    uint32_t x = (l)->lock, y;                         \
-    while ( (y = cmpxchg(&(l)->lock, x, x - 1)) != x ) \
-        x = y;                                         \
-} while (0)
-#endif
-
-inline void _read_unlock(rwlock_t *lock)
-{
-    preempt_enable();
-    _raw_read_unlock(lock);
-}
-
-void _read_unlock_irq(rwlock_t *lock)
-{
-    _read_unlock(lock);
-    local_irq_enable();
-}
-
-void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
-    _read_unlock(lock);
-    local_irq_restore(flags);
-}
-
-void _write_lock(rwlock_t *lock)
-{
-    uint32_t x;
-
-    check_lock(&lock->debug);
-    do {
-        while ( (x = lock->lock) & RW_WRITE_FLAG )
-            cpu_relax();
-    } while ( cmpxchg(&lock->lock, x, x|RW_WRITE_FLAG) != x );
-    while ( x != 0 )
-    {
-        cpu_relax();
-        x = lock->lock & ~RW_WRITE_FLAG;
-    }
-    preempt_disable();
-}
-
-void _write_lock_irq(rwlock_t *lock)
-{
-    uint32_t x;
-
-    ASSERT(local_irq_is_enabled());
-    local_irq_disable();
-    check_lock(&lock->debug);
-    do {
-        if ( (x = lock->lock) & RW_WRITE_FLAG )
-        {
-            local_irq_enable();
-            while ( (x = lock->lock) & RW_WRITE_FLAG )
-                cpu_relax();
-            local_irq_disable();
-        }
-    } while ( cmpxchg(&lock->lock, x, x|RW_WRITE_FLAG) != x );
-    while ( x != 0 )
-    {
-        cpu_relax();
-        x = lock->lock & ~RW_WRITE_FLAG;
-    }
-    preempt_disable();
-}
-
-unsigned long _write_lock_irqsave(rwlock_t *lock)
-{
-    uint32_t x;
-    unsigned long flags;
-
-    local_irq_save(flags);
-    check_lock(&lock->debug);
-    do {
-        if ( (x = lock->lock) & RW_WRITE_FLAG )
-        {
-            local_irq_restore(flags);
-            while ( (x = lock->lock) & RW_WRITE_FLAG )
-                cpu_relax();
-            local_irq_disable();
-        }
-    } while ( cmpxchg(&lock->lock, x, x|RW_WRITE_FLAG) != x );
-    while ( x != 0 )
-    {
-        cpu_relax();
-        x = lock->lock & ~RW_WRITE_FLAG;
-    }
-    preempt_disable();
-    return flags;
-}
-
-int _write_trylock(rwlock_t *lock)
-{
-    uint32_t x;
-
-    check_lock(&lock->debug);
-    do {
-        if ( (x = lock->lock) != 0 )
-            return 0;
-    } while ( cmpxchg(&lock->lock, x, x|RW_WRITE_FLAG) != x );
-    preempt_disable();
-    return 1;
-}
-
-#ifndef _raw_write_unlock
-# define _raw_write_unlock(l) xchg(&(l)->lock, 0)
-#endif
-
-inline void _write_unlock(rwlock_t *lock)
-{
-    preempt_enable();
-    if ( _raw_write_unlock(lock) != RW_WRITE_FLAG )
-        BUG();
-}
-
-void _write_unlock_irq(rwlock_t *lock)
-{
-    _write_unlock(lock);
-    local_irq_enable();
-}
-
-void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
-    _write_unlock(lock);
-    local_irq_restore(flags);
-}
-
-int _rw_is_locked(rwlock_t *lock)
-{
-    check_lock(&lock->debug);
-    return (lock->lock != 0); /* anyone in critical section? */
-}
-
-int _rw_is_write_locked(rwlock_t *lock)
-{
-    check_lock(&lock->debug);
-    return (lock->lock == RW_WRITE_FLAG); /* writer in critical section? */
-}
-
 #ifdef LOCK_PROFILE
 
 struct lock_profile_anc {
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 9d87783..35657c5 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -3,6 +3,188 @@
 
 #include <xen/spinlock.h>
 
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+typedef struct {
+    atomic_t cnts;
+    spinlock_t lock;
+} rwlock_t;
+
+#define    RW_LOCK_UNLOCKED {           \
+    .cnts = ATOMIC_INIT(0),             \
+    .lock = SPIN_LOCK_UNLOCKED          \
+}
+
+#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
+#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
+
+/*
+ * Writer states & reader shift and bias.
+ *
+ * Writer field is 8 bit to allow for potential optimisation, see
+ * _write_unlock().
+ */
+#define    _QW_WAITING  1               /* A writer is waiting     */
+#define    _QW_LOCKED   0xff            /* A writer holds the lock */
+#define    _QW_WMASK    0xff            /* Writer mask.*/
+#define    _QR_SHIFT    8               /* Reader count shift      */
+#define    _QR_BIAS     (1U << _QR_SHIFT)
+
+void queue_read_lock_slowpath(rwlock_t *lock);
+void queue_write_lock_slowpath(rwlock_t *lock);
+
+/*
+ * _read_trylock - try to acquire read lock of a queue rwlock.
+ * @lock : Pointer to queue rwlock structure.
+ * Return: 1 if lock acquired, 0 if failed.
+ */
+static inline int _read_trylock(rwlock_t *lock)
+{
+    u32 cnts;
+
+    cnts = atomic_read(&lock->cnts);
+    if ( likely(!(cnts & _QW_WMASK)) )
+    {
+        cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+        if ( likely(!(cnts & _QW_WMASK)) )
+            return 1;
+        atomic_sub(_QR_BIAS, &lock->cnts);
+    }
+    return 0;
+}
+
+/*
+ * _read_lock - acquire read lock of a queue rwlock.
+ * @lock: Pointer to queue rwlock structure.
+ */
+static inline void _read_lock(rwlock_t *lock)
+{
+    u32 cnts;
+
+    cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+    if ( likely(!(cnts & _QW_WMASK)) )
+        return;
+
+    /* The slowpath will decrement the reader count, if necessary. */
+    queue_read_lock_slowpath(lock);
+}
+
+static inline void _read_lock_irq(rwlock_t *lock)
+{
+    ASSERT(local_irq_is_enabled());
+    local_irq_disable();
+    _read_lock(lock);
+}
+
+static inline unsigned long _read_lock_irqsave(rwlock_t *lock)
+{
+    unsigned long flags;
+    local_irq_save(flags);
+    _read_lock(lock);
+    return flags;
+}
+
+/*
+ * _read_unlock - release read lock of a queue rwlock.
+ * @lock : Pointer to queue rwlock structure.
+ */
+static inline void _read_unlock(rwlock_t *lock)
+{
+    /*
+     * Atomically decrement the reader count
+     */
+    atomic_sub(_QR_BIAS, &lock->cnts);
+}
+
+static inline void _read_unlock_irq(rwlock_t *lock)
+{
+    _read_unlock(lock);
+    local_irq_enable();
+}
+
+static inline void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+    _read_unlock(lock);
+    local_irq_restore(flags);
+}
+
+static inline int _rw_is_locked(rwlock_t *lock)
+{
+    return atomic_read(&lock->cnts);
+}
+
+/*
+ * queue_write_lock - acquire write lock of a queue rwlock.
+ * @lock : Pointer to queue rwlock structure.
+ */
+static inline void _write_lock(rwlock_t *lock)
+{
+    /* Optimize for the unfair lock case where the fair flag is 0. */
+    if ( atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0 )
+        return;
+
+    queue_write_lock_slowpath(lock);
+}
+
+static inline void _write_lock_irq(rwlock_t *lock)
+{
+    ASSERT(local_irq_is_enabled());
+    local_irq_disable();
+    _write_lock(lock);
+}
+
+static inline unsigned long _write_lock_irqsave(rwlock_t *lock)
+{
+    unsigned long flags;
+
+    local_irq_save(flags);
+    _write_lock(lock);
+    return flags;
+}
+
+/*
+ * queue_write_trylock - try to acquire write lock of a queue rwlock.
+ * @lock : Pointer to queue rwlock structure.
+ * Return: 1 if lock acquired, 0 if failed.
+ */
+static inline int _write_trylock(rwlock_t *lock)
+{
+    u32 cnts;
+
+    cnts = atomic_read(&lock->cnts);
+    if ( unlikely(cnts) )
+        return 0;
+
+    return likely(atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0);
+}
+
+static inline void _write_unlock(rwlock_t *lock)
+{
+    /*
+     * If the writer field is atomic, it can be cleared directly.
+     * Otherwise, an atomic subtraction will be used to clear it.
+     */
+    atomic_sub(_QW_LOCKED, &lock->cnts);
+}
+
+static inline void _write_unlock_irq(rwlock_t *lock)
+{
+    _write_unlock(lock);
+    local_irq_enable();
+}
+
+static inline void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+    _write_unlock(lock);
+    local_irq_restore(flags);
+}
+
+static inline int _rw_is_write_locked(rwlock_t *lock)
+{
+    return (atomic_read(&lock->cnts) & _QW_WMASK) == _QW_LOCKED;
+}
+
 #define read_lock(l)                  _read_lock(l)
 #define read_lock_irq(l)              _read_lock_irq(l)
 #define read_lock_irqsave(l, f)                                 \
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 765db51..77ab7d5 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -152,17 +152,6 @@ typedef struct spinlock {
 
 #define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
 
-typedef struct {
-    volatile uint32_t lock;
-    struct lock_debug debug;
-} rwlock_t;
-
-#define RW_WRITE_FLAG (1u<<31)
-
-#define RW_LOCK_UNLOCKED { 0, _LOCK_DEBUG }
-#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
-#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
-
 void _spin_lock(spinlock_t *lock);
 void _spin_lock_irq(spinlock_t *lock);
 unsigned long _spin_lock_irqsave(spinlock_t *lock);
@@ -179,27 +168,6 @@ int _spin_trylock_recursive(spinlock_t *lock);
 void _spin_lock_recursive(spinlock_t *lock);
 void _spin_unlock_recursive(spinlock_t *lock);
 
-void _read_lock(rwlock_t *lock);
-void _read_lock_irq(rwlock_t *lock);
-unsigned long _read_lock_irqsave(rwlock_t *lock);
-
-void _read_unlock(rwlock_t *lock);
-void _read_unlock_irq(rwlock_t *lock);
-void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
-int _read_trylock(rwlock_t *lock);
-
-void _write_lock(rwlock_t *lock);
-void _write_lock_irq(rwlock_t *lock);
-unsigned long _write_lock_irqsave(rwlock_t *lock);
-int _write_trylock(rwlock_t *lock);
-
-void _write_unlock(rwlock_t *lock);
-void _write_unlock_irq(rwlock_t *lock);
-void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
-
-int _rw_is_locked(rwlock_t *lock);
-int _rw_is_write_locked(rwlock_t *lock);
-
 #define spin_lock(l)                  _spin_lock(l)
 #define spin_lock_irq(l)              _spin_lock_irq(l)
 #define spin_lock_irqsave(l, f)                                 \
-- 
2.1.4

* Re: [PATCHv2 1/4] atomic: replace atomic_compareandswap() with atomic_cmpxchg()
  2016-01-26 16:25 ` [PATCHv2 1/4] atomic: replace atomic_compareandswap() with atomic_cmpxchg() David Vrabel
@ 2016-01-26 17:09   ` Ian Campbell
  0 siblings, 0 replies; 6+ messages in thread
From: Ian Campbell @ 2016-01-26 17:09 UTC (permalink / raw)
  To: David Vrabel, xen-devel; +Cc: Andrew Cooper, Jennifer Herbert, Jan Beulich

On Tue, 2016-01-26 at 16:25 +0000, David Vrabel wrote:
> atomic_compareandswap() used atomic_t as the new, old and returned
> values which is less convinient than using just int.

"convenient"

> diff --git a/xen/include/asm-arm/atomic.h b/xen/include/asm-arm/atomic.h
> index 5a38c67..29ab265 100644
> --- a/xen/include/asm-arm/atomic.h
> +++ b/xen/include/asm-arm/atomic.h
> @@ -138,14 +138,6 @@ static inline void _atomic_set(atomic_t *v, int i)
>  # error "unknown ARM variant"
>  #endif
>  
> -static inline atomic_t atomic_compareandswap(
> -    atomic_t old, atomic_t new, atomic_t *v)
> -{
> -    atomic_t rc;
> -    rc.counter = __cmpxchg(&v->counter, old.counter, new.counter,
> sizeof(int));
> -    return rc;
> -}


Acked-by: Ian Campbell <ian.campbell@citrix.com>


