From: Al Viro <viro@zeniv.linux.org.uk>
To: "Paul E. McKenney" <paulmck@kernel.org>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>,
	linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org,
	elver@google.com, akpm@linux-foundation.org, tglx@linutronix.de,
	peterz@infradead.org, dianders@chromium.org, pmladek@suse.com,
	arnd@arndb.de, torvalds@linux-foundation.org,
	kernel-team@meta.com, Andi Shyti <andi.shyti@linux.intel.com>,
	Palmer Dabbelt <palmer@rivosinc.com>,
	Masami Hiramatsu <mhiramat@kernel.org>,
	linux-sh@vger.kernel.org, linux-alpha@vger.kernel.org
Subject: alpha cmpxchg.h (was Re: [PATCH v2 cmpxchg 12/13] sh: Emulate one-byte cmpxchg)
Date: Thu, 2 May 2024 22:01:22 +0100
Message-ID: <20240502210122.GA2322432@ZenIV>
In-Reply-To: <20240502205345.GK2118490@ZenIV>

On Thu, May 02, 2024 at 09:53:45PM +0100, Al Viro wrote:
> What's more, load-locked/store-conditional doesn't have 16bit and 8bit
> variants on any Alphas - it's always 32bit (ldl_l) or 64bit (ldq_l).
> 
> What BWX adds is load/store byte/word, load/store byte/word unaligned
> and sign-extend byte/word.  IOW, it's absolutely irrelevant for
> cmpxchg (or xchg) purposes.
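
For anyone following along: the usual trick for faking a one-byte cmpxchg
on hardware like that is a full-word cmpxchg on the aligned word containing
the byte, with the masking/merging done in software - which is what the
alpha asm below does inline with insbl/extbl/mskbl, just at ldq_l/stq_c
granularity.  A minimal C sketch of that word-based approach, assuming the
kernel's 32-bit cmpxchg()/READ_ONCE() and little-endian byte numbering
(the helper name is made up for illustration):

	static inline u8 cmpxchg_u8_sketch(volatile u8 *p, u8 old, u8 new)
	{
		/* aligned 32-bit word holding the byte, and the byte's offset in it */
		volatile u32 *w = (volatile u32 *)((unsigned long)p & ~3UL);
		int shift = ((unsigned long)p & 3UL) * 8;	/* little-endian */
		u32 mask = 0xffU << shift;
		u32 seen, repl;

		do {
			seen = READ_ONCE(*w);
			if (((seen & mask) >> shift) != old)
				return (seen & mask) >> shift;	/* mismatch: report what we saw */
			repl = (seen & ~mask) | ((u32)new << shift);
			/* retry if any byte of the word changed under us */
		} while (cmpxchg(w, seen, repl) != seen);

		return old;
	}

Success returns the old value, same as the real thing; on a mismatch it
reports whatever byte value it actually observed.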

FWIW, I do have a cmpxchg-related patch for alpha - the mess with xchg.h
(the parametrized double include) is no longer needed, and hasn't been since
2018 (fbfcd0199170 "locking/xchg/alpha: Remove superfluous memory barriers
from the _local() variants" was the point where things settled down).
It's only tangentially related to your stuff, but it makes the damn thing
easier to follow.
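
For context, the trick being removed: asm/xchg.h carried the helper bodies
with ____xchg()/____cmpxchg() as token-pasting macros, and asm/cmpxchg.h
pulled it in twice - first as

	#define ____xchg(type, args...)		__arch_xchg ## type ## _local(args)
	#define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
	#include <asm/xchg.h>

to spit out the _local variants, then again with the macros redefined to
the non-_local names, so one set of bodies generated both families.  Now
that the barriers live in arch_{cmp,}xchg(), the two families are textually
identical and the bodies can simply sit in asm/cmpxchg.h once, as the patch
below does.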


commit e992b5436ccd504b07a390118cf2be686355b957
Author: Al Viro <viro@zeniv.linux.org.uk>
Date:   Mon Apr 8 17:43:37 2024 -0400

    alpha: no need to include asm/xchg.h twice
    
    We used to generate different helpers for local and full
    {cmp,}xchg(); these days the barriers are in arch_{cmp,}xchg()
    instead and generated helpers are identical for local and
    full cases.  No need for those parametrized includes of
    asm/xchg.h - we might as well insert its contents directly
    in asm/cmpxchg.h and do it only once.
    
    Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 91d4a4d9258c..ae1b96479d0c 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -3,17 +3,232 @@
 #define _ALPHA_CMPXCHG_H
 
 /*
- * Atomic exchange routines.
+ * Atomic exchange.
+ * Since it can be used to implement critical sections
+ * it must clobber "memory" (also for interrupts in UP).
  */
 
-#define ____xchg(type, args...)		__arch_xchg ## type ## _local(args)
-#define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
-#include <asm/xchg.h>
+static inline unsigned long
+____xchg_u8(volatile char *m, unsigned long val)
+{
+	unsigned long ret, tmp, addr64;
+
+	__asm__ __volatile__(
+	"	andnot	%4,7,%3\n"
+	"	insbl	%1,%4,%1\n"
+	"1:	ldq_l	%2,0(%3)\n"
+	"	extbl	%2,%4,%0\n"
+	"	mskbl	%2,%4,%2\n"
+	"	or	%1,%2,%2\n"
+	"	stq_c	%2,0(%3)\n"
+	"	beq	%2,2f\n"
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+	: "r" ((long)m), "1" (val) : "memory");
+
+	return ret;
+}
+
+static inline unsigned long
+____xchg_u16(volatile short *m, unsigned long val)
+{
+	unsigned long ret, tmp, addr64;
+
+	__asm__ __volatile__(
+	"	andnot	%4,7,%3\n"
+	"	inswl	%1,%4,%1\n"
+	"1:	ldq_l	%2,0(%3)\n"
+	"	extwl	%2,%4,%0\n"
+	"	mskwl	%2,%4,%2\n"
+	"	or	%1,%2,%2\n"
+	"	stq_c	%2,0(%3)\n"
+	"	beq	%2,2f\n"
+	".subsection 2\n"
+	"2:	br	1b\n"
+	".previous"
+	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+	: "r" ((long)m), "1" (val) : "memory");
+
+	return ret;
+}
+
+static inline unsigned long
+____xchg_u32(volatile int *m, unsigned long val)
+{
+	unsigned long dummy;
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%4\n"
+	"	bis $31,%3,%1\n"
+	"	stl_c %1,%2\n"
+	"	beq %1,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	: "=&r" (val), "=&r" (dummy), "=m" (*m)
+	: "rI" (val), "m" (*m) : "memory");
+
+	return val;
+}
+
+static inline unsigned long
+____xchg_u64(volatile long *m, unsigned long val)
+{
+	unsigned long dummy;
+
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%4\n"
+	"	bis $31,%3,%1\n"
+	"	stq_c %1,%2\n"
+	"	beq %1,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	: "=&r" (val), "=&r" (dummy), "=m" (*m)
+	: "rI" (val), "m" (*m) : "memory");
+
+	return val;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid xchg().  */
+extern void __xchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+____xchg(volatile void *ptr, unsigned long x, int size)
+{
+	return
+		size == 1 ? ____xchg_u8(ptr, x) :
+		size == 2 ? ____xchg_u16(ptr, x) :
+		size == 4 ? ____xchg_u32(ptr, x) :
+		size == 8 ? ____xchg_u64(ptr, x) :
+			(__xchg_called_with_bad_pointer(), x);
+}
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+static inline unsigned long
+____cmpxchg_u8(volatile char *m, unsigned char old, unsigned char new)
+{
+	unsigned long prev, tmp, cmp, addr64;
+
+	__asm__ __volatile__(
+	"	andnot	%5,7,%4\n"
+	"	insbl	%1,%5,%1\n"
+	"1:	ldq_l	%2,0(%4)\n"
+	"	extbl	%2,%5,%0\n"
+	"	cmpeq	%0,%6,%3\n"
+	"	beq	%3,2f\n"
+	"	mskbl	%2,%5,%2\n"
+	"	or	%1,%2,%2\n"
+	"	stq_c	%2,0(%4)\n"
+	"	beq	%2,3f\n"
+	"2:\n"
+	".subsection 2\n"
+	"3:	br	1b\n"
+	".previous"
+	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+	return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u16(volatile short *m, unsigned short old, unsigned short new)
+{
+	unsigned long prev, tmp, cmp, addr64;
+
+	__asm__ __volatile__(
+	"	andnot	%5,7,%4\n"
+	"	inswl	%1,%5,%1\n"
+	"1:	ldq_l	%2,0(%4)\n"
+	"	extwl	%2,%5,%0\n"
+	"	cmpeq	%0,%6,%3\n"
+	"	beq	%3,2f\n"
+	"	mskwl	%2,%5,%2\n"
+	"	or	%1,%2,%2\n"
+	"	stq_c	%2,0(%4)\n"
+	"	beq	%2,3f\n"
+	"2:\n"
+	".subsection 2\n"
+	"3:	br	1b\n"
+	".previous"
+	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+	return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u32(volatile int *m, int old, int new)
+{
+	unsigned long prev, cmp;
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%5\n"
+	"	cmpeq %0,%3,%1\n"
+	"	beq %1,2f\n"
+	"	mov %4,%1\n"
+	"	stl_c %1,%2\n"
+	"	beq %1,3f\n"
+	"2:\n"
+	".subsection 2\n"
+	"3:	br 1b\n"
+	".previous"
+	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
+	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+	return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+{
+	unsigned long prev, cmp;
+
+	__asm__ __volatile__(
+	"1:	ldq_l %0,%5\n"
+	"	cmpeq %0,%3,%1\n"
+	"	beq %1,2f\n"
+	"	mov %4,%1\n"
+	"	stq_c %1,%2\n"
+	"	beq %1,3f\n"
+	"2:\n"
+	".subsection 2\n"
+	"3:	br 1b\n"
+	".previous"
+	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
+	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+	return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg().  */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+____cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+	      int size)
+{
+	return
+		size == 1 ? ____cmpxchg_u8(ptr, old, new) :
+		size == 2 ? ____cmpxchg_u16(ptr, old, new) :
+		size == 4 ? ____cmpxchg_u32(ptr, old, new) :
+		size == 8 ? ____cmpxchg_u64(ptr, old, new) :
+			(__cmpxchg_called_with_bad_pointer(), old);
+}
 
 #define xchg_local(ptr, x)						\
 ({									\
 	__typeof__(*(ptr)) _x_ = (x);					\
-	(__typeof__(*(ptr))) __arch_xchg_local((ptr), (unsigned long)_x_,\
+	(__typeof__(*(ptr))) ____xchg((ptr), (unsigned long)_x_,	\
 					       sizeof(*(ptr)));		\
 })
 
@@ -21,7 +236,7 @@
 ({									\
 	__typeof__(*(ptr)) _o_ = (o);					\
 	__typeof__(*(ptr)) _n_ = (n);					\
-	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	\
+	(__typeof__(*(ptr))) ____cmpxchg((ptr), (unsigned long)_o_,	\
 					  (unsigned long)_n_,		\
 					  sizeof(*(ptr)));		\
 })
@@ -32,12 +247,6 @@
 	cmpxchg_local((ptr), (o), (n));					\
 })
 
-#undef ____xchg
-#undef ____cmpxchg
-#define ____xchg(type, args...)		__arch_xchg ##type(args)
-#define ____cmpxchg(type, args...)	__cmpxchg ##type(args)
-#include <asm/xchg.h>
-
 /*
  * The leading and the trailing memory barriers guarantee that these
  * operations are fully ordered.
@@ -48,7 +257,7 @@
 	__typeof__(*(ptr)) _x_ = (x);					\
 	smp_mb();							\
 	__ret = (__typeof__(*(ptr)))					\
-		__arch_xchg((ptr), (unsigned long)_x_, sizeof(*(ptr)));	\
+		____xchg((ptr), (unsigned long)_x_, sizeof(*(ptr)));	\
 	smp_mb();							\
 	__ret;								\
 })
@@ -59,7 +268,7 @@
 	__typeof__(*(ptr)) _o_ = (o);					\
 	__typeof__(*(ptr)) _n_ = (n);					\
 	smp_mb();							\
-	__ret = (__typeof__(*(ptr))) __cmpxchg((ptr),			\
+	__ret = (__typeof__(*(ptr))) ____cmpxchg((ptr),			\
 		(unsigned long)_o_, (unsigned long)_n_, sizeof(*(ptr)));\
 	smp_mb();							\
 	__ret;								\
@@ -71,6 +280,4 @@
 	arch_cmpxchg((ptr), (o), (n));					\
 })
 
-#undef ____cmpxchg
-
 #endif /* _ALPHA_CMPXCHG_H */
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
deleted file mode 100644
index 7adb80c6746a..000000000000
--- a/arch/alpha/include/asm/xchg.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ALPHA_CMPXCHG_H
-#error Do not include xchg.h directly!
-#else
-/*
- * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code
- * except that local version do not have the expensive memory barrier.
- * So this file is included twice from asm/cmpxchg.h.
- */
-
-/*
- * Atomic exchange.
- * Since it can be used to implement critical sections
- * it must clobber "memory" (also for interrupts in UP).
- */
-
-static inline unsigned long
-____xchg(_u8, volatile char *m, unsigned long val)
-{
-	unsigned long ret, tmp, addr64;
-
-	__asm__ __volatile__(
-	"	andnot	%4,7,%3\n"
-	"	insbl	%1,%4,%1\n"
-	"1:	ldq_l	%2,0(%3)\n"
-	"	extbl	%2,%4,%0\n"
-	"	mskbl	%2,%4,%2\n"
-	"	or	%1,%2,%2\n"
-	"	stq_c	%2,0(%3)\n"
-	"	beq	%2,2f\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
-	: "r" ((long)m), "1" (val) : "memory");
-
-	return ret;
-}
-
-static inline unsigned long
-____xchg(_u16, volatile short *m, unsigned long val)
-{
-	unsigned long ret, tmp, addr64;
-
-	__asm__ __volatile__(
-	"	andnot	%4,7,%3\n"
-	"	inswl	%1,%4,%1\n"
-	"1:	ldq_l	%2,0(%3)\n"
-	"	extwl	%2,%4,%0\n"
-	"	mskwl	%2,%4,%2\n"
-	"	or	%1,%2,%2\n"
-	"	stq_c	%2,0(%3)\n"
-	"	beq	%2,2f\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
-	: "r" ((long)m), "1" (val) : "memory");
-
-	return ret;
-}
-
-static inline unsigned long
-____xchg(_u32, volatile int *m, unsigned long val)
-{
-	unsigned long dummy;
-
-	__asm__ __volatile__(
-	"1:	ldl_l %0,%4\n"
-	"	bis $31,%3,%1\n"
-	"	stl_c %1,%2\n"
-	"	beq %1,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	: "=&r" (val), "=&r" (dummy), "=m" (*m)
-	: "rI" (val), "m" (*m) : "memory");
-
-	return val;
-}
-
-static inline unsigned long
-____xchg(_u64, volatile long *m, unsigned long val)
-{
-	unsigned long dummy;
-
-	__asm__ __volatile__(
-	"1:	ldq_l %0,%4\n"
-	"	bis $31,%3,%1\n"
-	"	stq_c %1,%2\n"
-	"	beq %1,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	: "=&r" (val), "=&r" (dummy), "=m" (*m)
-	: "rI" (val), "m" (*m) : "memory");
-
-	return val;
-}
-
-/* This function doesn't exist, so you'll get a linker error
-   if something tries to do an invalid xchg().  */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-____xchg(, volatile void *ptr, unsigned long x, int size)
-{
-	switch (size) {
-		case 1:
-			return ____xchg(_u8, ptr, x);
-		case 2:
-			return ____xchg(_u16, ptr, x);
-		case 4:
-			return ____xchg(_u32, ptr, x);
-		case 8:
-			return ____xchg(_u64, ptr, x);
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-
-static inline unsigned long
-____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
-{
-	unsigned long prev, tmp, cmp, addr64;
-
-	__asm__ __volatile__(
-	"	andnot	%5,7,%4\n"
-	"	insbl	%1,%5,%1\n"
-	"1:	ldq_l	%2,0(%4)\n"
-	"	extbl	%2,%5,%0\n"
-	"	cmpeq	%0,%6,%3\n"
-	"	beq	%3,2f\n"
-	"	mskbl	%2,%5,%2\n"
-	"	or	%1,%2,%2\n"
-	"	stq_c	%2,0(%4)\n"
-	"	beq	%2,3f\n"
-	"2:\n"
-	".subsection 2\n"
-	"3:	br	1b\n"
-	".previous"
-	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
-	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
-
-	return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
-{
-	unsigned long prev, tmp, cmp, addr64;
-
-	__asm__ __volatile__(
-	"	andnot	%5,7,%4\n"
-	"	inswl	%1,%5,%1\n"
-	"1:	ldq_l	%2,0(%4)\n"
-	"	extwl	%2,%5,%0\n"
-	"	cmpeq	%0,%6,%3\n"
-	"	beq	%3,2f\n"
-	"	mskwl	%2,%5,%2\n"
-	"	or	%1,%2,%2\n"
-	"	stq_c	%2,0(%4)\n"
-	"	beq	%2,3f\n"
-	"2:\n"
-	".subsection 2\n"
-	"3:	br	1b\n"
-	".previous"
-	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
-	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
-
-	return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u32, volatile int *m, int old, int new)
-{
-	unsigned long prev, cmp;
-
-	__asm__ __volatile__(
-	"1:	ldl_l %0,%5\n"
-	"	cmpeq %0,%3,%1\n"
-	"	beq %1,2f\n"
-	"	mov %4,%1\n"
-	"	stl_c %1,%2\n"
-	"	beq %1,3f\n"
-	"2:\n"
-	".subsection 2\n"
-	"3:	br 1b\n"
-	".previous"
-	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
-	: "r"((long) old), "r"(new), "m"(*m) : "memory");
-
-	return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
-{
-	unsigned long prev, cmp;
-
-	__asm__ __volatile__(
-	"1:	ldq_l %0,%5\n"
-	"	cmpeq %0,%3,%1\n"
-	"	beq %1,2f\n"
-	"	mov %4,%1\n"
-	"	stq_c %1,%2\n"
-	"	beq %1,3f\n"
-	"2:\n"
-	".subsection 2\n"
-	"3:	br 1b\n"
-	".previous"
-	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
-	: "r"((long) old), "r"(new), "m"(*m) : "memory");
-
-	return prev;
-}
-
-/* This function doesn't exist, so you'll get a linker error
-   if something tries to do an invalid cmpxchg().  */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-____cmpxchg(, volatile void *ptr, unsigned long old, unsigned long new,
-	      int size)
-{
-	switch (size) {
-		case 1:
-			return ____cmpxchg(_u8, ptr, old, new);
-		case 2:
-			return ____cmpxchg(_u16, ptr, old, new);
-		case 4:
-			return ____cmpxchg(_u32, ptr, old, new);
-		case 8:
-			return ____cmpxchg(_u64, ptr, old, new);
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-#endif
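
To see how the pieces line up once the patch is applied: for a one-byte
object (names here purely illustrative), something like

	u8 flag;
	...
	old = arch_cmpxchg(&flag, 0, 1);

expands, per the macros above, to roughly

	smp_mb();
	____cmpxchg(&flag, 0, 1, sizeof(flag));		/* size == 1 */
	/* -> ____cmpxchg_u8(): the ldq_l/extbl/cmpeq/mskbl/stq_c loop */
	smp_mb();

i.e. alpha covers the 8- and 16-bit cases by masking within the ll/sc
granule, BWX or no BWX.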

Thread overview: 149+ messages
2024-04-01 21:39 [PATCH RFC cmpxchg 0/8] Provide emulation for one- and two-byte cmpxchg() Paul E. McKenney
2024-04-01 21:39 ` [PATCH RFC cmpxchg 1/8] lib: Add one-byte and two-byte cmpxchg() emulation functions Paul E. McKenney
2024-04-02 13:07   ` Marco Elver
2024-04-02 17:15     ` Paul E. McKenney
2024-04-01 21:39 ` [PATCH RFC cmpxchg 2/8] sparc: Emulate one-byte and two-byte cmpxchg Paul E. McKenney
2024-04-01 22:38   ` Al Viro
2024-04-01 23:58     ` Paul E. McKenney
2024-04-02  0:07       ` Al Viro
2024-04-02  3:37         ` Al Viro
2024-04-02  4:11           ` Al Viro
2024-04-02  4:18             ` Paul E. McKenney
2024-04-02  4:28             ` [PATCH 1/8] sparc32: make __cmpxchg_u32() return u32 Al Viro
2024-04-02  4:28               ` [PATCH 2/8] sparc32: make the first argument of __cmpxchg_u64() volatile u64 * Al Viro
2024-04-02  4:28               ` [PATCH 3/8] sparc32: unify __cmpxchg_u{32,64} Al Viro
2024-04-02  7:28                 ` Arnd Bergmann
2024-04-02 20:02                   ` Paul E. McKenney
2024-04-02  4:28               ` [PATCH 4/8] sparc32: add __cmpxchg_u{8,16}() and teach __cmpxchg() to handle those sizes Al Viro
2024-04-02  4:28               ` [PATCH 5/8] parisc: __cmpxchg_u32(): lift conversion into the callers Al Viro
2024-04-02  4:28               ` [PATCH 6/8] parisc: unify implementations of __cmpxchg_u{8,32,64} Al Viro
2024-04-02  4:28               ` [PATCH 7/8] parisc: add missing export of __cmpxchg_u8() Al Viro
2024-04-02  4:28               ` [PATCH 8/8] parisc: add u16 support to cmpxchg() Al Viro
2024-04-02 20:03               ` [PATCH 1/8] sparc32: make __cmpxchg_u32() return u32 Paul E. McKenney
2024-04-03 22:20                 ` Al Viro
2024-04-04  3:09                   ` Paul E. McKenney
2024-04-02  4:17           ` [PATCH RFC cmpxchg 2/8] sparc: Emulate one-byte and two-byte cmpxchg Paul E. McKenney
2024-04-01 21:39 ` [PATCH RFC cmpxchg 3/8] ARC: " Paul E. McKenney
2024-04-01 21:39   ` Paul E. McKenney
2024-04-02  8:14   ` Arnd Bergmann
2024-04-02  8:14     ` Arnd Bergmann
2024-04-02 17:06     ` Paul E. McKenney
2024-04-02 17:06       ` Paul E. McKenney
2024-04-02 20:52       ` Paul E. McKenney
2024-04-02 20:52         ` Paul E. McKenney
2024-04-04 11:57       ` Arnd Bergmann
2024-04-04 11:57         ` Arnd Bergmann
2024-04-04 14:44         ` Paul E. McKenney
2024-04-04 14:44           ` Paul E. McKenney
2024-04-04 15:06           ` Arnd Bergmann
2024-04-04 15:06             ` Arnd Bergmann
2024-04-01 21:39 ` [PATCH RFC cmpxchg 4/8] csky: " Paul E. McKenney
2024-04-01 21:39 ` [PATCH RFC cmpxchg 5/8] sh: " Paul E. McKenney
2024-04-01 21:39 ` [PATCH RFC cmpxchg 6/8] xtensa: " Paul E. McKenney
2024-04-01 21:39 ` [PATCH RFC cmpxchg 7/8] parisc: Emulate " Paul E. McKenney
2024-04-01 21:39 ` [PATCH RFC cmpxchg 8/8] riscv: Emulate one-byte and " Paul E. McKenney
2024-04-01 21:39   ` Paul E. McKenney
2024-04-04 14:15   ` Palmer Dabbelt
2024-04-04 14:15     ` Palmer Dabbelt
2024-04-04 14:50     ` Paul E. McKenney
2024-04-04 14:50       ` Paul E. McKenney
2024-05-11  6:50     ` Guo Ren
2024-05-11  6:50       ` Guo Ren
2024-05-11 14:54       ` Paul E. McKenney
2024-05-11 14:54         ` Paul E. McKenney
2024-05-11 20:44         ` Leonardo Bras
2024-05-11 20:44           ` Leonardo Bras
2024-04-08 17:47 ` [PATCH RFC cmpxchg 0/8] Provide emulation for one- and two-byte cmpxchg() Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 01/14] sparc32: make __cmpxchg_u32() return u32 Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 02/14] sparc32: make the first argument of __cmpxchg_u64() volatile u64 * Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 03/14] sparc32: unify __cmpxchg_u{32,64} Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 04/14] sparc32: add __cmpxchg_u{8,16}() and teach __cmpxchg() to handle those sizes Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 05/14] parisc: __cmpxchg_u32(): lift conversion into the callers Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 06/14] parisc: unify implementations of __cmpxchg_u{8,32,64} Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 07/14] parisc: add missing export of __cmpxchg_u8() Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 08/14] parisc: add u16 support to cmpxchg() Paul E. McKenney
2024-04-08 20:10     ` Linus Torvalds
2024-04-08 20:53       ` Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 09/14] lib: Add one-byte emulation function Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 10/14] ARC: Emulate one-byte cmpxchg Paul E. McKenney
2024-04-08 17:49     ` Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 11/14] csky: " Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 12/14] sh: " Paul E. McKenney
2024-04-18  8:04     ` Geert Uytterhoeven
2024-04-08 17:49   ` [PATCH cmpxchg 13/14] xtensa: " Paul E. McKenney
2024-04-18  8:06     ` Geert Uytterhoeven
2024-04-18 23:21       ` Paul E. McKenney
2024-04-19  5:07         ` Yujie Liu
2024-04-19  8:02           ` Geert Uytterhoeven
2024-04-20 14:03             ` Paul E. McKenney
2024-04-08 17:49   ` [PATCH cmpxchg 14/14] riscv: " Paul E. McKenney
2024-04-08 17:49     ` Paul E. McKenney
2024-04-09 17:35     ` Andrea Parri
2024-04-09 17:35       ` Andrea Parri
2024-04-09 18:08       ` Paul E. McKenney
2024-04-09 18:08         ` Paul E. McKenney
2024-05-01 22:58   ` [PATCH v2 cmpxchg 0/8] Provide emulation for one--byte cmpxchg() Paul E. McKenney
2024-05-01 23:01     ` [PATCH v2 cmpxchg 01/13] sparc32: make __cmpxchg_u32() return u32 Paul E. McKenney
2024-05-01 23:01     ` [PATCH v2 cmpxchg 02/13] sparc32: make the first argument of __cmpxchg_u64() volatile u64 * Paul E. McKenney
2024-05-01 23:01     ` [PATCH v2 cmpxchg 03/13] sparc32: unify __cmpxchg_u{32,64} Paul E. McKenney
2024-05-01 23:01     ` [PATCH v2 cmpxchg 04/13] sparc32: add __cmpxchg_u{8,16}() and teach __cmpxchg() to handle those sizes Paul E. McKenney
2024-05-01 23:01     ` [PATCH v2 cmpxchg 05/13] parisc: __cmpxchg_u32(): lift conversion into the callers Paul E. McKenney
2024-05-01 23:01     ` [PATCH v2 cmpxchg 06/13] parisc: unify implementations of __cmpxchg_u{8,32,64} Paul E. McKenney
2024-05-01 23:01     ` [PATCH v2 cmpxchg 07/13] parisc: add missing export of __cmpxchg_u8() Paul E. McKenney
2024-05-01 23:01     ` [PATCH v2 cmpxchg 08/13] parisc: add u16 support to cmpxchg() Paul E. McKenney
2024-05-01 23:01     ` [PATCH v2 cmpxchg 09/13] lib: Add one-byte emulation function Paul E. McKenney
2024-05-13 14:44       ` Boqun Feng
2024-05-13 15:41         ` Paul E. McKenney
2024-05-13 15:57           ` Boqun Feng
2024-05-13 21:19             ` Boqun Feng
2024-05-14 14:22               ` Paul E. McKenney
2024-05-14 14:53                 ` Boqun Feng
2024-05-14 15:02                   ` Paul E. McKenney
2024-05-01 23:01     ` [PATCH v2 cmpxchg 10/13] ARC: Emulate one-byte cmpxchg Paul E. McKenney
2024-05-01 23:01       ` Paul E. McKenney
2024-05-01 23:01     ` [PATCH v2 cmpxchg 11/13] csky: " Paul E. McKenney
2024-05-11  6:42       ` Guo Ren
2024-05-11 14:49         ` Paul E. McKenney
2024-05-01 23:01     ` [PATCH v2 cmpxchg 12/13] sh: " Paul E. McKenney
2024-05-02  4:52       ` John Paul Adrian Glaubitz
2024-05-02  5:06         ` Paul E. McKenney
2024-05-02  5:11           ` John Paul Adrian Glaubitz
2024-05-02 13:33             ` Paul E. McKenney
2024-05-02 20:53               ` Al Viro
2024-05-02 21:01                 ` Al Viro [this message]
2024-05-02 22:16                   ` alpha cmpxchg.h (was Re: [PATCH v2 cmpxchg 12/13] sh: Emulate one-byte cmpxchg) Linus Torvalds
2024-05-02 21:18                 ` [PATCH v2 cmpxchg 12/13] sh: Emulate one-byte cmpxchg Paul E. McKenney
2024-05-02 22:07                   ` Al Viro
2024-05-02 23:12                     ` Paul E. McKenney
2024-05-02 23:24                       ` Al Viro
2024-05-02 23:45                         ` Paul E. McKenney
2024-05-02 23:32                       ` Linus Torvalds
2024-05-03  0:16                         ` Paul E. McKenney
2024-05-02 21:50               ` Arnd Bergmann
2024-05-02  5:42           ` D. Jeff Dionne
2024-05-02 11:30             ` Arnd Bergmann
2024-05-01 23:01     ` [PATCH v2 cmpxchg 13/13] xtensa: " Paul E. McKenney
2024-05-02 20:01     ` [PATCH v2 cmpxchg 0/8] Provide emulation for one--byte cmpxchg() Al Viro
2024-05-02 21:20       ` Paul E. McKenney
2024-06-04 17:02     ` [PATCH v3 cmpxchg 0/4] " Paul E. McKenney
2024-06-04 17:04       ` [PATCH v3 cmpxchg 1/4] ARC: Emulate one-byte cmpxchg Paul E. McKenney
2024-06-04 17:04         ` Paul E. McKenney
2024-06-04 17:04       ` [PATCH v3 cmpxchg 2/4] sh: " Paul E. McKenney
2024-06-04 17:09         ` John Paul Adrian Glaubitz
2024-06-04 17:50           ` Paul E. McKenney
2024-06-04 17:56             ` John Paul Adrian Glaubitz
2024-06-04 21:14               ` Paul E. McKenney
2024-06-17 16:50                 ` Paul E. McKenney
2024-06-17 20:30                   ` John Paul Adrian Glaubitz
2024-06-17 21:59                     ` Paul E. McKenney
2024-06-04 17:04       ` [PATCH v3 cmpxchg 3/4] xtensa: " Paul E. McKenney
2024-06-04 17:04       ` [PATCH v3 cmpxchg 4/4] ARM: " Paul E. McKenney
2024-06-04 17:04         ` Paul E. McKenney
2024-06-04 20:52         ` Linus Walleij
2024-06-04 20:52           ` Linus Walleij
2024-06-04 21:14           ` Paul E. McKenney
2024-06-04 21:14             ` Paul E. McKenney
2024-06-05  8:38             ` Linus Walleij
2024-06-05  8:38               ` Linus Walleij
2024-06-05 18:05               ` Paul E. McKenney
2024-06-05 18:05                 ` Paul E. McKenney
