From: Mark Rutland <mark.rutland@arm.com>
To: linux-kernel@vger.kernel.org, will@kernel.org,
	boqun.feng@gmail.com, peterz@infradead.org
Cc: aou@eecs.berkeley.edu, arnd@arndb.de, bcain@codeaurora.org,
	benh@kernel.crashing.org, chris@zankel.net, dalias@libc.org,
	davem@davemloft.net, deanbo422@gmail.com, deller@gmx.de,
	geert@linux-m68k.org, gerg@linux-m68k.org, green.hu@gmail.com,
	guoren@kernel.org, ink@jurassic.park.msu.ru,
	James.Bottomley@HansenPartnership.com, jcmvbkbc@gmail.com,
	jonas@southpole.se, ley.foon.tan@intel.com,
	linux@armlinux.org.uk, mark.rutland@arm.com, mattst88@gmail.com,
	monstr@monstr.eu, mpe@ellerman.id.au, nickhu@andestech.com,
	palmerdabbelt@google.com, paulus@samba.org,
	paul.walmsley@sifive.com, rth@twiddle.net, shorne@gmail.com,
	stefan.kristiansson@saunalahti.fi, tsbogend@alpha.franken.de,
	vgupta@synopsys.com, ysato@users.sourceforge.jp
Subject: [PATCH v2 22/33] locking/atomic: mips: move to ARCH_ATOMIC
Date: Tue, 25 May 2021 15:02:21 +0100
Message-ID: <20210525140232.53872-23-mark.rutland@arm.com>
In-Reply-To: <20210525140232.53872-1-mark.rutland@arm.com>

We'd like all architectures to convert to ARCH_ATOMIC, as once they have
all converted it will be possible to make significant cleanups to the
atomics headers, which in turn will make it much easier to generically
enable atomic functionality (e.g. debug logic in the instrumented
wrappers).

As a step towards that, this patch migrates mips to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.
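
For context, and not as part of the patch itself: with ARCH_ATOMIC
selected, the generic layer (generated into
<asm-generic/atomic-instrumented.h> by the scripts under scripts/atomic/)
wraps the arch_*() routines roughly as in the sketch below, where
instrument_atomic_read_write() is the KASAN/KCSAN hook. This is a
simplified illustration of the pattern, not the verbatim generated code:

	#include <linux/compiler_types.h>	/* __always_inline */
	#include <linux/instrumented.h>		/* instrument_atomic_read_write() */
	#include <linux/types.h>		/* atomic_t */

	/* Sketch: the regular op instruments the access, then defers to the arch op. */
	static __always_inline void
	atomic_add(int i, atomic_t *v)
	{
		instrument_atomic_read_write(v, sizeof(*v));
		arch_atomic_add(i, v);
	}

	static __always_inline int
	atomic_cmpxchg(atomic_t *v, int old, int new)
	{
		instrument_atomic_read_write(v, sizeof(*v));
		return arch_atomic_cmpxchg(v, old, new);
	}

With something like that in place, renaming e.g. mips' atomic_add() to
arch_atomic_add() lets the generic wrapper supply the atomic_add() that
callers see, without further changes to the mips implementation itself.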

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Will Deacon <will@kernel.org>
---
 arch/mips/Kconfig               |  1 +
 arch/mips/include/asm/atomic.h  | 55 ++++++++++++++++++++++-------------------
 arch/mips/include/asm/cmpxchg.h | 22 ++++++++---------
 arch/mips/kernel/cmpxchg.c      |  4 +--
 4 files changed, 43 insertions(+), 39 deletions(-)

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ed51970c08e7..55b4da96872f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -3,6 +3,7 @@ config MIPS
 	bool
 	default y
 	select ARCH_32BIT_OFF_T if !64BIT
+	select ARCH_ATOMIC
 	select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
 	select ARCH_HAS_DEBUG_VIRTUAL if !64BIT
 	select ARCH_HAS_FORTIFY_SOURCE
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 27ad76791539..95e1f7f3597f 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -25,24 +25,25 @@
 #include <asm/war.h>
 
 #define ATOMIC_OPS(pfx, type)						\
-static __always_inline type pfx##_read(const pfx##_t *v)		\
+static __always_inline type arch_##pfx##_read(const pfx##_t *v)		\
 {									\
 	return READ_ONCE(v->counter);					\
 }									\
 									\
-static __always_inline void pfx##_set(pfx##_t *v, type i)		\
+static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)	\
 {									\
 	WRITE_ONCE(v->counter, i);					\
 }									\
 									\
-static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n)	\
+static __always_inline type						\
+arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)			\
 {									\
-	return cmpxchg(&v->counter, o, n);				\
+	return arch_cmpxchg(&v->counter, o, n);				\
 }									\
 									\
-static __always_inline type pfx##_xchg(pfx##_t *v, type n)		\
+static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)	\
 {									\
-	return xchg(&v->counter, n);					\
+	return arch_xchg(&v->counter, n);				\
 }
 
 ATOMIC_OPS(atomic, int)
@@ -53,7 +54,7 @@ ATOMIC_OPS(atomic64, s64)
 #endif
 
 #define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
-static __inline__ void pfx##_##op(type i, pfx##_t * v)			\
+static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)		\
 {									\
 	type temp;							\
 									\
@@ -80,7 +81,8 @@ static __inline__ void pfx##_##op(type i, pfx##_t * v)			\
 }
 
 #define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
-static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v)	\
+static __inline__ type							\
+arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)			\
 {									\
 	type temp, result;						\
 									\
@@ -113,7 +115,8 @@ static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v)	\
 }
 
 #define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
-static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)	\
+static __inline__ type							\
+arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)			\
 {									\
 	int temp, result;						\
 									\
@@ -153,18 +156,18 @@ static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)	\
 ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
 ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
 
-#define atomic_add_return_relaxed	atomic_add_return_relaxed
-#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
 
 #ifdef CONFIG_64BIT
 ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
 ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
-# define atomic64_add_return_relaxed	atomic64_add_return_relaxed
-# define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
-# define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
-# define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
+# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
+# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
 #endif /* CONFIG_64BIT */
 
 #undef ATOMIC_OPS
@@ -176,17 +179,17 @@ ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
 ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
 ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
 
-#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
 
 #ifdef CONFIG_64BIT
 ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
 ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
 ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
-# define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
-# define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
-# define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
+# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
+# define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
+# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
 #endif
 
 #undef ATOMIC_OPS
@@ -203,7 +206,7 @@ ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
  * The function returns the old value of @v minus @i.
  */
 #define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
-static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v)	\
+static __inline__ int arch_##pfx##_sub_if_positive(type i, pfx##_t * v)	\
 {									\
 	type temp, result;						\
 									\
@@ -255,11 +258,11 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v)	\
 }
 
 ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
-#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
+#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)
 
 #ifdef CONFIG_64BIT
 ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
-#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
+#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
 #endif
 
 #undef ATOMIC_SIP_OP
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index c7e0455d4d46..0b983800f48b 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -90,7 +90,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
 	}
 }
 
-#define xchg(ptr, x)							\
+#define arch_xchg(ptr, x)						\
 ({									\
 	__typeof__(*(ptr)) __res;					\
 									\
@@ -175,14 +175,14 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	}
 }
 
-#define cmpxchg_local(ptr, old, new)					\
+#define arch_cmpxchg_local(ptr, old, new)				\
 	((__typeof__(*(ptr)))						\
 		__cmpxchg((ptr),					\
 			  (unsigned long)(__typeof__(*(ptr)))(old),	\
 			  (unsigned long)(__typeof__(*(ptr)))(new),	\
 			  sizeof(*(ptr))))
 
-#define cmpxchg(ptr, old, new)						\
+#define arch_cmpxchg(ptr, old, new)					\
 ({									\
 	__typeof__(*(ptr)) __res;					\
 									\
@@ -194,7 +194,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	if (__SYNC_loongson3_war == 0)					\
 		smp_mb__before_llsc();					\
 									\
-	__res = cmpxchg_local((ptr), (old), (new));			\
+	__res = arch_cmpxchg_local((ptr), (old), (new));		\
 									\
 	/*								\
 	 * In the Loongson3 workaround case __cmpxchg_asm() already	\
@@ -208,21 +208,21 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 })
 
 #ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n)					\
+#define arch_cmpxchg64_local(ptr, o, n)					\
   ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg_local((ptr), (o), (n));					\
+	arch_cmpxchg_local((ptr), (o), (n));				\
   })
 
-#define cmpxchg64(ptr, o, n)						\
+#define arch_cmpxchg64(ptr, o, n)					\
   ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg((ptr), (o), (n));					\
+	arch_cmpxchg((ptr), (o), (n));					\
   })
 #else
 
 # include <asm-generic/cmpxchg-local.h>
-# define cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+# define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
 
 # ifdef CONFIG_SMP
 
@@ -294,7 +294,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
 	return ret;
 }
 
-#  define cmpxchg64(ptr, o, n) ({					\
+#  define arch_cmpxchg64(ptr, o, n) ({					\
 	unsigned long long __old = (__typeof__(*(ptr)))(o);		\
 	unsigned long long __new = (__typeof__(*(ptr)))(n);		\
 	__typeof__(*(ptr)) __res;					\
@@ -317,7 +317,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
 })
 
 # else /* !CONFIG_SMP */
-#  define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#  define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
 # endif /* !CONFIG_SMP */
 #endif /* !CONFIG_64BIT */
 
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 89107deb03fc..ac9c8cfb2ba9 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -41,7 +41,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
 	do {
 		old32 = load32;
 		new32 = (load32 & ~mask) | (val << shift);
-		load32 = cmpxchg(ptr32, old32, new32);
+		load32 = arch_cmpxchg(ptr32, old32, new32);
 	} while (load32 != old32);
 
 	return (load32 & mask) >> shift;
@@ -97,7 +97,7 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
 		 */
 		old32 = (load32 & ~mask) | (old << shift);
 		new32 = (load32 & ~mask) | (new << shift);
-		load32 = cmpxchg(ptr32, old32, new32);
+		load32 = arch_cmpxchg(ptr32, old32, new32);
 		if (load32 == old32)
 			return old;
 	}
-- 
2.11.0


Thread overview: 83+ messages
2021-05-25 14:01 [PATCH v2 00/33] locking/atomic: convert all architectures to ARCH_ATOMIC Mark Rutland
2021-05-25 14:02 ` [PATCH v2 01/33] locking/atomic: make ARCH_ATOMIC a Kconfig symbol Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 02/33] locking/atomic: net: use linux/atomic.h for xchg & cmpxchg Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 03/33] locking/atomic: h8300: use asm-generic exclusively Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 04/33] locking/atomic: microblaze: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-31 12:31   ` [PATCH v2 04/33] " Michal Simek
2021-05-25 14:02 ` [PATCH v2 05/33] locking/atomic: openrisc: avoid asm-generic/atomic.h Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 06/33] locking/atomic: atomic: remove stale comments Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 07/33] locking/atomic: atomic: remove redundant include Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 08/33] locking/atomic: atomic: simplify ifdeffery Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 09/33] locking/atomic: atomic: support ARCH_ATOMIC Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 10/33] locking/atomic: atomic64: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 11/33] locking/atomic: cmpxchg: make `generic` a prefix Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 12/33] locking/atomic: cmpxchg: support ARCH_ATOMIC Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 13/33] locking/atomic: alpha: move to ARCH_ATOMIC Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 14/33] locking/atomic: arc: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 15/33] locking/atomic: arm: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 16/33] locking/atomic: csky: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 17/33] locking/atomic: h8300: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 18/33] locking/atomic: hexagon: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 19/33] locking/atomic: ia64: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 20/33] locking/atomic: m68k: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 21/33] locking/atomic: microblaze: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-31 12:31   ` [PATCH v2 21/33] " Michal Simek
2021-05-25 14:02 ` Mark Rutland [this message]
2021-05-26 11:24   ` [tip: locking/core] locking/atomic: mips: " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 23/33] locking/atomic: nds32: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 24/33] locking/atomic: nios2: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 25/33] locking/atomic: openrisc: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 26/33] locking/atomic: parisc: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 27/33] locking/atomic: powerpc: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 28/33] locking/atomic: riscv: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 29/33] locking/atomic: sh: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 30/33] locking/atomic: sparc: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 31/33] locking/atomic: xtensa: " Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 32/33] locking/atomic: delete !ARCH_ATOMIC remnants Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-25 14:02 ` [PATCH v2 33/33] locking/atomics: atomic-instrumented: simplify ifdeffery Mark Rutland
2021-05-26 11:24   ` [tip: locking/core] " tip-bot2 for Mark Rutland
2021-05-26 11:30 ` [PATCH v2 00/33] locking/atomic: convert all architectures to ARCH_ATOMIC Peter Zijlstra
2021-05-26 12:29   ` Junio C Hamano
2021-05-26 14:19     ` Peter Zijlstra
2021-06-05  5:56 ` Randy Dunlap
2021-06-18  8:48   ` Mark Rutland
2021-06-27 21:47     ` Randy Dunlap
2021-06-28 21:22       ` Randy Dunlap
2021-06-28 22:13         ` Peter Zijlstra
2021-06-28 22:21           ` Vineet Gupta
2021-06-28 22:21             ` Vineet Gupta
2021-06-28 22:21           ` Randy Dunlap
2021-06-29  7:36             ` Arnd Bergmann
2021-06-30  7:55               ` Peter Zijlstra
2021-06-30 12:24                 ` Arnd Bergmann
