From: Paul Burton <paul.burton@mips.com>
To: "linux-mips@vger.kernel.org" <linux-mips@vger.kernel.org>
Cc: Huacai Chen <chenhc@lemote.com>,
Jiaxun Yang <jiaxun.yang@flygoat.com>,
"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
Paul Burton <pburton@wavecomp.com>
Subject: [PATCH 11/37] MIPS: atomic: Use one macro to generate 32b & 64b functions
Date: Mon, 30 Sep 2019 23:08:23 +0000
Message-ID: <20190930230806.2940505-12-paul.burton@mips.com>
In-Reply-To: <20190930230806.2940505-1-paul.burton@mips.com>
Cut down on duplication by generalizing the ATOMIC_OP(),
ATOMIC_OP_RETURN() & ATOMIC_FETCH_OP() macros to work for both 32b &
64b atomics, and removing the ATOMIC64_ variants. This ensures
consistency between our atomic_* & atomic64_* functions.
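For example, ATOMIC_OP(atomic64, add, s64, +=, daddu, lld, scd) now
expands to the same atomic64_add() that the old ATOMIC64_OP() macro
generated. The token-pasting idea itself, reduced to a minimal
compilable userspace sketch (the DEMO_OP_RETURN & demo_* names below
are invented purely for illustration, & the LL/SC asm is elided
entirely; this is not the kernel code):

  #include <stdint.h>
  #include <stdio.h>

  /* One macro body, parameterized by prefix & type, emits both the
   * 32b & 64b variants of a function family via token pasting. */
  #define DEMO_OP_RETURN(pfx, op, type, c_op)                  \
  static inline type pfx##_##op##_return(type i, type *v)      \
  {                                                            \
          *v c_op i;  /* the c_op parameter, e.g. += or -= */  \
          return *v;                                           \
  }

  DEMO_OP_RETURN(demo32, add, int32_t, +=) /* demo32_add_return() */
  DEMO_OP_RETURN(demo64, add, int64_t, +=) /* demo64_add_return() */

  int main(void)
  {
          int32_t a = 1;
          int64_t b = 1;

          /* Both functions were generated by the single macro above. */
          printf("%d %lld\n", (int)demo32_add_return(2, &a),
                 (long long)demo64_add_return(2, &b));
          return 0;
  }

The real macros work the same way, additionally threading the load &
store mnemonics (ll/sc vs lld/scd) into the inline asm as strings.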
Signed-off-by: Paul Burton <paul.burton@mips.com>
---
arch/mips/include/asm/atomic.h | 198 ++++++-------------------
1 file changed, 46 insertions(+), 152 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index ace2ea005588..b834af5a7382 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -42,10 +42,10 @@
*/
#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
-#define ATOMIC_OP(op, c_op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
+#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ void pfx##_##op(type i, pfx##_t * v) \
{ \
- int temp; \
+ type temp; \
\
if (!kernel_uses_llsc) { \
unsigned long flags; \
@@ -60,19 +60,19 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
__asm__ __volatile__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
- "1: ll %0, %1 # atomic_" #op " \n" \
+ "1: " #ll " %0, %1 # " #pfx "_" #op " \n" \
" " #asm_op " %0, %2 \n" \
- " sc %0, %1 \n" \
+ " " #sc " %0, %1 \n" \
"\t" __SC_BEQZ "%0, 1b \n" \
" .set pop \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i) : __LLSC_CLOBBER); \
}
-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
+#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
{ \
- int temp, result; \
+ type temp, result; \
\
if (!kernel_uses_llsc) { \
unsigned long flags; \
@@ -89,9 +89,9 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
__asm__ __volatile__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
- "1: ll %1, %2 # atomic_" #op "_return \n" \
+ "1: " #ll " %1, %2 # " #pfx "_" #op "_return\n" \
" " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
+ " " #sc " %0, %2 \n" \
"\t" __SC_BEQZ "%0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
" .set pop \n" \
@@ -102,8 +102,8 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
return result; \
}
-#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
+#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
{ \
- int temp, result; \
+ type temp, result; \
\
@@ -120,10 +120,10 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: ll %1, %2 # atomic_fetch_" #op " \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ "1: " #ll " %1, %2 # " #pfx "_fetch_" #op "\n" \
" " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
+ " " #sc " %0, %2 \n" \
"\t" __SC_BEQZ "%0, 1b \n" \
" .set pop \n" \
" move %0, %1 \n" \
@@ -134,32 +134,50 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
return result; \
}
-#define ATOMIC_OPS(op, c_op, asm_op) \
- ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op) \
- ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
-ATOMIC_OPS(add, +=, addu)
-ATOMIC_OPS(sub, -=, subu)
+ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
+ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#ifdef CONFIG_64BIT
+ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
+ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
+# define atomic64_add_return_relaxed atomic64_add_return_relaxed
+# define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+# define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+# define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#endif /* CONFIG_64BIT */
+
#undef ATOMIC_OPS
-#define ATOMIC_OPS(op, c_op, asm_op) \
- ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
-ATOMIC_OPS(and, &=, and)
-ATOMIC_OPS(or, |=, or)
-ATOMIC_OPS(xor, ^=, xor)
+ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
+ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
+ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#ifdef CONFIG_64BIT
+ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
+ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
+ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
+# define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+# define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+# define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#endif
+
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
@@ -243,130 +261,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
*/
#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
-#define ATOMIC64_OP(op, c_op, asm_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
-{ \
- if (kernel_uses_llsc) { \
- s64 temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: lld %0, %1 # atomic64_" #op " \n" \
- " " #asm_op " %0, %2 \n" \
- " scd %0, %1 \n" \
- "\t" __SC_BEQZ "%0, 1b \n" \
- " .set pop \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
-}
-
-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
-{ \
- s64 result; \
- \
- if (kernel_uses_llsc) { \
- s64 temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: lld %1, %2 # atomic64_" #op "_return\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- "\t" __SC_BEQZ "%0, 1b \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " .set pop \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- result c_op i; \
- v->counter = result; \
- raw_local_irq_restore(flags); \
- } \
- \
- return result; \
-}
-
-#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
-{ \
- s64 result; \
- \
- if (kernel_uses_llsc) { \
- s64 temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: lld %1, %2 # atomic64_fetch_" #op "\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- "\t" __SC_BEQZ "%0, 1b \n" \
- " move %0, %1 \n" \
- " .set pop \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
- \
- return result; \
-}
-
-#define ATOMIC64_OPS(op, c_op, asm_op) \
- ATOMIC64_OP(op, c_op, asm_op) \
- ATOMIC64_OP_RETURN(op, c_op, asm_op) \
- ATOMIC64_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC64_OPS(add, +=, daddu)
-ATOMIC64_OPS(sub, -=, dsubu)
-
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-
-#undef ATOMIC64_OPS
-#define ATOMIC64_OPS(op, c_op, asm_op) \
- ATOMIC64_OP(op, c_op, asm_op) \
- ATOMIC64_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC64_OPS(and, &=, and)
-ATOMIC64_OPS(or, |=, or)
-ATOMIC64_OPS(xor, ^=, xor)
-
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
/*
* atomic64_sub_if_positive - conditionally subtract integer from atomic
* variable
--
2.23.0