[PATCH 0/3] powerpc: convert to use ARCH_ATOMIC
From: Nicholas Piggin @ 2020-11-11 11:07 UTC
To: linuxppc-dev
Cc: Nicholas Piggin, Will Deacon, Peter Zijlstra, Boqun Feng,
Michael Ellerman, Arnd Bergmann, Christophe Leroy,
Alexey Kardashevskiy, linux-kernel, linux-arch
This conversion seems to require generic atomic64 changes first; it looks
like nothing else uses ARCH_ATOMIC together with GENERIC_ATOMIC64 yet.
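(Editorial aside: for readers new to ARCH_ATOMIC, an arch that defines it
provides arch_atomic_* operations, and the generic layer generates the
atomic_* entry points as instrumented wrappers, roughly as in the sketch
below. This illustrates the include/asm-generic/atomic-instrumented.h
machinery and is not part of this series.)

/* Rough sketch of a generated instrumented wrapper under ARCH_ATOMIC. */
static __always_inline void atomic_add(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));	/* KASAN/KCSAN hook */
	arch_atomic_add(i, v);				/* arch implementation */
}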
Thanks,
Nick
Nicholas Piggin (3):
asm-generic/atomic64: Add support for ARCH_ATOMIC
powerpc/64s/iommu: don't use atomic_ function on atomic64_t type
powerpc: rewrite atomics to use ARCH_ATOMIC
arch/powerpc/include/asm/atomic.h | 681 ++++++++++-----------------
arch/powerpc/include/asm/cmpxchg.h | 62 +--
arch/powerpc/mm/book3s64/iommu_api.c | 2 +-
include/asm-generic/atomic64.h | 70 ++-
lib/atomic64.c | 36 +-
5 files changed, 324 insertions(+), 527 deletions(-)
--
2.23.0
[PATCH 1/3] asm-generic/atomic64: Add support for ARCH_ATOMIC
From: Nicholas Piggin @ 2020-11-11 11:07 UTC
To: linuxppc-dev
Cc: Nicholas Piggin, Will Deacon, Peter Zijlstra, Boqun Feng,
Michael Ellerman, Arnd Bergmann, Christophe Leroy,
Alexey Kardashevskiy, linux-kernel, linux-arch
This passes the atomic64 selftest on ppc32 under qemu (uniprocessor only),
both before and after powerpc is converted to use ARCH_ATOMIC.
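(Editorial aside, not part of the patch: lib/atomic64.c emulates 64-bit
atomics with a small hash table of spinlocks. The condensed sketch below
shows the scheme; lock_addr() and NR_LOCKS are named as in the file, with
one representative op under its new __atomic64_ prefix.)

#define NR_LOCKS	16

static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long)v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

/* Every 64-bit op runs under the hashed lock, e.g. add_return: */
s64 __atomic64_add_return(s64 a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = (v->counter += a);
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}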
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
include/asm-generic/atomic64.h | 70 +++++++++++++++++++++++++++-------
lib/atomic64.c | 36 ++++++++---------
2 files changed, 75 insertions(+), 31 deletions(-)
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 370f01d4450f..2b1ecb591bb9 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -15,19 +15,17 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
-extern s64 atomic64_read(const atomic64_t *v);
-extern void atomic64_set(atomic64_t *v, s64 i);
-
-#define atomic64_set_release(v, i) atomic64_set((v), (i))
+extern s64 __atomic64_read(const atomic64_t *v);
+extern void __atomic64_set(atomic64_t *v, s64 i);
#define ATOMIC64_OP(op) \
-extern void atomic64_##op(s64 a, atomic64_t *v);
+extern void __atomic64_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
-extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
+extern s64 __atomic64_##op##_return(s64 a, atomic64_t *v);
#define ATOMIC64_FETCH_OP(op) \
-extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
+extern s64 __atomic64_fetch_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
@@ -46,11 +44,57 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-extern s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
-extern s64 atomic64_xchg(atomic64_t *v, s64 new);
-extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+extern s64 __atomic64_dec_if_positive(atomic64_t *v);
+extern s64 __atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
+extern s64 __atomic64_xchg(atomic64_t *v, s64 new);
+extern s64 __atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
+
+#ifdef ARCH_ATOMIC
+#define arch_atomic64_read __atomic64_read
+#define arch_atomic64_set __atomic64_set
+#define arch_atomic64_add __atomic64_add
+#define arch_atomic64_add_return __atomic64_add_return
+#define arch_atomic64_fetch_add __atomic64_fetch_add
+#define arch_atomic64_sub __atomic64_sub
+#define arch_atomic64_sub_return __atomic64_sub_return
+#define arch_atomic64_fetch_sub __atomic64_fetch_sub
+#define arch_atomic64_and __atomic64_and
+#define arch_atomic64_and_return __atomic64_and_return
+#define arch_atomic64_fetch_and __atomic64_fetch_and
+#define arch_atomic64_or __atomic64_or
+#define arch_atomic64_or_return __atomic64_or_return
+#define arch_atomic64_fetch_or __atomic64_fetch_or
+#define arch_atomic64_xor __atomic64_xor
+#define arch_atomic64_xor_return __atomic64_xor_return
+#define arch_atomic64_fetch_xor __atomic64_fetch_xor
+#define arch_atomic64_xchg __atomic64_xchg
+#define arch_atomic64_cmpxchg __atomic64_cmpxchg
+#define arch_atomic64_set_release(v, i) __atomic64_set((v), (i))
+#define arch_atomic64_dec_if_positive __atomic64_dec_if_positive
+#define arch_atomic64_fetch_add_unless __atomic64_fetch_add_unless
+#else
+#define atomic64_read __atomic64_read
+#define atomic64_set __atomic64_set
+#define atomic64_add __atomic64_add
+#define atomic64_add_return __atomic64_add_return
+#define atomic64_fetch_add __atomic64_fetch_add
+#define atomic64_sub __atomic64_sub
+#define atomic64_sub_return __atomic64_sub_return
+#define atomic64_fetch_sub __atomic64_fetch_sub
+#define atomic64_and __atomic64_and
+#define atomic64_and_return __atomic64_and_return
+#define atomic64_fetch_and __atomic64_fetch_and
+#define atomic64_or __atomic64_or
+#define atomic64_or_return __atomic64_or_return
+#define atomic64_fetch_or __atomic64_fetch_or
+#define atomic64_xor __atomic64_xor
+#define atomic64_xor_return __atomic64_xor_return
+#define atomic64_fetch_xor __atomic64_fetch_xor
+#define atomic64_xchg __atomic64_xchg
+#define atomic64_cmpxchg __atomic64_cmpxchg
+#define atomic64_set_release(v, i) __atomic64_set((v), (i))
+#define atomic64_dec_if_positive __atomic64_dec_if_positive
+#define atomic64_fetch_add_unless __atomic64_fetch_add_unless
+#endif
#endif /* _ASM_GENERIC_ATOMIC64_H */
diff --git a/lib/atomic64.c b/lib/atomic64.c
index e98c85a99787..05aba5e3268f 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -42,7 +42,7 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
-s64 atomic64_read(const atomic64_t *v)
+s64 __atomic64_read(const atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -53,9 +53,9 @@ s64 atomic64_read(const atomic64_t *v)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_read);
+EXPORT_SYMBOL(__atomic64_read);
-void atomic64_set(atomic64_t *v, s64 i)
+void __atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -64,10 +64,10 @@ void atomic64_set(atomic64_t *v, s64 i)
v->counter = i;
raw_spin_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(atomic64_set);
+EXPORT_SYMBOL(__atomic64_set);
#define ATOMIC64_OP(op, c_op) \
-void atomic64_##op(s64 a, atomic64_t *v) \
+void __atomic64_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -76,10 +76,10 @@ void atomic64_##op(s64 a, atomic64_t *v) \
v->counter c_op a; \
raw_spin_unlock_irqrestore(lock, flags); \
} \
-EXPORT_SYMBOL(atomic64_##op);
+EXPORT_SYMBOL(__atomic64_##op);
#define ATOMIC64_OP_RETURN(op, c_op) \
-s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
+s64 __atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -90,10 +90,10 @@ s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
-EXPORT_SYMBOL(atomic64_##op##_return);
+EXPORT_SYMBOL(__atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op, c_op) \
-s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
+s64 __atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -105,7 +105,7 @@ s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+EXPORT_SYMBOL(__atomic64_fetch_##op);
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
@@ -130,7 +130,7 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-s64 atomic64_dec_if_positive(atomic64_t *v)
+s64 __atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -143,9 +143,9 @@ s64 atomic64_dec_if_positive(atomic64_t *v)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_dec_if_positive);
+EXPORT_SYMBOL(__atomic64_dec_if_positive);
-s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+s64 __atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -158,9 +158,9 @@ s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_cmpxchg);
+EXPORT_SYMBOL(__atomic64_cmpxchg);
-s64 atomic64_xchg(atomic64_t *v, s64 new)
+s64 __atomic64_xchg(atomic64_t *v, s64 new)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -172,9 +172,9 @@ s64 atomic64_xchg(atomic64_t *v, s64 new)
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
-EXPORT_SYMBOL(atomic64_xchg);
+EXPORT_SYMBOL(__atomic64_xchg);
-s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+s64 __atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -188,4 +188,4 @@ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return val;
}
-EXPORT_SYMBOL(atomic64_fetch_add_unless);
+EXPORT_SYMBOL(__atomic64_fetch_add_unless);
--
2.23.0
[PATCH 2/3] powerpc/64s/iommu: don't use atomic_ function on atomic64_t type
From: Nicholas Piggin @ 2020-11-11 11:07 UTC
To: linuxppc-dev
Cc: Nicholas Piggin, Will Deacon, Peter Zijlstra, Boqun Feng,
Michael Ellerman, Arnd Bergmann, Christophe Leroy,
Alexey Kardashevskiy, linux-kernel, linux-arch
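mem->mapped is an atomic64_t, so use the atomic64_ accessor on it.

(Editorial illustration, assuming the typed accessors the ARCH_ATOMIC
conversion introduces: the old atomic_cmpxchg() macro happened to accept
any type with a counter field, while a typed version rejects the mismatch
at compile time.)

	atomic64_t mapped;

	atomic_cmpxchg(&mapped, 1, 0);		/* breaks once atomic_cmpxchg() takes atomic_t * */
	atomic64_cmpxchg(&mapped, 1, 0);	/* correct: matches the atomic64_t type */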
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/mm/book3s64/iommu_api.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index 563faa10bb66..685d7bb3d26f 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -263,7 +263,7 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
goto unlock_exit;
/* Are there still mappings? */
- if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) {
+ if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
++mem->used;
ret = -EBUSY;
goto unlock_exit;
--
2.23.0
[PATCH 3/3] powerpc: rewrite atomics to use ARCH_ATOMIC
From: Nicholas Piggin @ 2020-11-11 11:07 UTC
To: linuxppc-dev
Cc: Nicholas Piggin, Will Deacon, Peter Zijlstra, Boqun Feng,
Michael Ellerman, Arnd Bergmann, Christophe Leroy,
Alexey Kardashevskiy, linux-kernel, linux-arch
All the cool kids are doing it. Convert the powerpc atomics to the
arch_atomic_* naming scheme, generating the 32-bit and 64-bit variants
from common macros so the generic layer can supply the instrumented
atomic_* wrappers and the ordering fallbacks.
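(Editorial sketch, not part of the patch: under ARCH_ATOMIC the arch only
needs to provide the relaxed forms; the generic fallback generators build
the fully ordered variants from the fences this file defines, approximately
as below, assuming the include/linux/atomic-arch-fallback.h machinery.)

static __always_inline int
arch_atomic_add_return(int i, atomic_t *v)
{
	int ret;

	__atomic_pre_full_fence();	/* smp_mb() on powerpc, per this patch */
	ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_post_full_fence();	/* smp_mb() on powerpc, per this patch */
	return ret;
}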
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/include/asm/atomic.h | 681 ++++++++++-------------------
arch/powerpc/include/asm/cmpxchg.h | 62 +--
2 files changed, 248 insertions(+), 495 deletions(-)
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 8a55eb8cc97b..899aa2403ba7 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -11,185 +11,285 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
+#define ARCH_ATOMIC
+
+#ifndef CONFIG_64BIT
+#include <asm-generic/atomic64.h>
+#endif
+
/*
* Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
* a "bne-" instruction at the end, so an isync is enough as a acquire barrier
* on the platform without lwsync.
*/
#define __atomic_acquire_fence() \
- __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
+ asm volatile(PPC_ACQUIRE_BARRIER "" : : : "memory")
#define __atomic_release_fence() \
- __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
+ asm volatile(PPC_RELEASE_BARRIER "" : : : "memory")
-static __inline__ int atomic_read(const atomic_t *v)
-{
- int t;
+#define __atomic_pre_full_fence smp_mb
- __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+#define __atomic_post_full_fence smp_mb
- return t;
+#define arch_atomic_read(v) __READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))
+#ifdef CONFIG_64BIT
+#define ATOMIC64_INIT(i) { (i) }
+#define arch_atomic64_read(v) __READ_ONCE((v)->counter)
+#define arch_atomic64_set(v, i) __WRITE_ONCE(((v)->counter), (i))
+#endif
+
+#define ATOMIC_OP(name, type, dtype, width, asm_op) \
+static inline void arch_##name(dtype a, type *v) \
+{ \
+ dtype t; \
+ \
+ asm volatile( \
+"1: l" #width "arx %0,0,%3 # " #name "\n" \
+"\t" #asm_op " %0,%2,%0 \n" \
+" st" #width "cx. %0,0,%3 \n" \
+" bne- 1b \n" \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cr0", "xer"); \
}
-static __inline__ void atomic_set(atomic_t *v, int i)
-{
- __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+#define ATOMIC_OP_IMM(name, type, dtype, width, asm_op, imm) \
+static inline void arch_##name(type *v) \
+{ \
+ dtype t; \
+ \
+ asm volatile( \
+"1: l" #width "arx %0,0,%3 # " #name "\n" \
+"\t" #asm_op " %0,%0,%2 \n" \
+" st" #width "cx. %0,0,%3 \n" \
+" bne- 1b \n" \
+ : "=&r" (t), "+m" (v->counter) \
+ : "i" (imm), "r" (&v->counter) \
+ : "cr0", "xer"); \
}
-#define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int a, atomic_t *v) \
+#define ATOMIC_OP_RETURN_RELAXED(name, type, dtype, width, asm_op) \
+static inline dtype arch_##name##_relaxed(dtype a, type *v) \
{ \
- int t; \
+ dtype t; \
\
- __asm__ __volatile__( \
-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
- #asm_op " %0,%2,%0\n" \
-" stwcx. %0,0,%3 \n" \
-" bne- 1b\n" \
+ asm volatile( \
+"1: l" #width "arx %0,0,%3 # " #name "\n" \
+"\t" #asm_op " %0,%2,%0 \n" \
+" st" #width "cx. %0,0,%3 \n" \
+" bne- 1b \n" \
: "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \
- : "cc"); \
-} \
+ : "cr0", "xer"); \
+ \
+ return t; \
+}
-#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
+#define ATOMIC_OP_IMM_RETURN_RELAXED(name, type, dtype, width, asm_op, imm) \
+static inline dtype arch_##name##_relaxed(type *v) \
{ \
- int t; \
+ dtype t; \
\
- __asm__ __volatile__( \
-"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
- #asm_op " %0,%2,%0\n" \
-" stwcx. %0,0,%3\n" \
-" bne- 1b\n" \
+ asm volatile( \
+"1: l" #width "arx %0,0,%3 # " #name "\n" \
+"\t" #asm_op " %0,%0,%2 \n" \
+" st" #width "cx. %0,0,%3 \n" \
+" bne- 1b \n" \
: "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
+ : "i" (imm), "r" (&v->counter) \
+ : "cr0", "xer"); \
\
return t; \
}
-#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
+#define ATOMIC_FETCH_OP_RELAXED(name, type, dtype, width, asm_op) \
+static inline dtype arch_##name##_relaxed(dtype a, type *v) \
{ \
- int res, t; \
+ dtype res, t; \
\
- __asm__ __volatile__( \
-"1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
- #asm_op " %1,%3,%0\n" \
-" stwcx. %1,0,%4\n" \
-" bne- 1b\n" \
+ asm volatile( \
+"1: l" #width "arx %0,0,%4 # " #name "\n" \
+"\t" #asm_op " %1,%3,%0 \n" \
+" st" #width "cx. %1,0,%4 \n" \
+" bne- 1b \n" \
: "=&r" (res), "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \
- : "cc"); \
+ : "cr0", "xer"); \
\
return res; \
}
+#define ATOMIC_FETCH_OP_UNLESS_RELAXED(name, type, dtype, width, asm_op) \
+static inline int arch_##name##_relaxed(type *v, dtype a, dtype u) \
+{ \
+ dtype res, t; \
+ \
+ asm volatile( \
+"1: l" #width "arx %0,0,%5 # " #name "\n" \
+" cmp" #width " 0,%0,%3 \n" \
+" beq- 2f \n" \
+"\t" #asm_op " %1,%2,%0 \n" \
+" st" #width "cx. %1,0,%5 \n" \
+" bne- 1b \n" \
+"2: \n" \
+ : "=&r" (res), "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (u), "r" (&v->counter) \
+ : "cr0", "xer"); \
+ \
+ return res; \
+}
+
+#define ATOMIC_INC_NOT_ZERO_RELAXED(name, type, dtype, width) \
+static inline dtype arch_##name##_relaxed(type *v) \
+{ \
+ dtype t1, t2; \
+ \
+ asm volatile( \
+"1: l" #width "arx %0,0,%3 # " #name "\n" \
+" cmp" #width "i 0,%0,0 \n" \
+" beq- 2f \n" \
+" addic %1,%2,1 \n" \
+" st" #width "cx. %1,0,%3 \n" \
+" bne- 1b \n" \
+"2: \n" \
+ : "=&r" (t1), "=&r" (t2), "+m" (v->counter) \
+ : "r" (&v->counter) \
+ : "cr0", "xer"); \
+ \
+ return t1; \
+}
+
+#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \
- ATOMIC_OP(op, asm_op) \
- ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
- ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+ATOMIC_OP(atomic_##op, atomic_t, int, w, asm_op) \
+ATOMIC_OP_RETURN_RELAXED(atomic_##op##_return, atomic_t, int, w, asm_op) \
+ATOMIC_FETCH_OP_RELAXED(atomic_fetch_##op, atomic_t, int, w, asm_op) \
+ATOMIC_FETCH_OP_UNLESS_RELAXED(atomic_fetch_##op##_unless, atomic_t, int, w, asm_op)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, asm_op) \
+ATOMIC_OP(atomic64_##op, atomic64_t, u64, d, asm_op) \
+ATOMIC_OP_RETURN_RELAXED(atomic64_##op##_return, atomic64_t, u64, d, asm_op) \
+ATOMIC_FETCH_OP_RELAXED(atomic64_fetch_##op, atomic64_t, u64, d, asm_op) \
+ATOMIC_FETCH_OP_UNLESS_RELAXED(atomic64_fetch_##op##_unless, atomic64_t, u64, d, asm_op)
ATOMIC_OPS(add, add)
+#define arch_atomic_add arch_atomic_add
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_unless_relaxed arch_atomic_fetch_add_unless_relaxed
+
ATOMIC_OPS(sub, subf)
+#define arch_atomic_sub arch_atomic_sub
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+/* skip atomic_fetch_sub_unless_relaxed */
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#ifdef CONFIG_64BIT
+ATOMIC64_OPS(add, add)
+#define arch_atomic64_add arch_atomic64_add
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_unless_relaxed arch_atomic64_fetch_add_unless_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+ATOMIC64_OPS(sub, subf)
+#define arch_atomic64_sub arch_atomic64_sub
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+/* skip atomic64_fetch_sub_unless_relaxed */
+#endif
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \
- ATOMIC_OP(op, asm_op) \
- ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+ATOMIC_OP(atomic_##op, atomic_t, int, w, asm_op) \
+ATOMIC_FETCH_OP_RELAXED(atomic_fetch_##op, atomic_t, int, w, asm_op)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, asm_op) \
+ATOMIC_OP(atomic64_##op, atomic64_t, u64, d, asm_op) \
+ATOMIC_FETCH_OP_RELAXED(atomic64_fetch_##op, atomic64_t, u64, d, asm_op)
ATOMIC_OPS(and, and)
+#define arch_atomic_and arch_atomic_and
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+
ATOMIC_OPS(or, or)
+#define arch_atomic_or arch_atomic_or
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+
ATOMIC_OPS(xor, xor)
+#define arch_atomic_xor arch_atomic_xor
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
+#ifdef CONFIG_64BIT
+ATOMIC64_OPS(and, and)
+#define arch_atomic64_and arch_atomic64_and
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+ATOMIC64_OPS(or, or)
+#define arch_atomic64_or arch_atomic64_or
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+
+ATOMIC64_OPS(xor, xor)
+#define arch_atomic64_xor arch_atomic64_xor
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+#endif
#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm_op, imm) \
+ATOMIC_OP_IMM(atomic_##op, atomic_t, int, w, asm_op, imm) \
+ATOMIC_OP_IMM_RETURN_RELAXED(atomic_##op##_return, atomic_t, int, w, asm_op, imm)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, asm_op, imm) \
+ATOMIC_OP_IMM(atomic64_##op, atomic64_t, u64, d, asm_op, imm) \
+ATOMIC_OP_IMM_RETURN_RELAXED(atomic64_##op##_return, atomic64_t, u64, d, asm_op, imm)
+
+ATOMIC_OPS(inc, addic, 1)
+#define arch_atomic_inc arch_atomic_inc
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
+
+ATOMIC_OPS(dec, addic, -1)
+#define arch_atomic_dec arch_atomic_dec
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
+
+#ifdef CONFIG_64BIT
+ATOMIC64_OPS(inc, addic, 1)
+#define arch_atomic64_inc arch_atomic64_inc
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+
+ATOMIC64_OPS(dec, addic, -1)
+#define arch_atomic64_dec arch_atomic64_dec
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
+#endif
+
+ATOMIC_INC_NOT_ZERO_RELAXED(atomic_inc_not_zero, atomic_t, int, w)
+#define arch_atomic_inc_not_zero_relaxed(v) arch_atomic_inc_not_zero_relaxed(v)
+
+#ifdef CONFIG_64BIT
+ATOMIC_INC_NOT_ZERO_RELAXED(atomic64_inc_not_zero, atomic64_t, u64, d)
+#define arch_atomic64_inc_not_zero_relaxed(v) arch_atomic64_inc_not_zero_relaxed(v)
+#endif
+
+#undef ATOMIC_INC_NOT_ZERO_RELAXED
+#undef ATOMIC_FETCH_OP_UNLESS_RELAXED
#undef ATOMIC_FETCH_OP_RELAXED
+#undef ATOMIC_OP_IMM_RETURN_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
+#undef ATOMIC_OP_IMM
#undef ATOMIC_OP
+#undef ATOMIC_OPS
+#undef ATOMIC64_OPS
-static __inline__ void atomic_inc(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_inc\n\
- addic %0,%0,1\n"
-" stwcx. %0,0,%2 \n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-}
-#define atomic_inc atomic_inc
-
-static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
-" addic %0,%0,1\n"
-" stwcx. %0,0,%2\n"
-" bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-
- return t;
-}
-
-static __inline__ void atomic_dec(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_dec\n\
- addic %0,%0,-1\n"
-" stwcx. %0,0,%2\n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-}
-#define atomic_dec atomic_dec
-
-static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
-" addic %0,%0,-1\n"
-" stwcx. %0,0,%2\n"
-" bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-
- return t;
-}
-
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
+#define arch_atomic_cmpxchg_relaxed(v, o, n) arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic_xchg_relaxed(v, new) arch_xchg_relaxed(&((v)->counter), (new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+#ifdef CONFIG_64BIT
+#define arch_atomic64_cmpxchg_relaxed(v, o, n) arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic64_xchg_relaxed(v, new) arch_xchg_relaxed(&((v)->counter), (new))
+#endif
/*
* Don't want to override the generic atomic_try_cmpxchg_acquire, because
@@ -203,7 +303,7 @@ atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
int r, o = *old;
__asm__ __volatile__ (
-"1:\t" PPC_LWARX(%0,0,%2,1) " # atomic_try_cmpxchg_acquire \n"
+"1:\t" PPC_LWARX(%0,0,%2,1) " # atomic_try_cmpxchg_lock \n"
" cmpw 0,%0,%3 \n"
" bne- 2f \n"
" stwcx. %4,0,%2 \n"
@@ -219,270 +319,41 @@ atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
return likely(r == o);
}
-/**
- * atomic_fetch_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int t;
-
- __asm__ __volatile__ (
- PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
- cmpw 0,%0,%3 \n\
- beq 2f \n\
- add %0,%2,%0 \n"
-" stwcx. %0,0,%1 \n\
- bne- 1b \n"
- PPC_ATOMIC_EXIT_BARRIER
-" subf %0,%2,%0 \n\
-2:"
- : "=&r" (t)
- : "r" (&v->counter), "r" (a), "r" (u)
- : "cc", "memory");
-
- return t;
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
- */
-static __inline__ int atomic_inc_not_zero(atomic_t *v)
-{
- int t1, t2;
-
- __asm__ __volatile__ (
- PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
- cmpwi 0,%0,0\n\
- beq- 2f\n\
- addic %1,%0,1\n"
-" stwcx. %1,0,%2\n\
- bne- 1b\n"
- PPC_ATOMIC_EXIT_BARRIER
- "\n\
-2:"
- : "=&r" (t1), "=&r" (t2)
- : "r" (&v->counter)
- : "cc", "xer", "memory");
-
- return t1;
-}
-#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
-
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
+static inline int atomic_dec_if_positive_relaxed(atomic_t *v)
{
int t;
- __asm__ __volatile__(
- PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
- cmpwi %0,1\n\
- addi %0,%0,-1\n\
- blt- 2f\n"
-" stwcx. %0,0,%1\n\
- bne- 1b"
- PPC_ATOMIC_EXIT_BARRIER
- "\n\
-2:" : "=&b" (t)
+ asm volatile(
+"1: lwarx %0,0,%1 # atomic_dec_if_positive \n"
+" cmpwi %0,1 \n"
+" addi %0,%0,-1 \n"
+" blt- 2f \n"
+" stwcx. %0,0,%1 \n"
+" bne- 1b \n"
+"2: \n"
+ : "=&b" (t)
: "r" (&v->counter)
: "cc", "memory");
return t;
}
-#define atomic_dec_if_positive atomic_dec_if_positive
-
-#ifdef __powerpc64__
-
-#define ATOMIC64_INIT(i) { (i) }
-
-static __inline__ s64 atomic64_read(const atomic64_t *v)
-{
- s64 t;
-
- __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
-
- return t;
-}
-
-static __inline__ void atomic64_set(atomic64_t *v, s64 i)
-{
- __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
-}
-
-#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
-{ \
- s64 t; \
- \
- __asm__ __volatile__( \
-"1: ldarx %0,0,%3 # atomic64_" #op "\n" \
- #asm_op " %0,%2,%0\n" \
-" stdcx. %0,0,%3 \n" \
-" bne- 1b\n" \
- : "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
-}
-
-#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
-static inline s64 \
-atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
-{ \
- s64 t; \
- \
- __asm__ __volatile__( \
-"1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
- #asm_op " %0,%2,%0\n" \
-" stdcx. %0,0,%3\n" \
-" bne- 1b\n" \
- : "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
- \
- return t; \
-}
-
-#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
-static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
-{ \
- s64 res, t; \
- \
- __asm__ __volatile__( \
-"1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
- #asm_op " %1,%3,%0\n" \
-" stdcx. %1,0,%4\n" \
-" bne- 1b\n" \
- : "=&r" (res), "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
- \
- return res; \
-}
-
-#define ATOMIC64_OPS(op, asm_op) \
- ATOMIC64_OP(op, asm_op) \
- ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
- ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
-
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, subf)
-
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-
-#undef ATOMIC64_OPS
-#define ATOMIC64_OPS(op, asm_op) \
- ATOMIC64_OP(op, asm_op) \
- ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
-
-ATOMIC64_OPS(and, and)
-ATOMIC64_OPS(or, or)
-ATOMIC64_OPS(xor, xor)
-
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-
-#undef ATOPIC64_OPS
-#undef ATOMIC64_FETCH_OP_RELAXED
-#undef ATOMIC64_OP_RETURN_RELAXED
-#undef ATOMIC64_OP
-
-static __inline__ void atomic64_inc(atomic64_t *v)
-{
- s64 t;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%2 # atomic64_inc\n\
- addic %0,%0,1\n\
- stdcx. %0,0,%2 \n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-}
-#define atomic64_inc atomic64_inc
-
-static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
-{
- s64 t;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
-" addic %0,%0,1\n"
-" stdcx. %0,0,%2\n"
-" bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-
- return t;
-}
-
-static __inline__ void atomic64_dec(atomic64_t *v)
-{
- s64 t;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%2 # atomic64_dec\n\
- addic %0,%0,-1\n\
- stdcx. %0,0,%2\n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-}
-#define atomic64_dec atomic64_dec
-
-static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
-{
- s64 t;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
-" addic %0,%0,-1\n"
-" stdcx. %0,0,%2\n"
-" bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-
- return t;
-}
-
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+#define atomic_dec_if_positive_relaxed atomic_dec_if_positive_relaxed
+#ifdef CONFIG_64BIT
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1.
*/
-static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive_relaxed(atomic64_t *v)
{
s64 t;
- __asm__ __volatile__(
+ asm volatile(
PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
addic. %0,%0,-1\n\
@@ -497,80 +368,8 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
return t;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-
-#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic64_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
-
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
-
-/**
- * atomic64_fetch_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- s64 t;
-
- __asm__ __volatile__ (
- PPC_ATOMIC_ENTRY_BARRIER
-"1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\
- cmpd 0,%0,%3 \n\
- beq 2f \n\
- add %0,%2,%0 \n"
-" stdcx. %0,0,%1 \n\
- bne- 1b \n"
- PPC_ATOMIC_EXIT_BARRIER
-" subf %0,%2,%0 \n\
-2:"
- : "=&r" (t)
- : "r" (&v->counter), "r" (a), "r" (u)
- : "cc", "memory");
-
- return t;
-}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-
-/**
- * atomic_inc64_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
- */
-static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
-{
- s64 t1, t2;
-
- __asm__ __volatile__ (
- PPC_ATOMIC_ENTRY_BARRIER
-"1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
- cmpdi 0,%0,0\n\
- beq- 2f\n\
- addic %1,%0,1\n\
- stdcx. %1,0,%2\n\
- bne- 1b\n"
- PPC_ATOMIC_EXIT_BARRIER
- "\n\
-2:"
- : "=&r" (t1), "=&r" (t2)
- : "r" (&v->counter)
- : "cc", "xer", "memory");
-
- return t1 != 0;
-}
-#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
-
-#endif /* __powerpc64__ */
+#define atomic64_dec_if_positive_relaxed atomic64_dec_if_positive_relaxed
+#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index cf091c4c22e5..181f7e8b3281 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -192,7 +192,7 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
(unsigned long)_x_, sizeof(*(ptr))); \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -448,35 +448,7 @@ __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
return old;
}
-static __always_inline unsigned long
-__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
- unsigned int size)
-{
- switch (size) {
- case 1:
- return __cmpxchg_u8_acquire(ptr, old, new);
- case 2:
- return __cmpxchg_u16_acquire(ptr, old, new);
- case 4:
- return __cmpxchg_u32_acquire(ptr, old, new);
-#ifdef CONFIG_PPC64
- case 8:
- return __cmpxchg_u64_acquire(ptr, old, new);
-#endif
- }
- BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
- return old;
-}
-#define cmpxchg(ptr, o, n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
-
-
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -484,7 +456,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
(unsigned long)_n_, sizeof(*(ptr))); \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -493,38 +465,20 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \
})
-#define cmpxchg_acquire(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
- (unsigned long)_o_, (unsigned long)_n_, \
- sizeof(*(ptr))); \
-})
#ifdef CONFIG_PPC64
-#define cmpxchg64(ptr, o, n) \
- ({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
- })
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64_relaxed(ptr, o, n) \
-({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_relaxed((ptr), (o), (n)); \
-})
-#define cmpxchg64_acquire(ptr, o, n) \
+#define arch_cmpxchg64_relaxed(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_acquire((ptr), (o), (n)); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
#endif /* __KERNEL__ */
--
2.23.0
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int t;
-
- __asm__ __volatile__ (
- PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
- cmpw 0,%0,%3 \n\
- beq 2f \n\
- add %0,%2,%0 \n"
-" stwcx. %0,0,%1 \n\
- bne- 1b \n"
- PPC_ATOMIC_EXIT_BARRIER
-" subf %0,%2,%0 \n\
-2:"
- : "=&r" (t)
- : "r" (&v->counter), "r" (a), "r" (u)
- : "cc", "memory");
-
- return t;
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
- */
-static __inline__ int atomic_inc_not_zero(atomic_t *v)
-{
- int t1, t2;
-
- __asm__ __volatile__ (
- PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
- cmpwi 0,%0,0\n\
- beq- 2f\n\
- addic %1,%0,1\n"
-" stwcx. %1,0,%2\n\
- bne- 1b\n"
- PPC_ATOMIC_EXIT_BARRIER
- "\n\
-2:"
- : "=&r" (t1), "=&r" (t2)
- : "r" (&v->counter)
- : "cc", "xer", "memory");
-
- return t1;
-}
-#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
-
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
+static inline int atomic_dec_if_positive_relaxed(atomic_t *v)
{
int t;
- __asm__ __volatile__(
- PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
- cmpwi %0,1\n\
- addi %0,%0,-1\n\
- blt- 2f\n"
-" stwcx. %0,0,%1\n\
- bne- 1b"
- PPC_ATOMIC_EXIT_BARRIER
- "\n\
-2:" : "=&b" (t)
+ asm volatile(
+"1: lwarx %0,0,%1 # atomic_dec_if_positive \n"
+" cmpwi %0,1 \n"
+" addi %0,%0,-1 \n"
+" blt- 2f \n"
+" stwcx. %0,0,%1 \n"
+" bne- 1b \n"
+"2: \n"
+ : "=&b" (t)
: "r" (&v->counter)
: "cc", "memory");
return t;
}
-#define atomic_dec_if_positive atomic_dec_if_positive
-
-#ifdef __powerpc64__
-
-#define ATOMIC64_INIT(i) { (i) }
-
-static __inline__ s64 atomic64_read(const atomic64_t *v)
-{
- s64 t;
-
- __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
-
- return t;
-}
-
-static __inline__ void atomic64_set(atomic64_t *v, s64 i)
-{
- __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
-}
-
-#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
-{ \
- s64 t; \
- \
- __asm__ __volatile__( \
-"1: ldarx %0,0,%3 # atomic64_" #op "\n" \
- #asm_op " %0,%2,%0\n" \
-" stdcx. %0,0,%3 \n" \
-" bne- 1b\n" \
- : "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
-}
-
-#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
-static inline s64 \
-atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
-{ \
- s64 t; \
- \
- __asm__ __volatile__( \
-"1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
- #asm_op " %0,%2,%0\n" \
-" stdcx. %0,0,%3\n" \
-" bne- 1b\n" \
- : "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
- \
- return t; \
-}
-
-#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
-static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
-{ \
- s64 res, t; \
- \
- __asm__ __volatile__( \
-"1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
- #asm_op " %1,%3,%0\n" \
-" stdcx. %1,0,%4\n" \
-" bne- 1b\n" \
- : "=&r" (res), "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
- \
- return res; \
-}
-
-#define ATOMIC64_OPS(op, asm_op) \
- ATOMIC64_OP(op, asm_op) \
- ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
- ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
-
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, subf)
-
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-
-#undef ATOMIC64_OPS
-#define ATOMIC64_OPS(op, asm_op) \
- ATOMIC64_OP(op, asm_op) \
- ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
-
-ATOMIC64_OPS(and, and)
-ATOMIC64_OPS(or, or)
-ATOMIC64_OPS(xor, xor)
-
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-
-#undef ATOPIC64_OPS
-#undef ATOMIC64_FETCH_OP_RELAXED
-#undef ATOMIC64_OP_RETURN_RELAXED
-#undef ATOMIC64_OP
-
-static __inline__ void atomic64_inc(atomic64_t *v)
-{
- s64 t;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%2 # atomic64_inc\n\
- addic %0,%0,1\n\
- stdcx. %0,0,%2 \n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-}
-#define atomic64_inc atomic64_inc
-
-static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
-{
- s64 t;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
-" addic %0,%0,1\n"
-" stdcx. %0,0,%2\n"
-" bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-
- return t;
-}
-
-static __inline__ void atomic64_dec(atomic64_t *v)
-{
- s64 t;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%2 # atomic64_dec\n\
- addic %0,%0,-1\n\
- stdcx. %0,0,%2\n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-}
-#define atomic64_dec atomic64_dec
-
-static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
-{
- s64 t;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
-" addic %0,%0,-1\n"
-" stdcx. %0,0,%2\n"
-" bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-
- return t;
-}
-
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+#define atomic_dec_if_positive_relaxed atomic_dec_if_positive_relaxed
+#ifdef CONFIG_64BIT
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1.
*/
-static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive_relaxed(atomic64_t *v)
{
s64 t;
- __asm__ __volatile__(
+ asm volatile(
PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
addic. %0,%0,-1\n\
@@ -497,80 +368,8 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
return t;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-
-#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic64_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
-
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
-
-/**
- * atomic64_fetch_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- s64 t;
-
- __asm__ __volatile__ (
- PPC_ATOMIC_ENTRY_BARRIER
-"1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\
- cmpd 0,%0,%3 \n\
- beq 2f \n\
- add %0,%2,%0 \n"
-" stdcx. %0,0,%1 \n\
- bne- 1b \n"
- PPC_ATOMIC_EXIT_BARRIER
-" subf %0,%2,%0 \n\
-2:"
- : "=&r" (t)
- : "r" (&v->counter), "r" (a), "r" (u)
- : "cc", "memory");
-
- return t;
-}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-
-/**
- * atomic_inc64_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
- */
-static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
-{
- s64 t1, t2;
-
- __asm__ __volatile__ (
- PPC_ATOMIC_ENTRY_BARRIER
-"1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
- cmpdi 0,%0,0\n\
- beq- 2f\n\
- addic %1,%0,1\n\
- stdcx. %1,0,%2\n\
- bne- 1b\n"
- PPC_ATOMIC_EXIT_BARRIER
- "\n\
-2:"
- : "=&r" (t1), "=&r" (t2)
- : "r" (&v->counter)
- : "cc", "xer", "memory");
-
- return t1 != 0;
-}
-#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
-
-#endif /* __powerpc64__ */
+#define atomic64_dec_if_positive_relaxed atomic64_dec_if_positive_relaxed
+#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index cf091c4c22e5..181f7e8b3281 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -192,7 +192,7 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
(unsigned long)_x_, sizeof(*(ptr))); \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -448,35 +448,7 @@ __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
return old;
}
-static __always_inline unsigned long
-__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
- unsigned int size)
-{
- switch (size) {
- case 1:
- return __cmpxchg_u8_acquire(ptr, old, new);
- case 2:
- return __cmpxchg_u16_acquire(ptr, old, new);
- case 4:
- return __cmpxchg_u32_acquire(ptr, old, new);
-#ifdef CONFIG_PPC64
- case 8:
- return __cmpxchg_u64_acquire(ptr, old, new);
-#endif
- }
- BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
- return old;
-}
-#define cmpxchg(ptr, o, n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
-
-
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -484,7 +456,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
(unsigned long)_n_, sizeof(*(ptr))); \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -493,38 +465,20 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \
})
-#define cmpxchg_acquire(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
- (unsigned long)_o_, (unsigned long)_n_, \
- sizeof(*(ptr))); \
-})
#ifdef CONFIG_PPC64
-#define cmpxchg64(ptr, o, n) \
- ({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
- })
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64_relaxed(ptr, o, n) \
-({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_relaxed((ptr), (o), (n)); \
-})
-#define cmpxchg64_acquire(ptr, o, n) \
+#define arch_cmpxchg64_relaxed(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_acquire((ptr), (o), (n)); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
#endif /* __KERNEL__ */
--
2.23.0
^ permalink raw reply related [flat|nested] 28+ messages in thread
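An aside for readers on the dec_if_positive semantics carried through the
patch above: the return value is the old value minus one even when the
store is skipped, so callers test the sign of the result rather than
comparing against the old value. A minimal sketch of the classic
counting-semaphore use (the helper name sem_trydown is mine, purely
illustrative):

#include <linux/atomic.h>
#include <linux/types.h>

/*
 * Illustrative only: atomic_dec_if_positive() as a semaphore trydown.
 * A negative result means the counter was already <= 0 and was left
 * untouched; a result >= 0 means we consumed a token.
 */
static bool sem_trydown(atomic_t *count)
{
	return atomic_dec_if_positive(count) >= 0;
}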
* Re: [PATCH 1/3] asm-generic/atomic64: Add support for ARCH_ATOMIC
2020-11-11 11:07 ` Nicholas Piggin
@ 2020-11-11 13:39 ` Christophe Leroy
-1 siblings, 0 replies; 28+ messages in thread
From: Christophe Leroy @ 2020-11-11 13:39 UTC (permalink / raw)
To: Nicholas Piggin, linuxppc-dev
Cc: Will Deacon, Peter Zijlstra, Boqun Feng, Michael Ellerman,
Arnd Bergmann, Christophe Leroy, Alexey Kardashevskiy,
linux-kernel, linux-arch
Hello,
On 11/11/2020 at 12:07, Nicholas Piggin wrote:
> This passes atomic64 selftest on ppc32 on qemu (uniprocessor only)
> both before and after powerpc is converted to use ARCH_ATOMIC.
Can you explain what this change does and why it is needed?
Christophe
>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
> include/asm-generic/atomic64.h | 70 +++++++++++++++++++++++++++-------
> lib/atomic64.c | 36 ++++++++---------
> 2 files changed, 75 insertions(+), 31 deletions(-)
>
> diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
> index 370f01d4450f..2b1ecb591bb9 100644
> --- a/include/asm-generic/atomic64.h
> +++ b/include/asm-generic/atomic64.h
> @@ -15,19 +15,17 @@ typedef struct {
>
> #define ATOMIC64_INIT(i) { (i) }
>
> -extern s64 atomic64_read(const atomic64_t *v);
> -extern void atomic64_set(atomic64_t *v, s64 i);
> -
> -#define atomic64_set_release(v, i) atomic64_set((v), (i))
> +extern s64 __atomic64_read(const atomic64_t *v);
> +extern void __atomic64_set(atomic64_t *v, s64 i);
>
> #define ATOMIC64_OP(op) \
> -extern void atomic64_##op(s64 a, atomic64_t *v);
> +extern void __atomic64_##op(s64 a, atomic64_t *v);
>
> #define ATOMIC64_OP_RETURN(op) \
> -extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
> +extern s64 __atomic64_##op##_return(s64 a, atomic64_t *v);
>
> #define ATOMIC64_FETCH_OP(op) \
> -extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
> +extern s64 __atomic64_fetch_##op(s64 a, atomic64_t *v);
>
> #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
>
> @@ -46,11 +44,57 @@ ATOMIC64_OPS(xor)
> #undef ATOMIC64_OP_RETURN
> #undef ATOMIC64_OP
>
> -extern s64 atomic64_dec_if_positive(atomic64_t *v);
> -#define atomic64_dec_if_positive atomic64_dec_if_positive
> -extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
> -extern s64 atomic64_xchg(atomic64_t *v, s64 new);
> -extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
> -#define atomic64_fetch_add_unless atomic64_fetch_add_unless
> +extern s64 __atomic64_dec_if_positive(atomic64_t *v);
> +extern s64 __atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
> +extern s64 __atomic64_xchg(atomic64_t *v, s64 new);
> +extern s64 __atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
> +
> +#ifdef ARCH_ATOMIC
> +#define arch_atomic64_read __atomic64_read
> +#define arch_atomic64_set __atomic64_set
> +#define arch_atomic64_add __atomic64_add
> +#define arch_atomic64_add_return __atomic64_add_return
> +#define arch_atomic64_fetch_add __atomic64_fetch_add
> +#define arch_atomic64_sub __atomic64_sub
> +#define arch_atomic64_sub_return __atomic64_sub_return
> +#define arch_atomic64_fetch_sub __atomic64_fetch_sub
> +#define arch_atomic64_and __atomic64_and
> +#define arch_atomic64_and_return __atomic64_and_return
> +#define arch_atomic64_fetch_and __atomic64_fetch_and
> +#define arch_atomic64_or __atomic64_or
> +#define arch_atomic64_or_return __atomic64_or_return
> +#define arch_atomic64_fetch_or __atomic64_fetch_or
> +#define arch_atomic64_xor __atomic64_xor
> +#define arch_atomic64_xor_return __atomic64_xor_return
> +#define arch_atomic64_fetch_xor __atomic64_fetch_xor
> +#define arch_atomic64_xchg __atomic64_xchg
> +#define arch_atomic64_cmpxchg __atomic64_cmpxchg
> +#define arch_atomic64_set_release(v, i) __atomic64_set((v), (i))
> +#define arch_atomic64_dec_if_positive __atomic64_dec_if_positive
> +#define arch_atomic64_fetch_add_unless __atomic64_fetch_add_unless
> +#else
> +#define atomic64_read __atomic64_read
> +#define atomic64_set __atomic64_set
> +#define atomic64_add __atomic64_add
> +#define atomic64_add_return __atomic64_add_return
> +#define atomic64_fetch_add __atomic64_fetch_add
> +#define atomic64_sub __atomic64_sub
> +#define atomic64_sub_return __atomic64_sub_return
> +#define atomic64_fetch_sub __atomic64_fetch_sub
> +#define atomic64_and __atomic64_and
> +#define atomic64_and_return __atomic64_and_return
> +#define atomic64_fetch_and __atomic64_fetch_and
> +#define atomic64_or __atomic64_or
> +#define atomic64_or_return __atomic64_or_return
> +#define atomic64_fetch_or __atomic64_fetch_or
> +#define atomic64_xor __atomic64_xor
> +#define atomic64_xor_return __atomic64_xor_return
> +#define atomic64_fetch_xor __atomic64_fetch_xor
> +#define atomic64_xchg __atomic64_xchg
> +#define atomic64_cmpxchg __atomic64_cmpxchg
> +#define atomic64_set_release(v, i) __atomic64_set((v), (i))
> +#define atomic64_dec_if_positive __atomic64_dec_if_positive
> +#define atomic64_fetch_add_unless __atomic64_fetch_add_unless
> +#endif
>
> #endif /* _ASM_GENERIC_ATOMIC64_H */
> diff --git a/lib/atomic64.c b/lib/atomic64.c
> index e98c85a99787..05aba5e3268f 100644
> --- a/lib/atomic64.c
> +++ b/lib/atomic64.c
> @@ -42,7 +42,7 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
> return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
> }
>
> -s64 atomic64_read(const atomic64_t *v)
> +s64 __atomic64_read(const atomic64_t *v)
> {
> unsigned long flags;
> raw_spinlock_t *lock = lock_addr(v);
> @@ -53,9 +53,9 @@ s64 atomic64_read(const atomic64_t *v)
> raw_spin_unlock_irqrestore(lock, flags);
> return val;
> }
> -EXPORT_SYMBOL(atomic64_read);
> +EXPORT_SYMBOL(__atomic64_read);
>
> -void atomic64_set(atomic64_t *v, s64 i)
> +void __atomic64_set(atomic64_t *v, s64 i)
> {
> unsigned long flags;
> raw_spinlock_t *lock = lock_addr(v);
> @@ -64,10 +64,10 @@ void atomic64_set(atomic64_t *v, s64 i)
> v->counter = i;
> raw_spin_unlock_irqrestore(lock, flags);
> }
> -EXPORT_SYMBOL(atomic64_set);
> +EXPORT_SYMBOL(__atomic64_set);
>
> #define ATOMIC64_OP(op, c_op) \
> -void atomic64_##op(s64 a, atomic64_t *v) \
> +void __atomic64_##op(s64 a, atomic64_t *v) \
> { \
> unsigned long flags; \
> raw_spinlock_t *lock = lock_addr(v); \
> @@ -76,10 +76,10 @@ void atomic64_##op(s64 a, atomic64_t *v) \
> v->counter c_op a; \
> raw_spin_unlock_irqrestore(lock, flags); \
> } \
> -EXPORT_SYMBOL(atomic64_##op);
> +EXPORT_SYMBOL(__atomic64_##op);
>
> #define ATOMIC64_OP_RETURN(op, c_op) \
> -s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
> +s64 __atomic64_##op##_return(s64 a, atomic64_t *v) \
> { \
> unsigned long flags; \
> raw_spinlock_t *lock = lock_addr(v); \
> @@ -90,10 +90,10 @@ s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
> raw_spin_unlock_irqrestore(lock, flags); \
> return val; \
> } \
> -EXPORT_SYMBOL(atomic64_##op##_return);
> +EXPORT_SYMBOL(__atomic64_##op##_return);
>
> #define ATOMIC64_FETCH_OP(op, c_op) \
> -s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
> +s64 __atomic64_fetch_##op(s64 a, atomic64_t *v) \
> { \
> unsigned long flags; \
> raw_spinlock_t *lock = lock_addr(v); \
> @@ -105,7 +105,7 @@ s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
> raw_spin_unlock_irqrestore(lock, flags); \
> return val; \
> } \
> -EXPORT_SYMBOL(atomic64_fetch_##op);
> +EXPORT_SYMBOL(__atomic64_fetch_##op);
>
> #define ATOMIC64_OPS(op, c_op) \
> ATOMIC64_OP(op, c_op) \
> @@ -130,7 +130,7 @@ ATOMIC64_OPS(xor, ^=)
> #undef ATOMIC64_OP_RETURN
> #undef ATOMIC64_OP
>
> -s64 atomic64_dec_if_positive(atomic64_t *v)
> +s64 __atomic64_dec_if_positive(atomic64_t *v)
> {
> unsigned long flags;
> raw_spinlock_t *lock = lock_addr(v);
> @@ -143,9 +143,9 @@ s64 atomic64_dec_if_positive(atomic64_t *v)
> raw_spin_unlock_irqrestore(lock, flags);
> return val;
> }
> -EXPORT_SYMBOL(atomic64_dec_if_positive);
> +EXPORT_SYMBOL(__atomic64_dec_if_positive);
>
> -s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
> +s64 __atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
> {
> unsigned long flags;
> raw_spinlock_t *lock = lock_addr(v);
> @@ -158,9 +158,9 @@ s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
> raw_spin_unlock_irqrestore(lock, flags);
> return val;
> }
> -EXPORT_SYMBOL(atomic64_cmpxchg);
> +EXPORT_SYMBOL(__atomic64_cmpxchg);
>
> -s64 atomic64_xchg(atomic64_t *v, s64 new)
> +s64 __atomic64_xchg(atomic64_t *v, s64 new)
> {
> unsigned long flags;
> raw_spinlock_t *lock = lock_addr(v);
> @@ -172,9 +172,9 @@ s64 atomic64_xchg(atomic64_t *v, s64 new)
> raw_spin_unlock_irqrestore(lock, flags);
> return val;
> }
> -EXPORT_SYMBOL(atomic64_xchg);
> +EXPORT_SYMBOL(__atomic64_xchg);
>
> -s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
> +s64 __atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
> {
> unsigned long flags;
> raw_spinlock_t *lock = lock_addr(v);
> @@ -188,4 +188,4 @@ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
>
> return val;
> }
> -EXPORT_SYMBOL(atomic64_fetch_add_unless);
> +EXPORT_SYMBOL(__atomic64_fetch_add_unless);
>
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 1/3] asm-generic/atomic64: Add support for ARCH_ATOMIC
2020-11-11 13:39 ` Christophe Leroy
@ 2020-11-11 13:44 ` Peter Zijlstra
-1 siblings, 0 replies; 28+ messages in thread
From: Peter Zijlstra @ 2020-11-11 13:44 UTC (permalink / raw)
To: Christophe Leroy
Cc: Nicholas Piggin, linuxppc-dev, Will Deacon, Boqun Feng,
Michael Ellerman, Arnd Bergmann, Christophe Leroy,
Alexey Kardashevskiy, linux-kernel, linux-arch
On Wed, Nov 11, 2020 at 02:39:01PM +0100, Christophe Leroy wrote:
> Hello,
>
> On 11/11/2020 at 12:07, Nicholas Piggin wrote:
> > This passes atomic64 selftest on ppc32 on qemu (uniprocessor only)
> > both before and after powerpc is converted to use ARCH_ATOMIC.
>
> Can you explain what this change does and why it is needed?
That certainly should've been in the Changelog. This enables atomic
instrumentation, see asm-generic/atomic-instrumented.h. IOW, it makes
atomic ops visible to K*SAN.
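For reference, the wrapping that enables looks roughly like this,
paraphrased from asm-generic/atomic-instrumented.h (the real file is
generated by scripts/atomic/gen-atomic-instrumented.sh):

/*
 * With ARCH_ATOMIC the architecture supplies arch_atomic64_*(), and the
 * generic layer prepends a K*SAN instrumentation hook to each operation.
 */
static __always_inline s64
atomic64_read(const atomic64_t *v)
{
	instrument_atomic_read(v, sizeof(*v));
	return arch_atomic64_read(v);
}

static __always_inline void
atomic64_set(atomic64_t *v, s64 i)
{
	instrument_atomic_write(v, sizeof(*v));
	arch_atomic64_set(v, i);
}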
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 3/3] powerpc: rewrite atomics to use ARCH_ATOMIC
2020-11-11 11:07 ` Nicholas Piggin
(?)
@ 2020-11-11 19:07 ` kernel test robot
-1 siblings, 0 replies; 28+ messages in thread
From: kernel test robot @ 2020-11-11 19:07 UTC (permalink / raw)
To: Nicholas Piggin, linuxppc-dev
Cc: kbuild-all, Nicholas Piggin, Will Deacon, Peter Zijlstra,
Boqun Feng, Michael Ellerman, Arnd Bergmann, Christophe Leroy,
Alexey Kardashevskiy, linux-kernel
[-- Attachment #1: Type: text/plain, Size: 13242 bytes --]
Hi Nicholas,
I love your patch! Perhaps something to improve:
[auto build test WARNING on powerpc/next]
[also build test WARNING on asm-generic/master linus/master v5.10-rc3 next-20201111]
[cannot apply to scottwood/next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/Nicholas-Piggin/powerpc-convert-to-use-ARCH_ATOMIC/20201111-190941
base: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-allyesconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/0day-ci/linux/commit/9e1bec8fe216b0745c647e52c40d1f0033fb4efd
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Nicholas-Piggin/powerpc-convert-to-use-ARCH_ATOMIC/20201111-190941
git checkout 9e1bec8fe216b0745c647e52c40d1f0033fb4efd
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=powerpc
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
In file included from arch/powerpc/include/asm/atomic.h:11,
from include/linux/atomic.h:7,
from include/linux/rcupdate.h:25,
from include/linux/rculist.h:11,
from include/linux/sched/signal.h:5,
from drivers/gpu/drm/drm_lock.c:37:
drivers/gpu/drm/drm_lock.c: In function 'drm_lock_take':
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:73:9: note: in expansion of macro 'arch_cmpxchg_relaxed'
73 | typeof(op##_relaxed(args)) __ret; \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:75:10: note: in expansion of macro 'cmpxchg'
75 | prev = cmpxchg(lock, old, new);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:75:10: note: in expansion of macro 'arch_cmpxchg_relaxed'
75 | __ret = op##_relaxed(args); \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:75:10: note: in expansion of macro 'cmpxchg'
75 | prev = cmpxchg(lock, old, new);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
drivers/gpu/drm/drm_lock.c: In function 'drm_lock_transfer':
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:73:9: note: in expansion of macro 'arch_cmpxchg_relaxed'
73 | typeof(op##_relaxed(args)) __ret; \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:118:10: note: in expansion of macro 'cmpxchg'
118 | prev = cmpxchg(lock, old, new);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:75:10: note: in expansion of macro 'arch_cmpxchg_relaxed'
75 | __ret = op##_relaxed(args); \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:118:10: note: in expansion of macro 'cmpxchg'
118 | prev = cmpxchg(lock, old, new);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
drivers/gpu/drm/drm_lock.c: In function 'drm_legacy_lock_free':
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:73:9: note: in expansion of macro 'arch_cmpxchg_relaxed'
73 | typeof(op##_relaxed(args)) __ret; \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:141:10: note: in expansion of macro 'cmpxchg'
141 | prev = cmpxchg(lock, old, new);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:75:10: note: in expansion of macro 'arch_cmpxchg_relaxed'
75 | __ret = op##_relaxed(args); \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:141:10: note: in expansion of macro 'cmpxchg'
141 | prev = cmpxchg(lock, old, new);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
drivers/gpu/drm/drm_lock.c: In function 'drm_legacy_idlelock_release':
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:73:9: note: in expansion of macro 'arch_cmpxchg_relaxed'
73 | typeof(op##_relaxed(args)) __ret; \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:319:12: note: in expansion of macro 'cmpxchg'
319 | prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:75:10: note: in expansion of macro 'arch_cmpxchg_relaxed'
75 | __ret = op##_relaxed(args); \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:319:12: note: in expansion of macro 'cmpxchg'
319 | prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
vim +463 arch/powerpc/include/asm/cmpxchg.h
56c08e6d226c860 Boqun Feng 2015-12-15 450
9e1bec8fe216b07 Nicholas Piggin 2020-11-11 451 #define arch_cmpxchg_local(ptr, o, n) \
ae3a197e3d0bfe3 David Howells 2012-03-28 452 ({ \
ae3a197e3d0bfe3 David Howells 2012-03-28 453 __typeof__(*(ptr)) _o_ = (o); \
ae3a197e3d0bfe3 David Howells 2012-03-28 454 __typeof__(*(ptr)) _n_ = (n); \
ae3a197e3d0bfe3 David Howells 2012-03-28 455 (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
ae3a197e3d0bfe3 David Howells 2012-03-28 456 (unsigned long)_n_, sizeof(*(ptr))); \
ae3a197e3d0bfe3 David Howells 2012-03-28 457 })
ae3a197e3d0bfe3 David Howells 2012-03-28 458
9e1bec8fe216b07 Nicholas Piggin 2020-11-11 459 #define arch_cmpxchg_relaxed(ptr, o, n) \
56c08e6d226c860 Boqun Feng 2015-12-15 460 ({ \
56c08e6d226c860 Boqun Feng 2015-12-15 461 __typeof__(*(ptr)) _o_ = (o); \
56c08e6d226c860 Boqun Feng 2015-12-15 462 __typeof__(*(ptr)) _n_ = (n); \
56c08e6d226c860 Boqun Feng 2015-12-15 @463 (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
56c08e6d226c860 Boqun Feng 2015-12-15 464 (unsigned long)_o_, (unsigned long)_n_, \
56c08e6d226c860 Boqun Feng 2015-12-15 465 sizeof(*(ptr))); \
56c08e6d226c860 Boqun Feng 2015-12-15 466 })
56c08e6d226c860 Boqun Feng 2015-12-15 467
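The prototype in the context above is the root cause of every warning in
this report: __cmpxchg_relaxed() takes a plain void *, so the volatile
unsigned int * that drm_lock.c passes loses its qualifier at the call
boundary. A standalone userspace reproduction of the same diagnostic
(not kernel code; the helper names are mine, purely illustrative):

/* build with: gcc demo.c  (-Wdiscarded-qualifiers is on by default) */
#include <stdio.h>

static void takes_plain(void *p)             { (void)p; }
static void takes_volatile(volatile void *p) { (void)p; }

int main(void)
{
	volatile unsigned int lock = 0;

	takes_plain(&lock);    /* warns: discards 'volatile' qualifier */
	takes_volatile(&lock); /* clean: the qualifier is preserved */

	printf("lock = %u\n", lock);
	return 0;
}

Whether the series should widen the parameter to volatile void * or cast
at the call sites is a design choice the report leaves to the author.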
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 71460 bytes --]
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 3/3] powerpc: rewrite atomics to use ARCH_ATOMIC
@ 2020-11-11 19:07 ` kernel test robot
0 siblings, 0 replies; 28+ messages in thread
From: kernel test robot @ 2020-11-11 19:07 UTC (permalink / raw)
To: Nicholas Piggin, linuxppc-dev
Cc: Christophe Leroy, kbuild-all, Arnd Bergmann, Peter Zijlstra,
Boqun Feng, linux-kernel, Nicholas Piggin, Alexey Kardashevskiy,
Will Deacon
[-- Attachment #1: Type: text/plain, Size: 13242 bytes --]
Hi Nicholas,
I love your patch! Perhaps something to improve:
[auto build test WARNING on powerpc/next]
[also build test WARNING on asm-generic/master linus/master v5.10-rc3 next-20201111]
[cannot apply to scottwood/next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/Nicholas-Piggin/powerpc-convert-to-use-ARCH_ATOMIC/20201111-190941
base: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-allyesconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/0day-ci/linux/commit/9e1bec8fe216b0745c647e52c40d1f0033fb4efd
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Nicholas-Piggin/powerpc-convert-to-use-ARCH_ATOMIC/20201111-190941
git checkout 9e1bec8fe216b0745c647e52c40d1f0033fb4efd
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=powerpc
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
In file included from arch/powerpc/include/asm/atomic.h:11,
from include/linux/atomic.h:7,
from include/linux/rcupdate.h:25,
from include/linux/rculist.h:11,
from include/linux/sched/signal.h:5,
from drivers/gpu/drm/drm_lock.c:37:
drivers/gpu/drm/drm_lock.c: In function 'drm_lock_take':
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:73:9: note: in expansion of macro 'arch_cmpxchg_relaxed'
73 | typeof(op##_relaxed(args)) __ret; \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:75:10: note: in expansion of macro 'cmpxchg'
75 | prev = cmpxchg(lock, old, new);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:75:10: note: in expansion of macro 'arch_cmpxchg_relaxed'
75 | __ret = op##_relaxed(args); \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:75:10: note: in expansion of macro 'cmpxchg'
75 | prev = cmpxchg(lock, old, new);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
drivers/gpu/drm/drm_lock.c: In function 'drm_lock_transfer':
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:73:9: note: in expansion of macro 'arch_cmpxchg_relaxed'
73 | typeof(op##_relaxed(args)) __ret; \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:118:10: note: in expansion of macro 'cmpxchg'
118 | prev = cmpxchg(lock, old, new);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:75:10: note: in expansion of macro 'arch_cmpxchg_relaxed'
75 | __ret = op##_relaxed(args); \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:118:10: note: in expansion of macro 'cmpxchg'
118 | prev = cmpxchg(lock, old, new);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
drivers/gpu/drm/drm_lock.c: In function 'drm_legacy_lock_free':
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:73:9: note: in expansion of macro 'arch_cmpxchg_relaxed'
73 | typeof(op##_relaxed(args)) __ret; \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:141:10: note: in expansion of macro 'cmpxchg'
141 | prev = cmpxchg(lock, old, new);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:75:10: note: in expansion of macro 'arch_cmpxchg_relaxed'
75 | __ret = op##_relaxed(args); \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:141:10: note: in expansion of macro 'cmpxchg'
141 | prev = cmpxchg(lock, old, new);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
drivers/gpu/drm/drm_lock.c: In function 'drm_legacy_idlelock_release':
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:73:9: note: in expansion of macro 'arch_cmpxchg_relaxed'
73 | typeof(op##_relaxed(args)) __ret; \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:319:12: note: in expansion of macro 'cmpxchg'
319 | prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
>> arch/powerpc/include/asm/cmpxchg.h:463:41: warning: passing argument 1 of '__cmpxchg_relaxed' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers]
463 | (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
| ^~~~~
include/linux/atomic.h:75:10: note: in expansion of macro 'arch_cmpxchg_relaxed'
75 | __ret = op##_relaxed(args); \
| ^~
include/linux/atomic-arch-fallback.h:52:2: note: in expansion of macro '__atomic_op_fence'
52 | __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
| ^~~~~~~~~~~~~~~~~
include/asm-generic/atomic-instrumented.h:1685:2: note: in expansion of macro 'arch_cmpxchg'
1685 | arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
| ^~~~~~~~~~~~
drivers/gpu/drm/drm_lock.c:319:12: note: in expansion of macro 'cmpxchg'
319 | prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
| ^~~~~~~
arch/powerpc/include/asm/cmpxchg.h:432:25: note: expected 'void *' but argument is of type 'volatile unsigned int *'
432 | __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
| ~~~~~~^~~
vim +463 arch/powerpc/include/asm/cmpxchg.h
56c08e6d226c860 Boqun Feng 2015-12-15 450
9e1bec8fe216b07 Nicholas Piggin 2020-11-11 451 #define arch_cmpxchg_local(ptr, o, n) \
ae3a197e3d0bfe3 David Howells 2012-03-28 452 ({ \
ae3a197e3d0bfe3 David Howells 2012-03-28 453 __typeof__(*(ptr)) _o_ = (o); \
ae3a197e3d0bfe3 David Howells 2012-03-28 454 __typeof__(*(ptr)) _n_ = (n); \
ae3a197e3d0bfe3 David Howells 2012-03-28 455 (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
ae3a197e3d0bfe3 David Howells 2012-03-28 456 (unsigned long)_n_, sizeof(*(ptr))); \
ae3a197e3d0bfe3 David Howells 2012-03-28 457 })
ae3a197e3d0bfe3 David Howells 2012-03-28 458
9e1bec8fe216b07 Nicholas Piggin 2020-11-11 459 #define arch_cmpxchg_relaxed(ptr, o, n) \
56c08e6d226c860 Boqun Feng 2015-12-15 460 ({ \
56c08e6d226c860 Boqun Feng 2015-12-15 461 __typeof__(*(ptr)) _o_ = (o); \
56c08e6d226c860 Boqun Feng 2015-12-15 462 __typeof__(*(ptr)) _n_ = (n); \
56c08e6d226c860 Boqun Feng 2015-12-15 @463 (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
56c08e6d226c860 Boqun Feng 2015-12-15 464 (unsigned long)_o_, (unsigned long)_n_, \
56c08e6d226c860 Boqun Feng 2015-12-15 465 sizeof(*(ptr))); \
56c08e6d226c860 Boqun Feng 2015-12-15 466 })
56c08e6d226c860 Boqun Feng 2015-12-15 467
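The warning output above also shows the shape of a likely fix: the
dispatcher at line 432 takes a plain 'void *', so the volatile
qualifier on drm_lock.c's pointer is discarded at the call. A minimal
sketch of the kind of change that would silence it (helper names
assumed from the surrounding file, not tested):

	static __always_inline unsigned long
	__cmpxchg_relaxed(volatile void *ptr, unsigned long old,
			  unsigned long new, unsigned int size)
	{
		switch (size) {
		case 1:
			return __cmpxchg_u8_relaxed(ptr, old, new);
		case 2:
			return __cmpxchg_u16_relaxed(ptr, old, new);
		case 4:
			return __cmpxchg_u32_relaxed(ptr, old, new);
	#ifdef CONFIG_PPC64
		case 8:
			return __cmpxchg_u64_relaxed(ptr, old, new);
	#endif
		}
		BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
		return old;
	}

i.e. the same body as at line 432, with only the first parameter
qualified 'volatile' so callers like cmpxchg(lock, ...) no longer drop
the qualifier.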
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 71460 bytes --]
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 3/3] powerpc: rewrite atomics to use ARCH_ATOMIC
2020-11-11 11:07 ` Nicholas Piggin
(?)
@ 2020-11-13 5:05 ` kernel test robot
-1 siblings, 0 replies; 28+ messages in thread
From: kernel test robot @ 2020-11-13 5:05 UTC (permalink / raw)
To: Nicholas Piggin, linuxppc-dev
Cc: kbuild-all, Nicholas Piggin, Will Deacon, Peter Zijlstra,
Boqun Feng, Michael Ellerman, Arnd Bergmann, Christophe Leroy,
Alexey Kardashevskiy, linux-kernel
[-- Attachment #1: Type: text/plain, Size: 7933 bytes --]
Hi Nicholas,
I love your patch! Perhaps something to improve:
[auto build test WARNING on powerpc/next]
[also build test WARNING on asm-generic/master linus/master v5.10-rc3 next-20201112]
[cannot apply to scottwood/next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/Nicholas-Piggin/powerpc-convert-to-use-ARCH_ATOMIC/20201111-190941
base: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc64-randconfig-s031-20201111 (attached as .config)
compiler: powerpc-linux-gcc (GCC) 9.3.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# apt-get install sparse
# sparse version: v0.6.3-107-gaf3512a6-dirty
# https://github.com/0day-ci/linux/commit/9e1bec8fe216b0745c647e52c40d1f0033fb4efd
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Nicholas-Piggin/powerpc-convert-to-use-ARCH_ATOMIC/20201111-190941
git checkout 9e1bec8fe216b0745c647e52c40d1f0033fb4efd
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=powerpc64
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
"sparse warnings: (new ones prefixed by >>)"
>> drivers/gpu/drm/drm_lock.c:75:24: sparse: sparse: incorrect type in argument 1 (different modifiers) @@ expected void *ptr @@ got unsigned int volatile *__ai_ptr @@
drivers/gpu/drm/drm_lock.c:75:24: sparse: expected void *ptr
>> drivers/gpu/drm/drm_lock.c:75:24: sparse: got unsigned int volatile *__ai_ptr
>> drivers/gpu/drm/drm_lock.c:75:24: sparse: sparse: incorrect type in argument 1 (different modifiers) @@ expected void *ptr @@ got unsigned int volatile *__ai_ptr @@
drivers/gpu/drm/drm_lock.c:75:24: sparse: expected void *ptr
>> drivers/gpu/drm/drm_lock.c:75:24: sparse: got unsigned int volatile *__ai_ptr
drivers/gpu/drm/drm_lock.c:118:24: sparse: sparse: incorrect type in argument 1 (different modifiers) @@ expected void *ptr @@ got unsigned int volatile *__ai_ptr @@
drivers/gpu/drm/drm_lock.c:118:24: sparse: expected void *ptr
drivers/gpu/drm/drm_lock.c:118:24: sparse: got unsigned int volatile *__ai_ptr
drivers/gpu/drm/drm_lock.c:118:24: sparse: sparse: incorrect type in argument 1 (different modifiers) @@ expected void *ptr @@ got unsigned int volatile *__ai_ptr @@
drivers/gpu/drm/drm_lock.c:118:24: sparse: expected void *ptr
drivers/gpu/drm/drm_lock.c:118:24: sparse: got unsigned int volatile *__ai_ptr
drivers/gpu/drm/drm_lock.c:141:24: sparse: sparse: incorrect type in argument 1 (different modifiers) @@ expected void *ptr @@ got unsigned int volatile *__ai_ptr @@
drivers/gpu/drm/drm_lock.c:141:24: sparse: expected void *ptr
drivers/gpu/drm/drm_lock.c:141:24: sparse: got unsigned int volatile *__ai_ptr
drivers/gpu/drm/drm_lock.c:141:24: sparse: sparse: incorrect type in argument 1 (different modifiers) @@ expected void *ptr @@ got unsigned int volatile *__ai_ptr @@
drivers/gpu/drm/drm_lock.c:141:24: sparse: expected void *ptr
drivers/gpu/drm/drm_lock.c:141:24: sparse: got unsigned int volatile *__ai_ptr
drivers/gpu/drm/drm_lock.c:319:40: sparse: sparse: incorrect type in argument 1 (different modifiers) @@ expected void *ptr @@ got unsigned int volatile *__ai_ptr @@
drivers/gpu/drm/drm_lock.c:319:40: sparse: expected void *ptr
drivers/gpu/drm/drm_lock.c:319:40: sparse: got unsigned int volatile *__ai_ptr
drivers/gpu/drm/drm_lock.c:319:40: sparse: sparse: incorrect type in argument 1 (different modifiers) @@ expected void *ptr @@ got unsigned int volatile *__ai_ptr @@
drivers/gpu/drm/drm_lock.c:319:40: sparse: expected void *ptr
drivers/gpu/drm/drm_lock.c:319:40: sparse: got unsigned int volatile *__ai_ptr
vim +75 drivers/gpu/drm/drm_lock.c
4ac5ec40ec70022 Daniel Vetter 2010-08-23 48
bd50d4a2168370b Benjamin Gaignard 2020-03-06 49 /*
1a75a222f5ca106 Daniel Vetter 2016-06-14 50 * Take the heavyweight lock.
1a75a222f5ca106 Daniel Vetter 2016-06-14 51 *
1a75a222f5ca106 Daniel Vetter 2016-06-14 52 * \param lock lock pointer.
1a75a222f5ca106 Daniel Vetter 2016-06-14 53 * \param context locking context.
1a75a222f5ca106 Daniel Vetter 2016-06-14 54 * \return one if the lock is held, or zero otherwise.
1a75a222f5ca106 Daniel Vetter 2016-06-14 55 *
1a75a222f5ca106 Daniel Vetter 2016-06-14 56 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
1a75a222f5ca106 Daniel Vetter 2016-06-14 57 */
1a75a222f5ca106 Daniel Vetter 2016-06-14 58 static
1a75a222f5ca106 Daniel Vetter 2016-06-14 59 int drm_lock_take(struct drm_lock_data *lock_data,
1a75a222f5ca106 Daniel Vetter 2016-06-14 60 unsigned int context)
1a75a222f5ca106 Daniel Vetter 2016-06-14 61 {
1a75a222f5ca106 Daniel Vetter 2016-06-14 62 unsigned int old, new, prev;
1a75a222f5ca106 Daniel Vetter 2016-06-14 63 volatile unsigned int *lock = &lock_data->hw_lock->lock;
1a75a222f5ca106 Daniel Vetter 2016-06-14 64
1a75a222f5ca106 Daniel Vetter 2016-06-14 65 spin_lock_bh(&lock_data->spinlock);
1a75a222f5ca106 Daniel Vetter 2016-06-14 66 do {
1a75a222f5ca106 Daniel Vetter 2016-06-14 67 old = *lock;
1a75a222f5ca106 Daniel Vetter 2016-06-14 68 if (old & _DRM_LOCK_HELD)
1a75a222f5ca106 Daniel Vetter 2016-06-14 69 new = old | _DRM_LOCK_CONT;
1a75a222f5ca106 Daniel Vetter 2016-06-14 70 else {
1a75a222f5ca106 Daniel Vetter 2016-06-14 71 new = context | _DRM_LOCK_HELD |
1a75a222f5ca106 Daniel Vetter 2016-06-14 72 ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
1a75a222f5ca106 Daniel Vetter 2016-06-14 73 _DRM_LOCK_CONT : 0);
1a75a222f5ca106 Daniel Vetter 2016-06-14 74 }
1a75a222f5ca106 Daniel Vetter 2016-06-14 @75 prev = cmpxchg(lock, old, new);
1a75a222f5ca106 Daniel Vetter 2016-06-14 76 } while (prev != old);
1a75a222f5ca106 Daniel Vetter 2016-06-14 77 spin_unlock_bh(&lock_data->spinlock);
1a75a222f5ca106 Daniel Vetter 2016-06-14 78
1a75a222f5ca106 Daniel Vetter 2016-06-14 79 if (_DRM_LOCKING_CONTEXT(old) == context) {
1a75a222f5ca106 Daniel Vetter 2016-06-14 80 if (old & _DRM_LOCK_HELD) {
1a75a222f5ca106 Daniel Vetter 2016-06-14 81 if (context != DRM_KERNEL_CONTEXT) {
1a75a222f5ca106 Daniel Vetter 2016-06-14 82 DRM_ERROR("%d holds heavyweight lock\n",
1a75a222f5ca106 Daniel Vetter 2016-06-14 83 context);
1a75a222f5ca106 Daniel Vetter 2016-06-14 84 }
1a75a222f5ca106 Daniel Vetter 2016-06-14 85 return 0;
1a75a222f5ca106 Daniel Vetter 2016-06-14 86 }
1a75a222f5ca106 Daniel Vetter 2016-06-14 87 }
1a75a222f5ca106 Daniel Vetter 2016-06-14 88
1a75a222f5ca106 Daniel Vetter 2016-06-14 89 if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
1a75a222f5ca106 Daniel Vetter 2016-06-14 90 /* Have lock */
1a75a222f5ca106 Daniel Vetter 2016-06-14 91 return 1;
1a75a222f5ca106 Daniel Vetter 2016-06-14 92 }
1a75a222f5ca106 Daniel Vetter 2016-06-14 93 return 0;
1a75a222f5ca106 Daniel Vetter 2016-06-14 94 }
1a75a222f5ca106 Daniel Vetter 2016-06-14 95
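For reference, '__ai_ptr' is not in drm_lock.c itself; it is the
temporary introduced by the instrumented wrapper, roughly like this
(sketch of include/asm-generic/atomic-instrumented.h, from memory):

	#define cmpxchg(ptr, ...)					\
	({								\
		typeof(ptr) __ai_ptr = (ptr);				\
		instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));	\
		arch_cmpxchg(__ai_ptr, __VA_ARGS__);			\
	})

so the volatile qualifier survives up to this point and is only lost
when arch_cmpxchg() bottoms out in powerpc's 'void *ptr' prototype.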
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 37481 bytes --]
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 3/3] powerpc: rewrite atomics to use ARCH_ATOMIC
2020-11-11 11:07 ` Nicholas Piggin
@ 2020-11-13 15:30 ` Boqun Feng
-1 siblings, 0 replies; 28+ messages in thread
From: Boqun Feng @ 2020-11-13 15:30 UTC (permalink / raw)
To: Nicholas Piggin
Cc: linuxppc-dev, Will Deacon, Peter Zijlstra, Michael Ellerman,
Arnd Bergmann, Christophe Leroy, Alexey Kardashevskiy,
linux-kernel, linux-arch
Hi Nicholas,
On Wed, Nov 11, 2020 at 09:07:23PM +1000, Nicholas Piggin wrote:
> All the cool kids are doing it.
>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
> arch/powerpc/include/asm/atomic.h | 681 ++++++++++-------------------
> arch/powerpc/include/asm/cmpxchg.h | 62 +--
> 2 files changed, 248 insertions(+), 495 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
> index 8a55eb8cc97b..899aa2403ba7 100644
> --- a/arch/powerpc/include/asm/atomic.h
> +++ b/arch/powerpc/include/asm/atomic.h
> @@ -11,185 +11,285 @@
> #include <asm/cmpxchg.h>
> #include <asm/barrier.h>
>
> +#define ARCH_ATOMIC
> +
> +#ifndef CONFIG_64BIT
> +#include <asm-generic/atomic64.h>
> +#endif
> +
> /*
> * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
> * a "bne-" instruction at the end, so an isync is enough as a acquire barrier
> * on the platform without lwsync.
> */
> #define __atomic_acquire_fence() \
> - __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
> + asm volatile(PPC_ACQUIRE_BARRIER "" : : : "memory")
>
> #define __atomic_release_fence() \
> - __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
> + asm volatile(PPC_RELEASE_BARRIER "" : : : "memory")
>
> -static __inline__ int atomic_read(const atomic_t *v)
> -{
> - int t;
> +#define __atomic_pre_full_fence smp_mb
>
> - __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
> +#define __atomic_post_full_fence smp_mb
>
Do you need to define __atomic_{pre,post}_full_fence for PPC? IIRC, they
default to smp_mb__{before,after}_atomic(), which are smp_mb() by default
on PPC.
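(The generic defaults are roughly the following, quoting
include/linux/atomic.h from memory:

	#ifndef __atomic_pre_full_fence
	#define __atomic_pre_full_fence	smp_mb__before_atomic
	#endif

	#ifndef __atomic_post_full_fence
	#define __atomic_post_full_fence	smp_mb__after_atomic
	#endif

and since powerpc doesn't override smp_mb__{before,after}_atomic(),
both already resolve to smp_mb() there.)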
> - return t;
> +#define arch_atomic_read(v) __READ_ONCE((v)->counter)
> +#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))
> +#ifdef CONFIG_64BIT
> +#define ATOMIC64_INIT(i) { (i) }
> +#define arch_atomic64_read(v) __READ_ONCE((v)->counter)
> +#define arch_atomic64_set(v, i) __WRITE_ONCE(((v)->counter), (i))
> +#endif
> +
[...]
>
> +#define ATOMIC_FETCH_OP_UNLESS_RELAXED(name, type, dtype, width, asm_op) \
> +static inline int arch_##name##_relaxed(type *v, dtype a, dtype u) \
I don't think we have atomic_fetch_*_unless_relaxed() in the atomic APIs,
ditto for:
atomic_fetch_add_unless_relaxed()
atomic_inc_not_zero_relaxed()
atomic_dec_if_positive_relaxed()
, and we don't have the _acquire() and _release() variants for them
either, and if you don't define their fully-ordered versions (e.g.
atomic_inc_not_zero()), atomic-arch-fallback.h will use read and cmpxchg
to implement them, and I think that's not what we want.
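(For example, the fallback for atomic_fetch_add_unless() is a
fully-ordered cmpxchg loop along these lines -- sketch of
include/linux/atomic-arch-fallback.h, from memory:

	static __always_inline int
	arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
	{
		int c = arch_atomic_read(v);

		do {
			if (unlikely(c == u))
				break;
		} while (!arch_atomic_try_cmpxchg(v, &c, c + a));

		return c;
	}

which loses the single ll/sc sequence your asm versions provide.)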
[...]
>
> #endif /* __KERNEL__ */
> #endif /* _ASM_POWERPC_ATOMIC_H_ */
> diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
> index cf091c4c22e5..181f7e8b3281 100644
> --- a/arch/powerpc/include/asm/cmpxchg.h
> +++ b/arch/powerpc/include/asm/cmpxchg.h
> @@ -192,7 +192,7 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
> (unsigned long)_x_, sizeof(*(ptr))); \
> })
>
> -#define xchg_relaxed(ptr, x) \
> +#define arch_xchg_relaxed(ptr, x) \
> ({ \
> __typeof__(*(ptr)) _x_ = (x); \
> (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
> @@ -448,35 +448,7 @@ __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
> return old;
> }
>
> -static __always_inline unsigned long
> -__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
> - unsigned int size)
> -{
> - switch (size) {
> - case 1:
> - return __cmpxchg_u8_acquire(ptr, old, new);
> - case 2:
> - return __cmpxchg_u16_acquire(ptr, old, new);
> - case 4:
> - return __cmpxchg_u32_acquire(ptr, old, new);
> -#ifdef CONFIG_PPC64
> - case 8:
> - return __cmpxchg_u64_acquire(ptr, old, new);
> -#endif
> - }
> - BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
> - return old;
> -}
> -#define cmpxchg(ptr, o, n) \
> - ({ \
> - __typeof__(*(ptr)) _o_ = (o); \
> - __typeof__(*(ptr)) _n_ = (n); \
> - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
> - (unsigned long)_n_, sizeof(*(ptr))); \
> - })
> -
> -
If you remove {atomic_}cmpxchg{,_acquire}() and use the version
provided by atomic-arch-fallback.h, then a failed cmpxchg() or
cmpxchg_acquire() will still result in a full barrier or an acquire
barrier after the RMW operation; the barrier is not necessary, and
probably this is not what we want?
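(That is visible in the expansion traces from the build reports: the
fully-ordered op is built mechanically as

	#define __atomic_op_fence(op, args...)				\
	({								\
		typeof(op##_relaxed(args)) __ret;			\
		__atomic_pre_full_fence();				\
		__ret = op##_relaxed(args);				\
		__atomic_post_full_fence();				\
		__ret;							\
	})

so the trailing fence is emitted whether or not the cmpxchg succeeded,
whereas the powerpc asm branches past the exit barrier on failure.)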
Regards,
Boqun
> -#define cmpxchg_local(ptr, o, n) \
> +#define arch_cmpxchg_local(ptr, o, n) \
> ({ \
> __typeof__(*(ptr)) _o_ = (o); \
> __typeof__(*(ptr)) _n_ = (n); \
> @@ -484,7 +456,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
> (unsigned long)_n_, sizeof(*(ptr))); \
> })
>
> -#define cmpxchg_relaxed(ptr, o, n) \
> +#define arch_cmpxchg_relaxed(ptr, o, n) \
> ({ \
> __typeof__(*(ptr)) _o_ = (o); \
> __typeof__(*(ptr)) _n_ = (n); \
> @@ -493,38 +465,20 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
> sizeof(*(ptr))); \
> })
>
> -#define cmpxchg_acquire(ptr, o, n) \
> -({ \
> - __typeof__(*(ptr)) _o_ = (o); \
> - __typeof__(*(ptr)) _n_ = (n); \
> - (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
> - (unsigned long)_o_, (unsigned long)_n_, \
> - sizeof(*(ptr))); \
> -})
> #ifdef CONFIG_PPC64
> -#define cmpxchg64(ptr, o, n) \
> - ({ \
> - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
> - cmpxchg((ptr), (o), (n)); \
> - })
> -#define cmpxchg64_local(ptr, o, n) \
> +#define arch_cmpxchg64_local(ptr, o, n) \
> ({ \
> BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
> - cmpxchg_local((ptr), (o), (n)); \
> + arch_cmpxchg_local((ptr), (o), (n)); \
> })
> -#define cmpxchg64_relaxed(ptr, o, n) \
> -({ \
> - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
> - cmpxchg_relaxed((ptr), (o), (n)); \
> -})
> -#define cmpxchg64_acquire(ptr, o, n) \
> +#define arch_cmpxchg64_relaxed(ptr, o, n) \
> ({ \
> BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
> - cmpxchg_acquire((ptr), (o), (n)); \
> + arch_cmpxchg_relaxed((ptr), (o), (n)); \
> })
> #else
> #include <asm-generic/cmpxchg-local.h>
> -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
> +#define arch_cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
> #endif
>
> #endif /* __KERNEL__ */
> --
> 2.23.0
>
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 1/3] asm-generic/atomic64: Add support for ARCH_ATOMIC
2020-11-11 13:44 ` Peter Zijlstra
@ 2020-11-16 1:48 ` Nicholas Piggin
-1 siblings, 0 replies; 28+ messages in thread
From: Nicholas Piggin @ 2020-11-16 1:48 UTC (permalink / raw)
To: Christophe Leroy, Peter Zijlstra
Cc: Alexey Kardashevskiy, Arnd Bergmann, Boqun Feng,
Christophe Leroy, linux-arch, linux-kernel, linuxppc-dev,
Michael Ellerman, Will Deacon
Excerpts from Peter Zijlstra's message of November 11, 2020 11:44 pm:
> On Wed, Nov 11, 2020 at 02:39:01PM +0100, Christophe Leroy wrote:
>> Hello,
>>
>> Le 11/11/2020 à 12:07, Nicholas Piggin a écrit :
>> > This passes atomic64 selftest on ppc32 on qemu (uniprocessor only)
>> > both before and after powerpc is converted to use ARCH_ATOMIC.
>>
>> Can you explain what this change does and why it is needed ?
>
> That certainly should've been in the Changelog. This enables atomic
> instrumentation, see asm-generic/atomic-instrumented.h. IOW, it makes
> atomic ops visible to K*SAN.
>
Right. This specific patch doesn't actually "do" anything except
allow generic atomic64 to be used with ARCH_ATOMIC. It does that
by re-naming some things to avoid name collisions and also providing
the arch_ prefix that ARCH_ATOMIC expects.
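For illustration, with ARCH_ATOMIC set,
include/asm-generic/atomic-instrumented.h then supplies the
un-prefixed names with the K*SAN hooks, roughly (from memory):

	static __always_inline s64
	atomic64_read(const atomic64_t *v)
	{
		instrument_atomic_read(v, sizeof(*v));
		return arch_atomic64_read(v);
	}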
I don't know what should be in the changelog. I suppose the latter;
the former is discoverable by looking at the ARCH_ATOMIC code and
patches, but I guess it's polite and doesn't hurt to include the
former as well.
I'll send an update before long.
Thanks,
Nick
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0/3] powerpc: convert to use ARCH_ATOMIC
2020-11-11 11:07 ` Nicholas Piggin
@ 2020-12-15 11:01 ` Michael Ellerman
-1 siblings, 0 replies; 28+ messages in thread
From: Michael Ellerman @ 2020-12-15 11:01 UTC (permalink / raw)
To: Nicholas Piggin, linuxppc-dev
Cc: Alexey Kardashevskiy, Boqun Feng, Michael Ellerman, linux-arch,
Christophe Leroy, Will Deacon, Peter Zijlstra, Arnd Bergmann,
linux-kernel
On Wed, 11 Nov 2020 21:07:20 +1000, Nicholas Piggin wrote:
> This conversion seems to require generic atomic64 changes, looks
> like nothing else uses ARCH_ATOMIC and GENERIC_ATOMIC64 yet.
>
> Thanks,
> Nick
>
> Nicholas Piggin (3):
> asm-generic/atomic64: Add support for ARCH_ATOMIC
> powerpc/64s/iommu: don't use atomic_ function on atomic64_t type
> powerpc: rewrite atomics to use ARCH_ATOMIC
>
> [...]
Patch 2 applied to powerpc/next.
[2/3] powerpc/64s/iommu: Don't use atomic_ function on atomic64_t type
https://git.kernel.org/powerpc/c/c33cd1ed60013ec2ae50f91fed260def5f1d9851
cheers
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 3/3] powerpc: rewrite atomics to use ARCH_ATOMIC
2020-11-13 15:30 ` Boqun Feng
@ 2020-12-22 3:52 ` Nicholas Piggin
-1 siblings, 0 replies; 28+ messages in thread
From: Nicholas Piggin @ 2020-12-22 3:52 UTC (permalink / raw)
To: Boqun Feng
Cc: Alexey Kardashevskiy, Arnd Bergmann, Christophe Leroy,
linux-arch, linux-kernel, linuxppc-dev, Michael Ellerman,
Peter Zijlstra, Will Deacon
Excerpts from Boqun Feng's message of November 14, 2020 1:30 am:
> Hi Nicholas,
>
> On Wed, Nov 11, 2020 at 09:07:23PM +1000, Nicholas Piggin wrote:
>> All the cool kids are doing it.
>>
>> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
>> ---
>> arch/powerpc/include/asm/atomic.h | 681 ++++++++++-------------------
>> arch/powerpc/include/asm/cmpxchg.h | 62 +--
>> 2 files changed, 248 insertions(+), 495 deletions(-)
>>
>> diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
>> index 8a55eb8cc97b..899aa2403ba7 100644
>> --- a/arch/powerpc/include/asm/atomic.h
>> +++ b/arch/powerpc/include/asm/atomic.h
>> @@ -11,185 +11,285 @@
>> #include <asm/cmpxchg.h>
>> #include <asm/barrier.h>
>>
>> +#define ARCH_ATOMIC
>> +
>> +#ifndef CONFIG_64BIT
>> +#include <asm-generic/atomic64.h>
>> +#endif
>> +
>> /*
>> * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
>> * a "bne-" instruction at the end, so an isync is enough as a acquire barrier
>> * on the platform without lwsync.
>> */
>> #define __atomic_acquire_fence() \
>> - __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
>> + asm volatile(PPC_ACQUIRE_BARRIER "" : : : "memory")
>>
>> #define __atomic_release_fence() \
>> - __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
>> + asm volatile(PPC_RELEASE_BARRIER "" : : : "memory")
>>
>> -static __inline__ int atomic_read(const atomic_t *v)
>> -{
>> - int t;
>> +#define __atomic_pre_full_fence smp_mb
>>
>> - __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
>> +#define __atomic_post_full_fence smp_mb
>>
Thanks for the review.
> Do you need to define __atomic_{pre,post}_full_fence for PPC? IIRC, they
> default to smp_mb__{before,after}_atomic(), which are smp_mb() by default
> on PPC.
Okay I didn't realise that's not required.
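(For reference, include/linux/atomic.h supplies these defaults when an
arch doesn't override them; paraphrased:)

#ifndef __atomic_pre_full_fence
#define __atomic_pre_full_fence        smp_mb__before_atomic
#endif

#ifndef __atomic_post_full_fence
#define __atomic_post_full_fence       smp_mb__after_atomic
#endif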
>> - return t;
>> +#define arch_atomic_read(v) __READ_ONCE((v)->counter)
>> +#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))
>> +#ifdef CONFIG_64BIT
>> +#define ATOMIC64_INIT(i) { (i) }
>> +#define arch_atomic64_read(v) __READ_ONCE((v)->counter)
>> +#define arch_atomic64_set(v, i) __WRITE_ONCE(((v)->counter), (i))
>> +#endif
>> +
> [...]
>>
>> +#define ATOMIC_FETCH_OP_UNLESS_RELAXED(name, type, dtype, width, asm_op) \
>> +static inline int arch_##name##_relaxed(type *v, dtype a, dtype u) \
>
> I don't think we have atomic_fetch_*_unless_relaxed() in the atomic APIs,
> ditto for:
>
> atomic_fetch_add_unless_relaxed()
> atomic_inc_not_zero_relaxed()
> atomic_dec_if_positive_relaxed()
>
> , and we don't have the _acquire() and _release() variants for them
> either, and if you don't define their fully-ordered version (e.g.
> atomic_inc_not_zero()), atomic-arch-fallback.h will use read and cmpxchg
> to implement them, and I think that's not what we want.
Okay. How can those be added? The atomic generation is pretty
complicated.
> [...]
>>
>> #endif /* __KERNEL__ */
>> #endif /* _ASM_POWERPC_ATOMIC_H_ */
>> diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
>> index cf091c4c22e5..181f7e8b3281 100644
>> --- a/arch/powerpc/include/asm/cmpxchg.h
>> +++ b/arch/powerpc/include/asm/cmpxchg.h
>> @@ -192,7 +192,7 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
>> (unsigned long)_x_, sizeof(*(ptr))); \
>> })
>>
>> -#define xchg_relaxed(ptr, x) \
>> +#define arch_xchg_relaxed(ptr, x) \
>> ({ \
>> __typeof__(*(ptr)) _x_ = (x); \
>> (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
>> @@ -448,35 +448,7 @@ __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
>> return old;
>> }
>>
>> -static __always_inline unsigned long
>> -__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
>> - unsigned int size)
>> -{
>> - switch (size) {
>> - case 1:
>> - return __cmpxchg_u8_acquire(ptr, old, new);
>> - case 2:
>> - return __cmpxchg_u16_acquire(ptr, old, new);
>> - case 4:
>> - return __cmpxchg_u32_acquire(ptr, old, new);
>> -#ifdef CONFIG_PPC64
>> - case 8:
>> - return __cmpxchg_u64_acquire(ptr, old, new);
>> -#endif
>> - }
>> - BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
>> - return old;
>> -}
>> -#define cmpxchg(ptr, o, n) \
>> - ({ \
>> - __typeof__(*(ptr)) _o_ = (o); \
>> - __typeof__(*(ptr)) _n_ = (n); \
>> - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
>> - (unsigned long)_n_, sizeof(*(ptr))); \
>> - })
>> -
>> -
>
> If you remove {atomic_}cmpxchg{,_acquire}() and use the version
> provided by atomic-arch-fallback.h, then a failed cmpxchg or
> cmpxchg_acquire() will still result in a full barrier or an acquire
> barrier after the RMW operation; the barrier is not necessary, and
> probably this is not what we want?
Why is that done? That seems like a very subtle difference. Shouldn't
the fallback version skip the barrier?
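(For context, the generic acquire wrapper in include/linux/atomic.h is
roughly the following; note that the fence is issued whether or not the
cmpxchg succeeded:)

#define __atomic_op_acquire(op, args...)                               \
({                                                                     \
        typeof(op##_relaxed(args)) __ret = op##_relaxed(args);         \
        __atomic_acquire_fence();       /* runs even on failure */     \
        __ret;                                                         \
})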
Thanks,
Nick
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 3/3] powerpc: rewrite atomics to use ARCH_ATOMIC
2020-12-22 3:52 ` Nicholas Piggin
@ 2020-12-23 2:45 ` Boqun Feng
0 siblings, 0 replies; 28+ messages in thread
From: Boqun Feng @ 2020-12-23 2:45 UTC (permalink / raw)
To: Nicholas Piggin
Cc: Alexey Kardashevskiy, Arnd Bergmann, Christophe Leroy,
linux-arch, linux-kernel, linuxppc-dev, Michael Ellerman,
Peter Zijlstra, Will Deacon
On Tue, Dec 22, 2020 at 01:52:50PM +1000, Nicholas Piggin wrote:
> Excerpts from Boqun Feng's message of November 14, 2020 1:30 am:
> > Hi Nicholas,
> >
> > On Wed, Nov 11, 2020 at 09:07:23PM +1000, Nicholas Piggin wrote:
> >> All the cool kids are doing it.
> >>
> >> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> >> ---
> >> arch/powerpc/include/asm/atomic.h | 681 ++++++++++-------------------
> >> arch/powerpc/include/asm/cmpxchg.h | 62 +--
> >> 2 files changed, 248 insertions(+), 495 deletions(-)
> >>
> >> diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
> >> index 8a55eb8cc97b..899aa2403ba7 100644
> >> --- a/arch/powerpc/include/asm/atomic.h
> >> +++ b/arch/powerpc/include/asm/atomic.h
> >> @@ -11,185 +11,285 @@
> >> #include <asm/cmpxchg.h>
> >> #include <asm/barrier.h>
> >>
> >> +#define ARCH_ATOMIC
> >> +
> >> +#ifndef CONFIG_64BIT
> >> +#include <asm-generic/atomic64.h>
> >> +#endif
> >> +
> >> /*
> >> * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
> >> * a "bne-" instruction at the end, so an isync is enough as a acquire barrier
> >> * on the platform without lwsync.
> >> */
> >> #define __atomic_acquire_fence() \
> >> - __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
> >> + asm volatile(PPC_ACQUIRE_BARRIER "" : : : "memory")
> >>
> >> #define __atomic_release_fence() \
> >> - __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
> >> + asm volatile(PPC_RELEASE_BARRIER "" : : : "memory")
> >>
> >> -static __inline__ int atomic_read(const atomic_t *v)
> >> -{
> >> - int t;
> >> +#define __atomic_pre_full_fence smp_mb
> >>
> >> - __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
> >> +#define __atomic_post_full_fence smp_mb
> >>
>
> Thanks for the review.
>
> > Do you need to define __atomic_{pre,post}_full_fence for PPC? IIRC, they
> > default to smp_mb__{before,after}_atomic(), which are smp_mb() by default
> > on PPC.
>
> Okay I didn't realise that's not required.
>
> >> - return t;
> >> +#define arch_atomic_read(v) __READ_ONCE((v)->counter)
> >> +#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))
> >> +#ifdef CONFIG_64BIT
> >> +#define ATOMIC64_INIT(i) { (i) }
> >> +#define arch_atomic64_read(v) __READ_ONCE((v)->counter)
> >> +#define arch_atomic64_set(v, i) __WRITE_ONCE(((v)->counter), (i))
> >> +#endif
> >> +
> > [...]
> >>
> >> +#define ATOMIC_FETCH_OP_UNLESS_RELAXED(name, type, dtype, width, asm_op) \
> >> +static inline int arch_##name##_relaxed(type *v, dtype a, dtype u) \
> >
> > I don't think we have atomic_fetch_*_unless_relaxed() in the atomic APIs,
> > ditto for:
> >
> > atomic_fetch_add_unless_relaxed()
> > atomic_inc_not_zero_relaxed()
> > atomic_dec_if_positive_relaxed()
> >
> > , and we don't have the _acquire() and _release() variants for them
> > either, and if you don't define their fully-ordered version (e.g.
> > atomic_inc_not_zero()), atomic-arch-fallback.h will use read and cmpxchg
> > to implement them, and I think that's not what we want.
>
> Okay. How can those be added? The atomic generation is pretty
> complicated.
>
Yeah, I know ;-) I think you can just implement and define fully-ordered
versions:
arch_atomic_fetch_*_unless()
arch_atomic_inc_not_zero()
arch_atomic_dec_if_positive()
, that should work.
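A minimal sketch of what a fully-ordered definition could look like,
assuming the arch_atomic_fetch_add_unless_relaxed() loop from the patch
is available (names illustrative, not the final powerpc code):

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int old;

        /* full fences either side of the relaxed RMW give full ordering */
        __atomic_pre_full_fence();      /* smp_mb() on powerpc */
        old = arch_atomic_fetch_add_unless_relaxed(v, a, u);
        __atomic_post_full_fence();     /* smp_mb() on powerpc */

        return old;
}

static inline bool arch_atomic_inc_not_zero(atomic_t *v)
{
        /* succeeded iff the old value was non-zero */
        return arch_atomic_fetch_add_unless(v, 1, 0);
}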
Rules of atomic generation, IIRC:
1. If you define _relaxed, _acquire, _release or fully-ordered
version, atomic generation will use that version
2. If you define _relaxed, atomic generation will use that and
barriers to generate _acquire, _release and fully-ordered
versions, unless they are already defined (as Rule #1 says)
3. If you don't define _relaxed but do define the fully-ordered
version, atomic generation will use the fully-ordered version
as the _relaxed variant and generate the rest using Rule
#2.
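For illustration, rule #2 corresponds roughly to this fence-based
expansion (paraphrasing atomic-arch-fallback.h, not the verbatim
generated code):

#ifndef arch_atomic_fetch_add_acquire
static inline int arch_atomic_fetch_add_acquire(int i, atomic_t *v)
{
        int ret = arch_atomic_fetch_add_relaxed(i, v);

        __atomic_acquire_fence();       /* PPC_ACQUIRE_BARRIER on powerpc */
        return ret;
}
#endif

#ifndef arch_atomic_fetch_add_release
static inline int arch_atomic_fetch_add_release(int i, atomic_t *v)
{
        __atomic_release_fence();       /* PPC_RELEASE_BARRIER on powerpc */
        return arch_atomic_fetch_add_relaxed(i, v);
}
#endif

#ifndef arch_atomic_fetch_add
static inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
        int ret;

        __atomic_pre_full_fence();
        ret = arch_atomic_fetch_add_relaxed(i, v);
        __atomic_post_full_fence();
        return ret;
}
#endif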
> > [...]
> >>
> >> #endif /* __KERNEL__ */
> >> #endif /* _ASM_POWERPC_ATOMIC_H_ */
> >> diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
> >> index cf091c4c22e5..181f7e8b3281 100644
> >> --- a/arch/powerpc/include/asm/cmpxchg.h
> >> +++ b/arch/powerpc/include/asm/cmpxchg.h
> >> @@ -192,7 +192,7 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
> >> (unsigned long)_x_, sizeof(*(ptr))); \
> >> })
> >>
> >> -#define xchg_relaxed(ptr, x) \
> >> +#define arch_xchg_relaxed(ptr, x) \
> >> ({ \
> >> __typeof__(*(ptr)) _x_ = (x); \
> >> (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
> >> @@ -448,35 +448,7 @@ __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
> >> return old;
> >> }
> >>
> >> -static __always_inline unsigned long
> >> -__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
> >> - unsigned int size)
> >> -{
> >> - switch (size) {
> >> - case 1:
> >> - return __cmpxchg_u8_acquire(ptr, old, new);
> >> - case 2:
> >> - return __cmpxchg_u16_acquire(ptr, old, new);
> >> - case 4:
> >> - return __cmpxchg_u32_acquire(ptr, old, new);
> >> -#ifdef CONFIG_PPC64
> >> - case 8:
> >> - return __cmpxchg_u64_acquire(ptr, old, new);
> >> -#endif
> >> - }
> >> - BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
> >> - return old;
> >> -}
> >> -#define cmpxchg(ptr, o, n) \
> >> - ({ \
> >> - __typeof__(*(ptr)) _o_ = (o); \
> >> - __typeof__(*(ptr)) _n_ = (n); \
> >> - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
> >> - (unsigned long)_n_, sizeof(*(ptr))); \
> >> - })
> >> -
> >> -
> >
> > If you remove {atomic_}cmpxchg{,_acquire}() and use the version
> > provided by atomic-arch-fallback.h, then a failed cmpxchg or
> > cmpxchg_acquire() will still result in a full barrier or an acquire
> > barrier after the RMW operation; the barrier is not necessary, and
> > probably this is not what we want?
>
> Why is that done? That seems like a very subtle difference. Shouldn't
> the fallback version skip the barrier?
>
The fallback version is something like:
        smp_mb__before_atomic();
        cmpxchg_relaxed();
        smp_mb__after_atomic();
, so there will be a full barrier on PPC after the cmpxchg no matter
whether the cmpxchg succeeds or not. And the fallback version cannot skip
the barrier, because there is no way for the fallback version to tell
whether the cmpxchg_relaxed() succeeded or not. So in my previous version
of PPC atomic variants support, I defined cmpxchg_acquire() in the asm
header instead of using atomic generation.
That said, now I think about this, maybe we can implement the fallback
version as:
        smp_mb__before_atomic();
        ret = cmpxchg_relaxed(ptr, old, new);
        if (old == ret)
                smp_mb__after_atomic();
, in this way, the fallback version can handle the barrier skipping
better.
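Sketched as a statement-expression macro (hypothetical; not what the
generation scripts emitted at the time):

#define arch_cmpxchg_acquire(ptr, old, new)                            \
({                                                                     \
        __typeof__(*(ptr)) __old = (old);                              \
        __typeof__(*(ptr)) __ret;                                      \
                                                                       \
        __ret = arch_cmpxchg_relaxed((ptr), __old, (new));             \
        if (__ret == __old)     /* fence only when the cmpxchg won */  \
                __atomic_acquire_fence();                              \
        __ret;                                                         \
})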
Regards,
Boqun
> Thanks,
> Nick
^ permalink raw reply [flat|nested] 28+ messages in thread
Thread overview: 28+ messages
2020-11-11 11:07 [PATCH 0/3] powerpc: convert to use ARCH_ATOMIC Nicholas Piggin
2020-11-11 11:07 ` [PATCH 1/3] asm-generic/atomic64: Add support for ARCH_ATOMIC Nicholas Piggin
2020-11-11 13:39 ` Christophe Leroy
2020-11-11 13:44 ` Peter Zijlstra
2020-11-16 1:48 ` Nicholas Piggin
2020-11-11 11:07 ` [PATCH 2/3] powerpc/64s/iommu: don't use atomic_ function on atomic64_t type Nicholas Piggin
2020-11-11 11:07 ` [PATCH 3/3] powerpc: rewrite atomics to use ARCH_ATOMIC Nicholas Piggin
2020-11-11 19:07 ` kernel test robot
2020-11-13 5:05 ` kernel test robot
2020-11-13 15:30 ` Boqun Feng
2020-12-22 3:52 ` Nicholas Piggin
2020-12-23 2:45 ` Boqun Feng
2020-12-15 11:01 ` [PATCH 0/3] powerpc: convert " Michael Ellerman