* [PATCH v1 1/3] powerpc: Fix eh field when calling lwarx on PPC32
@ 2022-08-02  9:02 ` Christophe Leroy
  0 siblings, 0 replies; 11+ messages in thread
From: Christophe Leroy @ 2022-08-02  9:02 UTC (permalink / raw)
  To: Michael Ellerman, Nicholas Piggin
  Cc: Christophe Leroy, linux-kernel, linuxppc-dev, Pali Rohár, stable

Commit 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of
PPC_LWARX/LDARX macros") properly handled the eh field of lwarx
in asm/bitops.h but failed to clear it for PPC32 in
asm/simple_spinlock.h.

So, do as in arch_atomic_try_cmpxchg_lock(): set it to 1 on PPC64
and to 0 on PPC32. For that, use IS_ENABLED(CONFIG_PPC64), which
evaluates to 1 when CONFIG_PPC64 is set and to 0 otherwise.
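
For reference, IS_ENABLED() evaluates to the integer constant 1 or 0 at
compile time, so it can feed the "i" (immediate) constraint used for the
lwarx EH operand. A reduced, self-contained sketch of the pattern (my own
simplification of the code changed below: helper name invented, acquire
barrier omitted, and like the kernel code it relies on constant folding
to satisfy the "i" constraint):

/* Sketch only -- kernel context assumed for IS_ENABLED(). */
static inline unsigned long trylock_sketch(unsigned int *lock)
{
        unsigned long tmp, token = 1;
        unsigned int eh = IS_ENABLED(CONFIG_PPC64);     /* 1 on PPC64, 0 on PPC32 */

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2,%3\n"   /* EH hint taken from 'eh' */
"       cmpwi   0,%0,0\n"       /* already held? */
"       bne-    2f\n"
"       stwcx.  %1,0,%2\n"      /* try to store the lock token */
"       bne-    1b\n"           /* reservation lost, retry */
"2:"
        : "=&r" (tmp)
        : "r" (token), "r" (lock), "i" (eh)
        : "cr0", "memory");

        return tmp;             /* 0 means the lock was acquired */
}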

Reported-by: Pali Rohár <pali@kernel.org>
Fixes: 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of PPC_LWARX/LDARX macros")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/powerpc/include/asm/simple_spinlock.h | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 7ae6aeef8464..5095c636a680 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -48,10 +48,11 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, token;
+	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
 
 	token = LOCK_TOKEN;
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%2,1\n\
+"1:	lwarx		%0,0,%2,%3\n\
 	cmpwi		0,%0,0\n\
 	bne-		2f\n\
 	stwcx.		%1,0,%2\n\
@@ -59,7 +60,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 	PPC_ACQUIRE_BARRIER
 "2:"
 	: "=&r" (tmp)
-	: "r" (token), "r" (&lock->slock)
+	: "r" (token), "r" (&lock->slock), "i" (eh)
 	: "cr0", "memory");
 
 	return tmp;
@@ -156,9 +157,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 static inline long __arch_read_trylock(arch_rwlock_t *rw)
 {
 	long tmp;
+	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
 
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%1,1\n"
+"1:	lwarx		%0,0,%1,%2\n"
 	__DO_SIGN_EXTEND
 "	addic.		%0,%0,1\n\
 	ble-		2f\n"
@@ -166,7 +168,7 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
 	bne-		1b\n"
 	PPC_ACQUIRE_BARRIER
 "2:"	: "=&r" (tmp)
-	: "r" (&rw->lock)
+	: "r" (&rw->lock), "i" (eh)
 	: "cr0", "xer", "memory");
 
 	return tmp;
@@ -179,17 +181,18 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
 static inline long __arch_write_trylock(arch_rwlock_t *rw)
 {
 	long tmp, token;
+	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
 
 	token = WRLOCK_TOKEN;
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%2,1\n\
+"1:	lwarx		%0,0,%2,%3\n\
 	cmpwi		0,%0,0\n\
 	bne-		2f\n"
 "	stwcx.		%1,0,%2\n\
 	bne-		1b\n"
 	PPC_ACQUIRE_BARRIER
 "2:"	: "=&r" (tmp)
-	: "r" (token), "r" (&rw->lock)
+	: "r" (token), "r" (&rw->lock), "i" (eh)
 	: "cr0", "memory");
 
 	return tmp;
-- 
2.36.1



* [PATCH v1 2/3] powerpc: Don't hide eh field of lwarx behind a macro
  2022-08-02  9:02 ` Christophe Leroy
@ 2022-08-02  9:02   ` Christophe Leroy
  -1 siblings, 0 replies; 11+ messages in thread
From: Christophe Leroy @ 2022-08-02  9:02 UTC (permalink / raw)
  To: Michael Ellerman, Nicholas Piggin
  Cc: Christophe Leroy, linux-kernel, linuxppc-dev

The eh field must remain 0 on PPC32 and is only used
by PPC64.

Don't hide that behind a macro; just leave the responsibility
to the caller.

At the moment, the only users of PPC_RAW_L{WDQ}ARX set
the eh field to 0, so the special handling in __PPC_EH
is useless. Just take the value given by the caller.

Do the same for DEFINE_TESTOP(): drop the special handling in that
macro and ensure the caller hands over the proper eh value.
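
For illustration, a sketch of what this asks of callers (my own example,
not code from the tree; the PPC_RAW_LWARX() argument order is assumed
from its existing users):

/* With the special-casing gone, whoever emits a larx picks the EH hint
 * itself: 0 on PPC32 (e500v1/v2 treat a larx with EH set as an illegal
 * instruction), 1 on PPC64.  IS_ENABLED() gives exactly that. */
static inline unsigned int emit_lwarx_sketch(int t, int a, int b)
{
        return PPC_RAW_LWARX(t, a, b, IS_ENABLED(CONFIG_PPC64));
}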

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/powerpc/include/asm/bitops.h     |  4 ++--
 arch/powerpc/include/asm/ppc-opcode.h | 11 +----------
 2 files changed, 3 insertions(+), 12 deletions(-)

diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 344fba3b16eb..c2b8c53e0dcb 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -163,7 +163,7 @@ static inline unsigned long fn(			\
 	"bne- 1b\n"					\
 	postfix						\
 	: "=&r" (old), "=&r" (t)			\
-	: "rK" (mask), "r" (p), "i" (IS_ENABLED(CONFIG_PPC64) ? eh : 0)	\
+	: "rK" (mask), "r" (p), "i" (eh)		\
 	: "cc", "memory");				\
 	return (old & mask);				\
 }
@@ -171,7 +171,7 @@ static inline unsigned long fn(			\
 DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
 	      PPC_ATOMIC_EXIT_BARRIER, 0)
 DEFINE_TESTOP(test_and_set_bits_lock, or, "",
-	      PPC_ACQUIRE_BARRIER, 1)
+	      PPC_ACQUIRE_BARRIER, IS_ENABLED(CONFIG_PPC64))
 DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
 	      PPC_ATOMIC_EXIT_BARRIER, 0)
 
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 7b81b37a191e..d9703c5fd713 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -343,6 +343,7 @@
 #define __PPC_SPR(r)	((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11))
 #define __PPC_RC21	(0x1 << 10)
 #define __PPC_PRFX_R(r)	(((r) & 0x1) << 20)
+#define __PPC_EH(eh)	(((eh) & 0x1) << 0)
 
 /*
  * Both low and high 16 bits are added as SIGNED additions, so if low 16 bits
@@ -359,16 +360,6 @@
 #define PPC_LI_MASK	0x03fffffc
 #define PPC_LI(v)	((v) & PPC_LI_MASK)
 
-/*
- * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
- * larx with EH set as an illegal instruction.
- */
-#ifdef CONFIG_PPC64
-#define __PPC_EH(eh)	(((eh) & 0x1) << 0)
-#else
-#define __PPC_EH(eh)	0
-#endif
-
 /* Base instruction encoding */
 #define PPC_RAW_CP_ABORT		(0x7c00068c)
 #define PPC_RAW_COPY(a, b)		(PPC_INST_COPY | ___PPC_RA(a) | ___PPC_RB(b))
-- 
2.36.1



* [PATCH v1 3/3] powerpc: Make eh value more explicit when using lwarx
  2022-08-02  9:02 ` Christophe Leroy
@ 2022-08-02  9:02   ` Christophe Leroy
  -1 siblings, 0 replies; 11+ messages in thread
From: Christophe Leroy @ 2022-08-02  9:02 UTC (permalink / raw)
  To: Michael Ellerman, Nicholas Piggin
  Cc: Christophe Leroy, linux-kernel, linuxppc-dev

Just like in the first patch of this series, define a local 'eh'
variable in order to make the code clearer.

IS_ENABLED() already returns either 1 or 0, so there is no need
for IS_ENABLED(CONFIG_PPC64) ? 1 : 0.
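
For context, a simplified sketch of why IS_ENABLED() is already a plain
0/1 integer constant expression (the real macros live in
include/linux/kconfig.h and also handle =m options; the top-level name
here is changed to mark it as an illustration):

#define __ARG_PLACEHOLDER_1     0,
#define __take_second_arg(__ignored, val, ...)  val
/* CONFIG_FOO=y makes Kconfig define CONFIG_FOO to 1, so the paste below
 * forms __ARG_PLACEHOLDER_1, which injects an extra argument and makes 1
 * the second one; otherwise the fallback 0 ends up second. */
#define ____is_enabled(arg1_or_junk)    __take_second_arg(arg1_or_junk 1, 0)
#define ___is_enabled(val)              ____is_enabled(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED_SKETCH(option)       ___is_enabled(option)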

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/powerpc/include/asm/atomic.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 853dc86864f4..0204e77613ec 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -140,6 +140,7 @@ static __always_inline bool
 arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
 {
 	int r, o = *old;
+	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
 
 	__asm__ __volatile__ (
 "1:	lwarx	%0,0,%2,%5	# atomic_try_cmpxchg_acquire		\n"
@@ -150,7 +151,7 @@ arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
 "\t"	PPC_ACQUIRE_BARRIER "						\n"
 "2:									\n"
 	: "=&r" (r), "+m" (v->counter)
-	: "r" (&v->counter), "r" (o), "r" (new), "i" (IS_ENABLED(CONFIG_PPC64) ? 1 : 0)
+	: "r" (&v->counter), "r" (o), "r" (new), "i" (eh)
 	: "cr0", "memory");
 
 	if (unlikely(r != o))
-- 
2.36.1



* Re: [PATCH v1 1/3] powerpc: Fix eh field when calling lwarx on PPC32
  2022-08-02  9:02 ` Christophe Leroy
@ 2022-08-02 18:13   ` Segher Boessenkool
  -1 siblings, 0 replies; 11+ messages in thread
From: Segher Boessenkool @ 2022-08-02 18:13 UTC (permalink / raw)
  To: Christophe Leroy
  Cc: Pali Rohár, linux-kernel, stable, Nicholas Piggin, linuxppc-dev

Hi!

On Tue, Aug 02, 2022 at 11:02:36AM +0200, Christophe Leroy wrote:
> Commit 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of
> PPC_LWARX/LDARX macros") properly handled the eh field of lwarx
> in asm/bitops.h but failed to clear it for PPC32 in
> asm/simple_spinlock.h
> 
> So, do as in arch_atomic_try_cmpxchg_lock(), set it to 1 if PPC64
> but set it to 0 if PPC32. For that use IS_ENABLED(CONFIG_PPC64) which
> returns 1 when CONFIG_PPC64 is set and 0 otherwise.
> 
> Reported-by: Pali Rohár <pali@kernel.org>

Reviewed-by: Segher Boessenkool <segher@kernel.crashing.org>

> +	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
>  
>  	token = LOCK_TOKEN;
>  	__asm__ __volatile__(
> -"1:	lwarx		%0,0,%2,1\n\
> +"1:	lwarx		%0,0,%2,%3\n\
>  	cmpwi		0,%0,0\n\
>  	bne-		2f\n\
>  	stwcx.		%1,0,%2\n\
> @@ -59,7 +60,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
>  	PPC_ACQUIRE_BARRIER
>  "2:"
>  	: "=&r" (tmp)
> -	: "r" (token), "r" (&lock->slock)
> +	: "r" (token), "r" (&lock->slock), "i" (eh)
>  	: "cr0", "memory");

That should work, yes.  But please note that "n" is preferred when a
number is required (like here), rather than "i", which also allows
other kinds of constants.
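
A minimal illustration of the difference (an example only, not taken
from the patch; kernel context assumed for IS_ENABLED()): "i" also
accepts link-time constants such as symbol addresses, while "n" only
accepts a numeric constant like the 0/1 that IS_ENABLED() produces.

/* Sketch: same operand, stricter constraint ("n" instead of "i"). */
static inline unsigned int load_reserved_sketch(unsigned int *p)
{
        unsigned int tmp;

        __asm__ __volatile__("lwarx     %0,0,%1,%2"
                             : "=&r" (tmp)
                             : "r" (p), "n" (IS_ENABLED(CONFIG_PPC64))
                             : "memory");
        return tmp;
}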

Thanks!


Segher


* Re: [PATCH v1 1/3] powerpc: Fix eh field when calling lwarx on PPC32
  2022-08-02  9:02 ` Christophe Leroy
@ 2022-08-02 19:10   ` Pali Rohár
  -1 siblings, 0 replies; 11+ messages in thread
From: Pali Rohár @ 2022-08-02 19:10 UTC (permalink / raw)
  To: Christophe Leroy
  Cc: Michael Ellerman, Nicholas Piggin, linux-kernel, linuxppc-dev, stable

On Tuesday 02 August 2022 11:02:36 Christophe Leroy wrote:
> Commit 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of
> PPC_LWARX/LDARX macros") properly handled the eh field of lwarx
> in asm/bitops.h but failed to clear it for PPC32 in
> asm/simple_spinlock.h
> 
> So, do as in arch_atomic_try_cmpxchg_lock(), set it to 1 if PPC64
> but set it to 0 if PPC32. For that use IS_ENABLED(CONFIG_PPC64) which
> returns 1 when CONFIG_PPC64 is set and 0 otherwise.
> 
> Reported-by: Pali Rohár <pali@kernel.org>
> Fixes: 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of PPC_LWARX/LDARX macros")
> Cc: stable@vger.kernel.org
> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>

This fix works perfectly. Thanks!

Tested-by: Pali Rohár <pali@kernel.org>

> ---
>  arch/powerpc/include/asm/simple_spinlock.h | 15 +++++++++------
>  1 file changed, 9 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
> index 7ae6aeef8464..5095c636a680 100644
> --- a/arch/powerpc/include/asm/simple_spinlock.h
> +++ b/arch/powerpc/include/asm/simple_spinlock.h
> @@ -48,10 +48,11 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
>  static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
>  {
>  	unsigned long tmp, token;
> +	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
>  
>  	token = LOCK_TOKEN;
>  	__asm__ __volatile__(
> -"1:	lwarx		%0,0,%2,1\n\
> +"1:	lwarx		%0,0,%2,%3\n\
>  	cmpwi		0,%0,0\n\
>  	bne-		2f\n\
>  	stwcx.		%1,0,%2\n\
> @@ -59,7 +60,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
>  	PPC_ACQUIRE_BARRIER
>  "2:"
>  	: "=&r" (tmp)
> -	: "r" (token), "r" (&lock->slock)
> +	: "r" (token), "r" (&lock->slock), "i" (eh)
>  	: "cr0", "memory");
>  
>  	return tmp;
> @@ -156,9 +157,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
>  static inline long __arch_read_trylock(arch_rwlock_t *rw)
>  {
>  	long tmp;
> +	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
>  
>  	__asm__ __volatile__(
> -"1:	lwarx		%0,0,%1,1\n"
> +"1:	lwarx		%0,0,%1,%2\n"
>  	__DO_SIGN_EXTEND
>  "	addic.		%0,%0,1\n\
>  	ble-		2f\n"
> @@ -166,7 +168,7 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
>  	bne-		1b\n"
>  	PPC_ACQUIRE_BARRIER
>  "2:"	: "=&r" (tmp)
> -	: "r" (&rw->lock)
> +	: "r" (&rw->lock), "i" (eh)
>  	: "cr0", "xer", "memory");
>  
>  	return tmp;
> @@ -179,17 +181,18 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
>  static inline long __arch_write_trylock(arch_rwlock_t *rw)
>  {
>  	long tmp, token;
> +	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
>  
>  	token = WRLOCK_TOKEN;
>  	__asm__ __volatile__(
> -"1:	lwarx		%0,0,%2,1\n\
> +"1:	lwarx		%0,0,%2,%3\n\
>  	cmpwi		0,%0,0\n\
>  	bne-		2f\n"
>  "	stwcx.		%1,0,%2\n\
>  	bne-		1b\n"
>  	PPC_ACQUIRE_BARRIER
>  "2:"	: "=&r" (tmp)
> -	: "r" (token), "r" (&rw->lock)
> +	: "r" (token), "r" (&rw->lock), "i" (eh)
>  	: "cr0", "memory");
>  
>  	return tmp;
> -- 
> 2.36.1
> 


* Re: [PATCH v1 1/3] powerpc: Fix eh field when calling lwarx on PPC32
  2022-08-02  9:02 ` Christophe Leroy
@ 2022-08-13 22:38 ` Michael Ellerman
  -1 siblings, 0 replies; 11+ messages in thread
From: Michael Ellerman @ 2022-08-13 22:38 UTC (permalink / raw)
  To: Michael Ellerman, Christophe Leroy, Nicholas Piggin
  Cc: linuxppc-dev, Pali Rohár, linux-kernel, stable

On Tue, 2 Aug 2022 11:02:36 +0200, Christophe Leroy wrote:
> Commit 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of
> PPC_LWARX/LDARX macros") properly handled the eh field of lwarx
> in asm/bitops.h but failed to clear it for PPC32 in
> asm/simple_spinlock.h
> 
> So, do as in arch_atomic_try_cmpxchg_lock(), set it to 1 if PPC64
> but set it to 0 if PPC32. For that use IS_ENABLED(CONFIG_PPC64) which
> returns 1 when CONFIG_PPC64 is set and 0 otherwise.
> 
> [...]

Applied to powerpc/fixes.

[1/3] powerpc: Fix eh field when calling lwarx on PPC32
      https://git.kernel.org/powerpc/c/18db466a9a306406dab3b134014d9f6ed642471c
[2/3] powerpc: Don't hide eh field of lwarx behind a macro
      https://git.kernel.org/powerpc/c/eb5a33ea31190c189ca4a59de4687b0877662c06
[3/3] powerpc: Make eh value more explicit when using lwarx
      https://git.kernel.org/powerpc/c/5cccf7a5215d12027e55e247907817631b413c28

cheers


Thread overview: 11+ messages
2022-08-02  9:02 [PATCH v1 1/3] powerpc: Fix eh field when calling lwarx on PPC32 Christophe Leroy
2022-08-02  9:02 ` [PATCH v1 2/3] powerpc: Don't hide eh field of lwarx behind a macro Christophe Leroy
2022-08-02  9:02 ` [PATCH v1 3/3] powerpc: Make eh value more explicit when using lwarx Christophe Leroy
2022-08-02 18:13 ` [PATCH v1 1/3] powerpc: Fix eh field when calling lwarx on PPC32 Segher Boessenkool
2022-08-02 19:10 ` Pali Rohár
2022-08-13 22:38 ` Michael Ellerman
