* [PATCH 0/3] Fix oops in shared-processor spinlocks
@ 2019-07-28 12:54 Christopher M. Riedl
  2019-07-28 12:54 ` [PATCH 1/3] powerpc/spinlocks: Refactor SHARED_PROCESSOR Christopher M. Riedl
                   ` (2 more replies)
  0 siblings, 3 replies; 11+ messages in thread
From: Christopher M. Riedl @ 2019-07-28 12:54 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Christopher M. Riedl, ajd

This series fixes an oops when calling the shared-processor spinlock
implementation from a non-shared-processor LPAR. It also takes the
opportunity to refactor SHARED_PROCESSOR a bit.

Reference:  https://github.com/linuxppc/issues/issues/229

Christopher M. Riedl (3):
  powerpc/spinlocks: Refactor SHARED_PROCESSOR
  powerpc/spinlocks: Rename SPLPAR-only spinlocks
  powerpc/spinlocks: Fix oops in shared-processor spinlocks

 arch/powerpc/include/asm/spinlock.h | 59 ++++++++++++++++++++---------
 arch/powerpc/lib/locks.c            |  6 +--
 2 files changed, 45 insertions(+), 20 deletions(-)

-- 
2.22.0



* [PATCH 1/3] powerpc/spinlocks: Refactor SHARED_PROCESSOR
  2019-07-28 12:54 [PATCH 0/3] Fix oops in shared-processor spinlocks Christopher M. Riedl
@ 2019-07-28 12:54 ` Christopher M. Riedl
  2019-07-30 21:31   ` Thiago Jung Bauermann
  2019-08-01  3:20   ` Andrew Donnellan
  2019-07-28 12:54 ` [PATCH 2/3] powerpc/spinlocks: Rename SPLPAR-only spinlocks Christopher M. Riedl
  2019-07-28 12:54 ` [PATCH 3/3] powerpc/spinlock: Fix oops in shared-processor spinlocks Christopher M. Riedl
  2 siblings, 2 replies; 11+ messages in thread
From: Christopher M. Riedl @ 2019-07-28 12:54 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Christopher M. Riedl, ajd

Determining if a processor is in shared processor mode is not a constant
so don't hide it behind a #define.

Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>
---
 arch/powerpc/include/asm/spinlock.h | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index a47f827bc5f1..8631b0b4e109 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -101,15 +101,24 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 #if defined(CONFIG_PPC_SPLPAR)
 /* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
 extern void __spin_yield(arch_spinlock_t *lock);
 extern void __rw_yield(arch_rwlock_t *lock);
 #else /* SPLPAR */
 #define __spin_yield(x)	barrier()
 #define __rw_yield(x)	barrier()
-#define SHARED_PROCESSOR	0
 #endif
 
+static inline bool is_shared_processor(void)
+{
+/* Only server processors have an lppaca struct */
+#ifdef CONFIG_PPC_BOOK3S
+	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
+		lppaca_shared_proc(local_paca->lppaca_ptr));
+#else
+	return false;
+#endif
+}
+
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	while (1) {
@@ -117,7 +126,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 			break;
 		do {
 			HMT_low();
-			if (SHARED_PROCESSOR)
+			if (is_shared_processor())
 				__spin_yield(lock);
 		} while (unlikely(lock->slock != 0));
 		HMT_medium();
@@ -136,7 +145,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 		local_irq_restore(flags);
 		do {
 			HMT_low();
-			if (SHARED_PROCESSOR)
+			if (is_shared_processor())
 				__spin_yield(lock);
 		} while (unlikely(lock->slock != 0));
 		HMT_medium();
@@ -226,7 +235,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 			break;
 		do {
 			HMT_low();
-			if (SHARED_PROCESSOR)
+			if (is_shared_processor())
 				__rw_yield(rw);
 		} while (unlikely(rw->lock < 0));
 		HMT_medium();
@@ -240,7 +249,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 			break;
 		do {
 			HMT_low();
-			if (SHARED_PROCESSOR)
+			if (is_shared_processor())
 				__rw_yield(rw);
 		} while (unlikely(rw->lock != 0));
 		HMT_medium();
-- 
2.22.0



* [PATCH 2/3] powerpc/spinlocks: Rename SPLPAR-only spinlocks
  2019-07-28 12:54 [PATCH 0/3] Fix oops in shared-processor spinlocks Christopher M. Riedl
  2019-07-28 12:54 ` [PATCH 1/3] powerpc/spinlocks: Refactor SHARED_PROCESSOR Christopher M. Riedl
@ 2019-07-28 12:54 ` Christopher M. Riedl
  2019-08-01  3:27   ` Andrew Donnellan
  2019-07-28 12:54 ` [PATCH 3/3] powerpc/spinlock: Fix oops in shared-processor spinlocks Christopher M. Riedl
  2 siblings, 1 reply; 11+ messages in thread
From: Christopher M. Riedl @ 2019-07-28 12:54 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Christopher M. Riedl, ajd

The __rw_yield and __spin_yield functions only pertain to SPLPAR mode.
Rename them to make this relationship obvious.

Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>
---
 arch/powerpc/include/asm/spinlock.h | 6 ++++--
 arch/powerpc/lib/locks.c            | 6 +++---
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 8631b0b4e109..1e7721176f39 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -101,8 +101,10 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 #if defined(CONFIG_PPC_SPLPAR)
 /* We only yield to the hypervisor if we are in shared processor mode */
-extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
+void splpar_spin_yield(arch_spinlock_t *lock);
+void splpar_rw_yield(arch_rwlock_t *lock);
+#define __spin_yield(x) splpar_spin_yield(x)
+#define __rw_yield(x) splpar_rw_yield(x)
 #else /* SPLPAR */
 #define __spin_yield(x)	barrier()
 #define __rw_yield(x)	barrier()
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 6550b9e5ce5f..6440d5943c00 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -18,7 +18,7 @@
 #include <asm/hvcall.h>
 #include <asm/smp.h>
 
-void __spin_yield(arch_spinlock_t *lock)
+void splpar_spin_yield(arch_spinlock_t *lock)
 {
 	unsigned int lock_value, holder_cpu, yield_count;
 
@@ -36,14 +36,14 @@ void __spin_yield(arch_spinlock_t *lock)
 	plpar_hcall_norets(H_CONFER,
 		get_hard_smp_processor_id(holder_cpu), yield_count);
 }
-EXPORT_SYMBOL_GPL(__spin_yield);
+EXPORT_SYMBOL_GPL(splpar_spin_yield);
 
 /*
  * Waiting for a read lock or a write lock on a rwlock...
  * This turns out to be the same for read and write locks, since
  * we only know the holder if it is write-locked.
  */
-void __rw_yield(arch_rwlock_t *rw)
+void splpar_rw_yield(arch_rwlock_t *rw)
 {
 	int lock_value;
 	unsigned int holder_cpu, yield_count;
-- 
2.22.0



* [PATCH 3/3] powerpc/spinlock: Fix oops in shared-processor spinlocks
  2019-07-28 12:54 [PATCH 0/3] Fix oops in shared-processor spinlocks Christopher M. Riedl
  2019-07-28 12:54 ` [PATCH 1/3] powerpc/spinlocks: Refactor SHARED_PROCESSOR Christopher M. Riedl
  2019-07-28 12:54 ` [PATCH 2/3] powerpc/spinlocks: Rename SPLPAR-only spinlocks Christopher M. Riedl
@ 2019-07-28 12:54 ` Christopher M. Riedl
  2019-08-01  3:33   ` Andrew Donnellan
  2 siblings, 1 reply; 11+ messages in thread
From: Christopher M. Riedl @ 2019-07-28 12:54 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Christopher M. Riedl, ajd

Booting w/ ppc64le_defconfig + CONFIG_PREEMPT results in the kernel
trace below, due to calling the shared-processor spinlock code while
not running in an SPLPAR. Previously, the out-of-line spinlock
implementations were selected based on CONFIG_PPC_SPLPAR at compile
time, without a runtime shared-processor LPAR check.

To fix this, call the actual spinlock implementations from a set of
common functions, spin_yield() and rw_yield(), which check for a
shared-processor LPAR at runtime and select the appropriate lock
implementation.

[    0.430878] BUG: Kernel NULL pointer dereference at 0x00000100
[    0.431991] Faulting instruction address: 0xc000000000097f88
[    0.432934] Oops: Kernel access of bad area, sig: 7 [#1]
[    0.433448] LE PAGE_SIZE=64K MMU=Radix MMU=Hash PREEMPT SMP NR_CPUS=2048 NUMA PowerNV
[    0.434479] Modules linked in:
[    0.435055] CPU: 0 PID: 2 Comm: kthreadd Not tainted 5.2.0-rc6-00491-g249155c20f9b #28
[    0.435730] NIP:  c000000000097f88 LR: c000000000c07a88 CTR: c00000000015ca10
[    0.436383] REGS: c0000000727079f0 TRAP: 0300   Not tainted  (5.2.0-rc6-00491-g249155c20f9b)
[    0.437004] MSR:  9000000002009033 <SF,HV,VEC,EE,ME,IR,DR,RI,LE>  CR: 84000424  XER: 20040000
[    0.437874] CFAR: c000000000c07a84 DAR: 0000000000000100 DSISR: 00080000 IRQMASK: 1
[    0.437874] GPR00: c000000000c07a88 c000000072707c80 c000000001546300 c00000007be38a80
[    0.437874] GPR04: c0000000726f0c00 0000000000000002 c00000007279c980 0000000000000100
[    0.437874] GPR08: c000000001581b78 0000000080000001 0000000000000008 c00000007279c9b0
[    0.437874] GPR12: 0000000000000000 c000000001730000 c000000000142558 0000000000000000
[    0.437874] GPR16: 0000000000000000 0000000000000000 0000000000000000 0000000000000000
[    0.437874] GPR20: 0000000000000000 0000000000000000 0000000000000000 0000000000000000
[    0.437874] GPR24: c00000007be38a80 c000000000c002f4 0000000000000000 0000000000000000
[    0.437874] GPR28: c000000072221a00 c0000000726c2600 c00000007be38a80 c00000007be38a80
[    0.443992] NIP [c000000000097f88] __spin_yield+0x48/0xa0
[    0.444523] LR [c000000000c07a88] __raw_spin_lock+0xb8/0xc0
[    0.445080] Call Trace:
[    0.445670] [c000000072707c80] [c000000072221a00] 0xc000000072221a00 (unreliable)
[    0.446425] [c000000072707cb0] [c000000000bffb0c] __schedule+0xbc/0x850
[    0.447078] [c000000072707d70] [c000000000c002f4] schedule+0x54/0x130
[    0.447694] [c000000072707da0] [c0000000001427dc] kthreadd+0x28c/0x2b0
[    0.448389] [c000000072707e20] [c00000000000c1cc] ret_from_kernel_thread+0x5c/0x70
[    0.449143] Instruction dump:
[    0.449821] 4d9e0020 552a043e 210a07ff 79080fe0 0b080000 3d020004 3908b878 794a1f24
[    0.450587] e8e80000 7ce7502a e8e70000 38e70100 <7ca03c2c> 70a70001 78a50020 4d820020
[    0.452808] ---[ end trace 474d6b2b8fc5cb7e ]---

Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>
---
 arch/powerpc/include/asm/spinlock.h | 36 ++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 1e7721176f39..8161809c6be1 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -103,11 +103,9 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 /* We only yield to the hypervisor if we are in shared processor mode */
 void splpar_spin_yield(arch_spinlock_t *lock);
 void splpar_rw_yield(arch_rwlock_t *lock);
-#define __spin_yield(x) splpar_spin_yield(x)
-#define __rw_yield(x) splpar_rw_yield(x)
 #else /* SPLPAR */
-#define __spin_yield(x)	barrier()
-#define __rw_yield(x)	barrier()
+#define splpar_spin_yield(lock)
+#define splpar_rw_yield(lock)
 #endif
 
 static inline bool is_shared_processor(void)
@@ -121,6 +119,22 @@ static inline bool is_shared_processor(void)
 #endif
 }
 
+static inline void spin_yield(arch_spinlock_t *lock)
+{
+	if (is_shared_processor())
+		splpar_spin_yield(lock);
+	else
+		barrier();
+}
+
+static inline void rw_yield(arch_rwlock_t *lock)
+{
+	if (is_shared_processor())
+		splpar_rw_yield(lock);
+	else
+		barrier();
+}
+
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	while (1) {
@@ -129,7 +143,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 		do {
 			HMT_low();
 			if (is_shared_processor())
-				__spin_yield(lock);
+				spin_yield(lock);
 		} while (unlikely(lock->slock != 0));
 		HMT_medium();
 	}
@@ -148,7 +162,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 		do {
 			HMT_low();
 			if (is_shared_processor())
-				__spin_yield(lock);
+				spin_yield(lock);
 		} while (unlikely(lock->slock != 0));
 		HMT_medium();
 		local_irq_restore(flags_dis);
@@ -238,7 +252,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 		do {
 			HMT_low();
 			if (is_shared_processor())
-				__rw_yield(rw);
+				rw_yield(rw);
 		} while (unlikely(rw->lock < 0));
 		HMT_medium();
 	}
@@ -252,7 +266,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 		do {
 			HMT_low();
 			if (is_shared_processor())
-				__rw_yield(rw);
+				rw_yield(rw);
 		} while (unlikely(rw->lock != 0));
 		HMT_medium();
 	}
@@ -292,9 +306,9 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	rw->lock = 0;
 }
 
-#define arch_spin_relax(lock)	__spin_yield(lock)
-#define arch_read_relax(lock)	__rw_yield(lock)
-#define arch_write_relax(lock)	__rw_yield(lock)
+#define arch_spin_relax(lock)	spin_yield(lock)
+#define arch_read_relax(lock)	rw_yield(lock)
+#define arch_write_relax(lock)	rw_yield(lock)
 
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()   smp_mb()
-- 
2.22.0



* Re: [PATCH 1/3] powerpc/spinlocks: Refactor SHARED_PROCESSOR
  2019-07-28 12:54 ` [PATCH 1/3] powerpc/spinlocks: Refactor SHARED_PROCESSOR Christopher M. Riedl
@ 2019-07-30 21:31   ` Thiago Jung Bauermann
  2019-07-30 23:31     ` Christopher M Riedl
  2019-08-01  3:20   ` Andrew Donnellan
  1 sibling, 1 reply; 11+ messages in thread
From: Thiago Jung Bauermann @ 2019-07-30 21:31 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: linuxppc-dev, Christopher M. Riedl, ajd


Christopher M. Riedl <cmr@informatik.wtf> writes:

> Determining if a processor is in shared processor mode is not a constant
> so don't hide it behind a #define.
>
> Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>
> ---
>  arch/powerpc/include/asm/spinlock.h | 21 +++++++++++++++------
>  1 file changed, 15 insertions(+), 6 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
> index a47f827bc5f1..8631b0b4e109 100644
> --- a/arch/powerpc/include/asm/spinlock.h
> +++ b/arch/powerpc/include/asm/spinlock.h
> @@ -101,15 +101,24 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
>
>  #if defined(CONFIG_PPC_SPLPAR)
>  /* We only yield to the hypervisor if we are in shared processor mode */
> -#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
>  extern void __spin_yield(arch_spinlock_t *lock);
>  extern void __rw_yield(arch_rwlock_t *lock);
>  #else /* SPLPAR */
>  #define __spin_yield(x)	barrier()
>  #define __rw_yield(x)	barrier()
> -#define SHARED_PROCESSOR	0
>  #endif
>
> +static inline bool is_shared_processor(void)
> +{
> +/* Only server processors have an lppaca struct */
> +#ifdef CONFIG_PPC_BOOK3S
> +	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
> +		lppaca_shared_proc(local_paca->lppaca_ptr));
> +#else
> +	return false;
> +#endif
> +}
> +

CONFIG_PPC_SPLPAR depends on CONFIG_PPC_PSERIES, which depends on
CONFIG_PPC_BOOK3S so the #ifdef above is unnecessary:

if CONFIG_PPC_BOOK3S is unset then CONFIG_PPC_SPLPAR will be unset as
well and the return expression should short-circuit to false.
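
A minimal sketch of that simplification, purely for illustration (it
assumes lppaca_shared_proc() and local_paca->lppaca_ptr are declared for
every platform that includes this header):

	static inline bool is_shared_processor(void)
	{
		/*
		 * CONFIG_PPC_SPLPAR implies CONFIG_PPC_BOOK3S, so this
		 * evaluates to a constant false everywhere else.
		 */
		return IS_ENABLED(CONFIG_PPC_SPLPAR) &&
			lppaca_shared_proc(local_paca->lppaca_ptr);
	}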

--
Thiago Jung Bauermann
IBM Linux Technology Center


* Re: [PATCH 1/3] powerpc/spinlocks: Refactor SHARED_PROCESSOR
  2019-07-30 21:31   ` Thiago Jung Bauermann
@ 2019-07-30 23:31     ` Christopher M Riedl
  2019-07-31  0:11       ` Thiago Jung Bauermann
  0 siblings, 1 reply; 11+ messages in thread
From: Christopher M Riedl @ 2019-07-30 23:31 UTC (permalink / raw)
  To: Thiago Jung Bauermann, linuxppc-dev; +Cc: linuxppc-dev, ajd

> On July 30, 2019 at 4:31 PM Thiago Jung Bauermann <bauerman@linux.ibm.com> wrote:
> 
> 
> 
> Christopher M. Riedl <cmr@informatik.wtf> writes:
> 
> > Determining if a processor is in shared processor mode is not a constant
> > so don't hide it behind a #define.
> >
> > Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>
> > ---
> >  arch/powerpc/include/asm/spinlock.h | 21 +++++++++++++++------
> >  1 file changed, 15 insertions(+), 6 deletions(-)
> >
> > diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
> > index a47f827bc5f1..8631b0b4e109 100644
> > --- a/arch/powerpc/include/asm/spinlock.h
> > +++ b/arch/powerpc/include/asm/spinlock.h
> > @@ -101,15 +101,24 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
> >
> >  #if defined(CONFIG_PPC_SPLPAR)
> >  /* We only yield to the hypervisor if we are in shared processor mode */
> > -#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
> >  extern void __spin_yield(arch_spinlock_t *lock);
> >  extern void __rw_yield(arch_rwlock_t *lock);
> >  #else /* SPLPAR */
> >  #define __spin_yield(x)	barrier()
> >  #define __rw_yield(x)	barrier()
> > -#define SHARED_PROCESSOR	0
> >  #endif
> >
> > +static inline bool is_shared_processor(void)
> > +{
> > +/* Only server processors have an lppaca struct */
> > +#ifdef CONFIG_PPC_BOOK3S
> > +	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
> > +		lppaca_shared_proc(local_paca->lppaca_ptr));
> > +#else
> > +	return false;
> > +#endif
> > +}
> > +
> 
> CONFIG_PPC_SPLPAR depends on CONFIG_PPC_PSERIES, which depends on
> CONFIG_PPC_BOOK3S so the #ifdef above is unnecessary:
> 
> if CONFIG_PPC_BOOK3S is unset then CONFIG_PPC_SPLPAR will be unset as
> well and the return expression should short-circuit to false.
> 

Agreed, but the #ifdef is necessary to compile platforms which include
this header but do not implement lppaca_shared_proc(...) and friends.
I can reword the comment if that helps.

> --
> Thiago Jung Bauermann
> IBM Linux Technology Center


* Re: [PATCH 1/3] powerpc/spinlocks: Refactor SHARED_PROCESSOR
  2019-07-30 23:31     ` Christopher M Riedl
@ 2019-07-31  0:11       ` Thiago Jung Bauermann
  2019-07-31  2:36         ` Christopher M Riedl
  0 siblings, 1 reply; 11+ messages in thread
From: Thiago Jung Bauermann @ 2019-07-31  0:11 UTC (permalink / raw)
  To: Christopher M Riedl; +Cc: linuxppc-dev, linuxppc-dev, ajd


Christopher M Riedl <cmr@informatik.wtf> writes:

>> On July 30, 2019 at 4:31 PM Thiago Jung Bauermann <bauerman@linux.ibm.com> wrote:
>>
>>
>>
>> Christopher M. Riedl <cmr@informatik.wtf> writes:
>>
>> > Determining if a processor is in shared processor mode is not a constant
>> > so don't hide it behind a #define.
>> >
>> > Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>
>> > ---
>> >  arch/powerpc/include/asm/spinlock.h | 21 +++++++++++++++------
>> >  1 file changed, 15 insertions(+), 6 deletions(-)
>> >
>> > diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
>> > index a47f827bc5f1..8631b0b4e109 100644
>> > --- a/arch/powerpc/include/asm/spinlock.h
>> > +++ b/arch/powerpc/include/asm/spinlock.h
>> > @@ -101,15 +101,24 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
>> >
>> >  #if defined(CONFIG_PPC_SPLPAR)
>> >  /* We only yield to the hypervisor if we are in shared processor mode */
>> > -#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
>> >  extern void __spin_yield(arch_spinlock_t *lock);
>> >  extern void __rw_yield(arch_rwlock_t *lock);
>> >  #else /* SPLPAR */
>> >  #define __spin_yield(x)	barrier()
>> >  #define __rw_yield(x)	barrier()
>> > -#define SHARED_PROCESSOR	0
>> >  #endif
>> >
>> > +static inline bool is_shared_processor(void)
>> > +{
>> > +/* Only server processors have an lppaca struct */
>> > +#ifdef CONFIG_PPC_BOOK3S
>> > +	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
>> > +		lppaca_shared_proc(local_paca->lppaca_ptr));
>> > +#else
>> > +	return false;
>> > +#endif
>> > +}
>> > +
>>
>> CONFIG_PPC_SPLPAR depends on CONFIG_PPC_PSERIES, which depends on
>> CONFIG_PPC_BOOK3S so the #ifdef above is unnecessary:
>>
>> if CONFIG_PPC_BOOK3S is unset then CONFIG_PPC_SPLPAR will be unset as
>> well and the return expression should short-circuit to false.
>>
>
> Agreed, but the #ifdef is necessary to compile platforms which include
> this header but do not implement lppaca_shared_proc(...) and friends.
> I can reword the comment if that helps.

Ah, indeed. Yes, if you could mention that in the commit I think it
would help. These #ifdefs are becoming démodé so it's good to know why
they're there.

Another alternative is to provide a dummy lppaca_shared_proc() which
always returns false when CONFIG_PPC_BOOK3S isn't set (just mentioning
it, I don't have a preference).
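
A rough, hypothetical sketch of that stub, written as a macro so the
!CONFIG_PPC_BOOK3S side never has to see the lppaca definitions at all:

	#ifndef CONFIG_PPC_BOOK3S
	/*
	 * The unused argument is dropped by the preprocessor, so
	 * lppaca_ptr need not exist on these platforms.
	 */
	#define lppaca_shared_proc(lp)	false
	#endif

Whether that ends up simpler than the #ifdef inside is_shared_processor()
is debatable, of course.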

--
Thiago Jung Bauermann
IBM Linux Technology Center


* Re: [PATCH 1/3] powerpc/spinlocks: Refactor SHARED_PROCESSOR
  2019-07-31  0:11       ` Thiago Jung Bauermann
@ 2019-07-31  2:36         ` Christopher M Riedl
  0 siblings, 0 replies; 11+ messages in thread
From: Christopher M Riedl @ 2019-07-31  2:36 UTC (permalink / raw)
  To: Thiago Jung Bauermann; +Cc: linuxppc-dev, linuxppc-dev, ajd


> On July 30, 2019 at 7:11 PM Thiago Jung Bauermann <bauerman@linux.ibm.com> wrote:
> 
> 
> 
> Christopher M Riedl <cmr@informatik.wtf> writes:
> 
> >> On July 30, 2019 at 4:31 PM Thiago Jung Bauermann <bauerman@linux.ibm.com> wrote:
> >>
> >>
> >>
> >> Christopher M. Riedl <cmr@informatik.wtf> writes:
> >>
> >> > Determining if a processor is in shared processor mode is not a constant
> >> > so don't hide it behind a #define.
> >> >
> >> > Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>
> >> > ---
> >> >  arch/powerpc/include/asm/spinlock.h | 21 +++++++++++++++------
> >> >  1 file changed, 15 insertions(+), 6 deletions(-)
> >> >
> >> > diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
> >> > index a47f827bc5f1..8631b0b4e109 100644
> >> > --- a/arch/powerpc/include/asm/spinlock.h
> >> > +++ b/arch/powerpc/include/asm/spinlock.h
> >> > @@ -101,15 +101,24 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
> >> >
> >> >  #if defined(CONFIG_PPC_SPLPAR)
> >> >  /* We only yield to the hypervisor if we are in shared processor mode */
> >> > -#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
> >> >  extern void __spin_yield(arch_spinlock_t *lock);
> >> >  extern void __rw_yield(arch_rwlock_t *lock);
> >> >  #else /* SPLPAR */
> >> >  #define __spin_yield(x)	barrier()
> >> >  #define __rw_yield(x)	barrier()
> >> > -#define SHARED_PROCESSOR	0
> >> >  #endif
> >> >
> >> > +static inline bool is_shared_processor(void)
> >> > +{
> >> > +/* Only server processors have an lppaca struct */
> >> > +#ifdef CONFIG_PPC_BOOK3S
> >> > +	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
> >> > +		lppaca_shared_proc(local_paca->lppaca_ptr));
> >> > +#else
> >> > +	return false;
> >> > +#endif
> >> > +}
> >> > +
> >>
> >> CONFIG_PPC_SPLPAR depends on CONFIG_PPC_PSERIES, which depends on
> >> CONFIG_PPC_BOOK3S so the #ifdef above is unnecessary:
> >>
> >> if CONFIG_PPC_BOOK3S is unset then CONFIG_PPC_SPLPAR will be unset as
> >> well and the return expression should short-circuit to false.
> >>
> >
> > Agreed, but the #ifdef is necessary to compile platforms which include
> > this header but do not implement lppaca_shared_proc(...) and friends.
> > I can reword the comment if that helps.
> 
> Ah, indeed. Yes, if you could mention that in the commit I think it
> would help. These #ifdefs are becoming démodé so it's good to know why
> they're there.
> 
> Another alternative is to provide a dummy lppaca_shared_proc() which
> always returns false when CONFIG_PPC_BOOK3S isn't set (just mentioning
> it, I don't have a preference).
> 

Yeah, I tried that first, but the declaration and definition of lppaca_shared_proc()
and its arguments are nested within so many includes and arch/platform #ifdefs that I
decided the #ifdef in is_shared_processor() is simpler.
I am not sure unraveling all that makes sense just for implementing this fix, but
maybe someone can convince me, hah.

In any case, next version will have an improved commit message and comment.

> --
> Thiago Jung Bauermann
> IBM Linux Technology Center


* Re: [PATCH 1/3] powerpc/spinlocks: Refactor SHARED_PROCESSOR
  2019-07-28 12:54 ` [PATCH 1/3] powerpc/spinlocks: Refactor SHARED_PROCESSOR Christopher M. Riedl
  2019-07-30 21:31   ` Thiago Jung Bauermann
@ 2019-08-01  3:20   ` Andrew Donnellan
  1 sibling, 0 replies; 11+ messages in thread
From: Andrew Donnellan @ 2019-08-01  3:20 UTC (permalink / raw)
  To: Christopher M. Riedl, linuxppc-dev

On 28/7/19 10:54 pm, Christopher M. Riedl wrote:
> Determining if a processor is in shared processor mode is not a constant
> so don't hide it behind a #define.
> 
> Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>

This seems aesthetically more right.

Reviewed-by: Andrew Donnellan <ajd@linux.ibm.com>

> ---
>   arch/powerpc/include/asm/spinlock.h | 21 +++++++++++++++------
>   1 file changed, 15 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
> index a47f827bc5f1..8631b0b4e109 100644
> --- a/arch/powerpc/include/asm/spinlock.h
> +++ b/arch/powerpc/include/asm/spinlock.h
> @@ -101,15 +101,24 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
>   
>   #if defined(CONFIG_PPC_SPLPAR)
>   /* We only yield to the hypervisor if we are in shared processor mode */
> -#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
>   extern void __spin_yield(arch_spinlock_t *lock);
>   extern void __rw_yield(arch_rwlock_t *lock);
>   #else /* SPLPAR */
>   #define __spin_yield(x)	barrier()
>   #define __rw_yield(x)	barrier()
> -#define SHARED_PROCESSOR	0
>   #endif
>   
> +static inline bool is_shared_processor(void)
> +{
> +/* Only server processors have an lppaca struct */
> +#ifdef CONFIG_PPC_BOOK3S
> +	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
> +		lppaca_shared_proc(local_paca->lppaca_ptr));
> +#else
> +	return false;
> +#endif
> +}
> +
>   static inline void arch_spin_lock(arch_spinlock_t *lock)
>   {
>   	while (1) {
> @@ -117,7 +126,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
>   			break;
>   		do {
>   			HMT_low();
> -			if (SHARED_PROCESSOR)
> +			if (is_shared_processor())
>   				__spin_yield(lock);
>   		} while (unlikely(lock->slock != 0));
>   		HMT_medium();
> @@ -136,7 +145,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
>   		local_irq_restore(flags);
>   		do {
>   			HMT_low();
> -			if (SHARED_PROCESSOR)
> +			if (is_shared_processor())
>   				__spin_yield(lock);
>   		} while (unlikely(lock->slock != 0));
>   		HMT_medium();
> @@ -226,7 +235,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
>   			break;
>   		do {
>   			HMT_low();
> -			if (SHARED_PROCESSOR)
> +			if (is_shared_processor())
>   				__rw_yield(rw);
>   		} while (unlikely(rw->lock < 0));
>   		HMT_medium();
> @@ -240,7 +249,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
>   			break;
>   		do {
>   			HMT_low();
> -			if (SHARED_PROCESSOR)
> +			if (is_shared_processor())
>   				__rw_yield(rw);
>   		} while (unlikely(rw->lock != 0));
>   		HMT_medium();
> 

-- 
Andrew Donnellan              OzLabs, ADL Canberra
ajd@linux.ibm.com             IBM Australia Limited



* Re: [PATCH 2/3] powerpc/spinlocks: Rename SPLPAR-only spinlocks
  2019-07-28 12:54 ` [PATCH 2/3] powerpc/spinlocks: Rename SPLPAR-only spinlocks Christopher M. Riedl
@ 2019-08-01  3:27   ` Andrew Donnellan
  0 siblings, 0 replies; 11+ messages in thread
From: Andrew Donnellan @ 2019-08-01  3:27 UTC (permalink / raw)
  To: Christopher M. Riedl, linuxppc-dev

On 28/7/19 10:54 pm, Christopher M. Riedl wrote:
> The __rw_yield and __spin_yield functions only pertain to SPLPAR mode.
> Rename them to make this relationship obvious.
> 
> Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>

Reviewed-by: Andrew Donnellan <ajd@linux.ibm.com>

> ---
>   arch/powerpc/include/asm/spinlock.h | 6 ++++--
>   arch/powerpc/lib/locks.c            | 6 +++---
>   2 files changed, 7 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
> index 8631b0b4e109..1e7721176f39 100644
> --- a/arch/powerpc/include/asm/spinlock.h
> +++ b/arch/powerpc/include/asm/spinlock.h
> @@ -101,8 +101,10 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
>   
>   #if defined(CONFIG_PPC_SPLPAR)
>   /* We only yield to the hypervisor if we are in shared processor mode */
> -extern void __spin_yield(arch_spinlock_t *lock);
> -extern void __rw_yield(arch_rwlock_t *lock);
> +void splpar_spin_yield(arch_spinlock_t *lock);
> +void splpar_rw_yield(arch_rwlock_t *lock);
> +#define __spin_yield(x) splpar_spin_yield(x)
> +#define __rw_yield(x) splpar_rw_yield(x)
>   #else /* SPLPAR */
>   #define __spin_yield(x)	barrier()
>   #define __rw_yield(x)	barrier()
> diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
> index 6550b9e5ce5f..6440d5943c00 100644
> --- a/arch/powerpc/lib/locks.c
> +++ b/arch/powerpc/lib/locks.c
> @@ -18,7 +18,7 @@
>   #include <asm/hvcall.h>
>   #include <asm/smp.h>
>   
> -void __spin_yield(arch_spinlock_t *lock)
> +void splpar_spin_yield(arch_spinlock_t *lock)
>   {
>   	unsigned int lock_value, holder_cpu, yield_count;
>   
> @@ -36,14 +36,14 @@ void __spin_yield(arch_spinlock_t *lock)
>   	plpar_hcall_norets(H_CONFER,
>   		get_hard_smp_processor_id(holder_cpu), yield_count);
>   }
> -EXPORT_SYMBOL_GPL(__spin_yield);
> +EXPORT_SYMBOL_GPL(splpar_spin_yield);
>   
>   /*
>    * Waiting for a read lock or a write lock on a rwlock...
>    * This turns out to be the same for read and write locks, since
>    * we only know the holder if it is write-locked.
>    */
> -void __rw_yield(arch_rwlock_t *rw)
> +void splpar_rw_yield(arch_rwlock_t *rw)
>   {
>   	int lock_value;
>   	unsigned int holder_cpu, yield_count;
> 

-- 
Andrew Donnellan              OzLabs, ADL Canberra
ajd@linux.ibm.com             IBM Australia Limited



* Re: [PATCH 3/3] powerpc/spinlock: Fix oops in shared-processor spinlocks
  2019-07-28 12:54 ` [PATCH 3/3] powerpc/spinlock: Fix oops in shared-processor spinlocks Christopher M. Riedl
@ 2019-08-01  3:33   ` Andrew Donnellan
  0 siblings, 0 replies; 11+ messages in thread
From: Andrew Donnellan @ 2019-08-01  3:33 UTC (permalink / raw)
  To: Christopher M. Riedl, linuxppc-dev

On 28/7/19 10:54 pm, Christopher M. Riedl wrote:
> Booting w/ ppc64le_defconfig + CONFIG_PREEMPT results in the kernel
> trace below, due to calling the shared-processor spinlock code while
> not running in an SPLPAR. Previously, the out-of-line spinlock
> implementations were selected based on CONFIG_PPC_SPLPAR at compile
> time, without a runtime shared-processor LPAR check.
> 
> To fix this, call the actual spinlock implementations from a set of
> common functions, spin_yield() and rw_yield(), which check for a
> shared-processor LPAR at runtime and select the appropriate lock
> implementation.
> 
> [    0.430878] BUG: Kernel NULL pointer dereference at 0x00000100
> [    0.431991] Faulting instruction address: 0xc000000000097f88
> [    0.432934] Oops: Kernel access of bad area, sig: 7 [#1]
> [    0.433448] LE PAGE_SIZE=64K MMU=Radix MMU=Hash PREEMPT SMP NR_CPUS=2048 NUMA PowerNV
> [    0.434479] Modules linked in:
> [    0.435055] CPU: 0 PID: 2 Comm: kthreadd Not tainted 5.2.0-rc6-00491-g249155c20f9b #28
> [    0.435730] NIP:  c000000000097f88 LR: c000000000c07a88 CTR: c00000000015ca10
> [    0.436383] REGS: c0000000727079f0 TRAP: 0300   Not tainted  (5.2.0-rc6-00491-g249155c20f9b)
> [    0.437004] MSR:  9000000002009033 <SF,HV,VEC,EE,ME,IR,DR,RI,LE>  CR: 84000424  XER: 20040000
> [    0.437874] CFAR: c000000000c07a84 DAR: 0000000000000100 DSISR: 00080000 IRQMASK: 1
> [    0.437874] GPR00: c000000000c07a88 c000000072707c80 c000000001546300 c00000007be38a80
> [    0.437874] GPR04: c0000000726f0c00 0000000000000002 c00000007279c980 0000000000000100
> [    0.437874] GPR08: c000000001581b78 0000000080000001 0000000000000008 c00000007279c9b0
> [    0.437874] GPR12: 0000000000000000 c000000001730000 c000000000142558 0000000000000000
> [    0.437874] GPR16: 0000000000000000 0000000000000000 0000000000000000 0000000000000000
> [    0.437874] GPR20: 0000000000000000 0000000000000000 0000000000000000 0000000000000000
> [    0.437874] GPR24: c00000007be38a80 c000000000c002f4 0000000000000000 0000000000000000
> [    0.437874] GPR28: c000000072221a00 c0000000726c2600 c00000007be38a80 c00000007be38a80
> [    0.443992] NIP [c000000000097f88] __spin_yield+0x48/0xa0
> [    0.444523] LR [c000000000c07a88] __raw_spin_lock+0xb8/0xc0
> [    0.445080] Call Trace:
> [    0.445670] [c000000072707c80] [c000000072221a00] 0xc000000072221a00 (unreliable)
> [    0.446425] [c000000072707cb0] [c000000000bffb0c] __schedule+0xbc/0x850
> [    0.447078] [c000000072707d70] [c000000000c002f4] schedule+0x54/0x130
> [    0.447694] [c000000072707da0] [c0000000001427dc] kthreadd+0x28c/0x2b0
> [    0.448389] [c000000072707e20] [c00000000000c1cc] ret_from_kernel_thread+0x5c/0x70
> [    0.449143] Instruction dump:
> [    0.449821] 4d9e0020 552a043e 210a07ff 79080fe0 0b080000 3d020004 3908b878 794a1f24
> [    0.450587] e8e80000 7ce7502a e8e70000 38e70100 <7ca03c2c> 70a70001 78a50020 4d820020
> [    0.452808] ---[ end trace 474d6b2b8fc5cb7e ]---
> 
> Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>

This should probably head to stable?

> ---
>   arch/powerpc/include/asm/spinlock.h | 36 ++++++++++++++++++++---------
>   1 file changed, 25 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
> index 1e7721176f39..8161809c6be1 100644
> --- a/arch/powerpc/include/asm/spinlock.h
> +++ b/arch/powerpc/include/asm/spinlock.h
> @@ -103,11 +103,9 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
>   /* We only yield to the hypervisor if we are in shared processor mode */
>   void splpar_spin_yield(arch_spinlock_t *lock);
>   void splpar_rw_yield(arch_rwlock_t *lock);
> -#define __spin_yield(x) splpar_spin_yield(x)
> -#define __rw_yield(x) splpar_rw_yield(x)
>   #else /* SPLPAR */
> -#define __spin_yield(x)	barrier()
> -#define __rw_yield(x)	barrier()
> +#define splpar_spin_yield(lock)
> +#define splpar_rw_yield(lock)

I prefer using #ifdef on the function definition and declaring an
alternative function with an empty body for the !SPLPAR case; seeing an
empty #define just feels a bit weird.
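
Something like this, just as a sketch of that alternative:

	#if defined(CONFIG_PPC_SPLPAR)
	void splpar_spin_yield(arch_spinlock_t *lock);
	void splpar_rw_yield(arch_rwlock_t *lock);
	#else
	static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
	static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
	#endif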

>   #endif
>   
>   static inline bool is_shared_processor(void)
> @@ -121,6 +119,22 @@ static inline bool is_shared_processor(void)
>   #endif
>   }
>   
> +static inline void spin_yield(arch_spinlock_t *lock)
> +{
> +	if (is_shared_processor())
> +		splpar_spin_yield(lock);
> +	else
> +		barrier();
> +}
> +
> +static inline void rw_yield(arch_rwlock_t *lock)
> +{
> +	if (is_shared_processor())
> +		splpar_rw_yield(lock);
> +	else
> +		barrier();
> +}
> +
>   static inline void arch_spin_lock(arch_spinlock_t *lock)
>   {
>   	while (1) {
> @@ -129,7 +143,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
>   		do {
>   			HMT_low();
>   			if (is_shared_processor())
> -				__spin_yield(lock);
> +				spin_yield(lock);
>   		} while (unlikely(lock->slock != 0));
>   		HMT_medium();
>   	}
> @@ -148,7 +162,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
>   		do {
>   			HMT_low();
>   			if (is_shared_processor())
> -				__spin_yield(lock);
> +				spin_yield(lock);
>   		} while (unlikely(lock->slock != 0));
>   		HMT_medium();
>   		local_irq_restore(flags_dis);
> @@ -238,7 +252,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
>   		do {
>   			HMT_low();
>   			if (is_shared_processor())
> -				__rw_yield(rw);
> +				rw_yield(rw);
>   		} while (unlikely(rw->lock < 0));
>   		HMT_medium();
>   	}
> @@ -252,7 +266,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
>   		do {
>   			HMT_low();
>   			if (is_shared_processor())
> -				__rw_yield(rw);
> +				rw_yield(rw);
>   		} while (unlikely(rw->lock != 0));
>   		HMT_medium();
>   	}
> @@ -292,9 +306,9 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
>   	rw->lock = 0;
>   }
>   
> -#define arch_spin_relax(lock)	__spin_yield(lock)
> -#define arch_read_relax(lock)	__rw_yield(lock)
> -#define arch_write_relax(lock)	__rw_yield(lock)
> +#define arch_spin_relax(lock)	spin_yield(lock)
> +#define arch_read_relax(lock)	rw_yield(lock)
> +#define arch_write_relax(lock)	rw_yield(lock)
>   
>   /* See include/linux/spinlock.h */
>   #define smp_mb__after_spinlock()   smp_mb()
> 

-- 
Andrew Donnellan              OzLabs, ADL Canberra
ajd@linux.ibm.com             IBM Australia Limited


