* [PATCH 1/3] powerpc/64s: Add lockdep for HPTE lock
From: Nicholas Piggin @ 2022-10-13 23:07 UTC
  To: linuxppc-dev; +Cc: Guenter Roeck, Nicholas Piggin, Nicholas Miehlbradt

Add a lockdep annotation for the HPTE bit-spinlock. Modern systems don't
take the tlbie lock, so annotating the HPTE lock surfaces some of the same
lockdep warnings that were being reported on ppc970. The HPTE lock is also
not taken in exactly the same places as the tlbie lock, so the annotation
is useful in its own right.
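
For readers unfamiliar with the pattern, here is a minimal sketch of
annotating a custom bit-lock with a static lockdep map, which is what this
patch does for the HPTE lock (the my_bitlock names are illustrative, not
from the patch; the real code additionally provides empty fallbacks when
CONFIG_LOCKDEP is off):

    #include <linux/lockdep.h>
    #include <linux/bitops.h>

    /* One static map describes the whole class of per-entry bit-locks. */
    static struct lockdep_map my_bitlock_map =
            STATIC_LOCKDEP_MAP_INIT("my_bitlock", &my_bitlock_map);

    static void my_bitlock_lock(unsigned long *word)
    {
            /* Record the acquire with lockdep before spinning. */
            lock_map_acquire(&my_bitlock_map);
            while (test_and_set_bit_lock(0, word))
                    cpu_relax();
    }

    static void my_bitlock_unlock(unsigned long *word)
    {
            /* Tell lockdep, then drop the real lock bit. */
            lock_map_release(&my_bitlock_map);
            clear_bit_unlock(0, word);
    }

Because one map stands in for every HPTE's lock bit, lockdep treats all
HPTE locks as a single lock class, which is enough to catch the context
(irq-safety) inconsistencies this series is after.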

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/mm/book3s64/hash_native.c | 42 +++++++++++++++++++++-----
 1 file changed, 35 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 623a7b7ab38b..02d0c210a1ce 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -43,6 +43,29 @@
 
 static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map hpte_lock_map =
+        STATIC_LOCKDEP_MAP_INIT("hpte_lock", &hpte_lock_map);
+
+static void acquire_hpte_lock(void)
+{
+	lock_map_acquire(&hpte_lock_map);
+}
+
+static void release_hpte_lock(void)
+{
+	lock_map_release(&hpte_lock_map);
+}
+#else
+static void acquire_hpte_lock(void)
+{
+}
+
+static void release_hpte_lock(void)
+{
+}
+#endif
+
 static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
 						int apsize, int ssize)
 {
@@ -220,6 +243,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
 {
 	unsigned long *word = (unsigned long *)&hptep->v;
 
+	acquire_hpte_lock();
 	while (1) {
 		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
 			break;
@@ -234,6 +258,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
 	unsigned long *word = (unsigned long *)&hptep->v;
 
+	release_hpte_lock();
 	clear_bit_unlock(HPTE_LOCK_BIT, word);
 }
 
@@ -279,6 +304,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 		hpte_v = hpte_old_to_new_v(hpte_v);
 	}
 
+	release_hpte_lock();
 	hptep->r = cpu_to_be64(hpte_r);
 	/* Guarantee the second dword is visible before the valid bit */
 	eieio();
@@ -327,6 +353,7 @@ static long native_hpte_remove(unsigned long hpte_group)
 		return -1;
 
 	/* Invalidate the hpte. NOTE: this also unlocks it */
+	release_hpte_lock();
 	hptep->v = 0;
 
 	return i;
@@ -517,10 +544,11 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 		/* recheck with locks held */
 		hpte_v = hpte_get_old_v(hptep);
 
-		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
+		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
 			/* Invalidate the hpte. NOTE: this also unlocks it */
+			release_hpte_lock();
 			hptep->v = 0;
-		else
+		} else
 			native_unlock_hpte(hptep);
 	}
 	/*
@@ -580,10 +608,8 @@ static void native_hugepage_invalidate(unsigned long vsid,
 			hpte_v = hpte_get_old_v(hptep);
 
 			if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
-				/*
-				 * Invalidate the hpte. NOTE: this also unlocks it
-				 */
-
+				/* Invalidate the hpte. NOTE: this also unlocks it */
+				release_hpte_lock();
 				hptep->v = 0;
 			} else
 				native_unlock_hpte(hptep);
@@ -765,8 +791,10 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
 				native_unlock_hpte(hptep);
-			else
+			else {
+				release_hpte_lock();
 				hptep->v = 0;
+			}
 
 		} pte_iterate_hashed_end();
 	}
-- 
2.37.2
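
One subtlety in the patch above: HPTE_LOCK_BIT lives inside the HPTE's
first doubleword (hptep->v), so a full store to hptep->v that leaves the
bit clear is an implicit unlock. That is why the insert, remove and
invalidate paths call release_hpte_lock() by hand just before the store
rather than going through native_unlock_hpte(). Schematically (a sketch,
not a literal excerpt):

    /* The store below clears HPTE_LOCK_BIT as a side effect, which is
     * the real unlock, so inform lockdep before performing it. */
    release_hpte_lock();
    hptep->v = 0;           /* invalidates the HPTE and unlocks it */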



* [PATCH 2/3] powerpc/64s: make HPTE lock and native_tlbie_lock irq-safe
From: Nicholas Piggin @ 2022-10-13 23:07 UTC
  To: linuxppc-dev; +Cc: Guenter Roeck, Nicholas Piggin, Nicholas Miehlbradt

With kfence enabled, there are several cases where the HPTE and TLBIE
locks are taken from softirq context, for example:

  WARNING: inconsistent lock state
  6.0.0-11845-g0cbbc95b12ac #1 Tainted: G                 N
  --------------------------------
  inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
  swapper/0/1 [HC0[0]:SC0[0]:HE1:SE1] takes:
  c000000002734de8 (native_tlbie_lock){+.?.}-{2:2}, at: .native_hpte_updateboltedpp+0x1a4/0x600
  {IN-SOFTIRQ-W} state was registered at:
    .lock_acquire+0x20c/0x520
    ._raw_spin_lock+0x4c/0x70
    .native_hpte_invalidate+0x62c/0x840
    .hash__kernel_map_pages+0x450/0x640
    .kfence_protect+0x58/0xc0
    .kfence_guarded_free+0x374/0x5a0
    .__slab_free+0x3d0/0x630
    .put_cred_rcu+0xcc/0x120
    .rcu_core+0x3c4/0x14e0
    .__do_softirq+0x1dc/0x7dc
    .do_softirq_own_stack+0x40/0x60

Fix this by consistently disabling irqs while taking either of these
locks. Don't just disable bh: several of the more common paths already
disable irqs, so disabling irqs everywhere makes the locks always
irq-safe.
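
To see why a lock that is taken from softirq context must be taken with
(at least) softirqs disabled everywhere else, consider this self-contained
sketch of the deadlock lockdep is warning about (illustrative names, not
the patch's code):

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(example_lock);

    /* Softirq path, e.g. an RCU callback doing a kfence free. */
    static void softirq_path(void)
    {
            raw_spin_lock(&example_lock);
            /* ... */
            raw_spin_unlock(&example_lock);
    }

    /* Process-context path. If this ran with irqs and softirqs enabled,
     * a softirq arriving on this CPU while example_lock is held would
     * spin on the lock forever: self-deadlock. Disabling irqs (stronger
     * than disabling bh) closes that window. */
    static void process_path(void)
    {
            unsigned long flags;

            local_irq_save(flags);
            raw_spin_lock(&example_lock);
            /* ... */
            raw_spin_unlock(&example_lock);
            local_irq_restore(flags);
    }

The patch applies this shape to native_hpte_insert() and the bolted
update/remove paths, taking care to restore irqs on every return,
including the early -1 return when the hash group is full.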

Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/mm/book3s64/hash_native.c | 27 ++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 02d0c210a1ce..065f9c542a63 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -268,8 +268,11 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 {
 	struct hash_pte *hptep = htab_address + hpte_group;
 	unsigned long hpte_v, hpte_r;
+	unsigned long flags;
 	int i;
 
+	local_irq_save(flags);
+
 	if (!(vflags & HPTE_V_BOLTED)) {
 		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
 			" rflags=%lx, vflags=%lx, psize=%d)\n",
@@ -288,8 +291,10 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 		hptep++;
 	}
 
-	if (i == HPTES_PER_GROUP)
+	if (i == HPTES_PER_GROUP) {
+		local_irq_restore(flags);
 		return -1;
+	}
 
 	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
 	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
@@ -304,7 +309,6 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 		hpte_v = hpte_old_to_new_v(hpte_v);
 	}
 
-	release_hpte_lock();
 	hptep->r = cpu_to_be64(hpte_r);
 	/* Guarantee the second dword is visible before the valid bit */
 	eieio();
@@ -312,10 +316,13 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 	 * Now set the first dword including the valid bit
 	 * NOTE: this also unlocks the hpte
 	 */
+	release_hpte_lock();
 	hptep->v = cpu_to_be64(hpte_v);
 
 	__asm__ __volatile__ ("ptesync" : : : "memory");
 
+	local_irq_restore(flags);
+
 	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
 }
 
@@ -366,6 +373,9 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v, want_v;
 	int ret = 0, local = 0;
+	unsigned long irqflags;
+
+	local_irq_save(irqflags);
 
 	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
 
@@ -409,6 +419,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	if (!(flags & HPTE_NOHPTE_UPDATE))
 		tlbie(vpn, bpsize, apsize, ssize, local);
 
+	local_irq_restore(irqflags);
+
 	return ret;
 }
 
@@ -472,6 +484,9 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 	unsigned long vsid;
 	long slot;
 	struct hash_pte *hptep;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	vsid = get_kernel_vsid(ea, ssize);
 	vpn = hpt_vpn(ea, vsid, ssize);
@@ -490,6 +505,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 	 * actual page size will be same.
 	 */
 	tlbie(vpn, psize, psize, ssize, 0);
+
+	local_irq_restore(flags);
 }
 
 /*
@@ -503,6 +520,9 @@ static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
 	unsigned long vsid;
 	long slot;
 	struct hash_pte *hptep;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	vsid = get_kernel_vsid(ea, ssize);
 	vpn = hpt_vpn(ea, vsid, ssize);
@@ -520,6 +540,9 @@ static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
 
 	/* Invalidate the TLB */
 	tlbie(vpn, psize, psize, ssize, 0);
+
+	local_irq_restore(flags);
+
 	return 0;
 }
 
-- 
2.37.2



* [PATCH 3/3] powerpc/64s: make linear_map_hash_lock a raw spinlock
From: Nicholas Piggin @ 2022-10-13 23:07 UTC
  To: linuxppc-dev; +Cc: Guenter Roeck, Nicholas Piggin, Nicholas Miehlbradt

This lock is taken while the raw kfence_freelist_lock is held, so it must
also be a raw spinlock; lockdep reports the violation when raw lock
nesting checking (CONFIG_PROVE_RAW_LOCK_NESTING) is enabled.
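
The rule being enforced: on PREEMPT_RT, spinlock_t becomes a sleeping
lock, so taking one while a raw_spinlock_t is held would mean sleeping in
atomic context; lockdep's raw lock nesting check flags such nesting on
all configurations. A minimal sketch of the invalid pattern (illustrative
names):

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(outer_raw);  /* always spins, even on RT */
    static DEFINE_SPINLOCK(inner_normal);   /* sleeps on PREEMPT_RT */

    static void broken_nesting(void)
    {
            raw_spin_lock(&outer_raw);
            spin_lock(&inner_normal);       /* invalid: may sleep in atomic */
            spin_unlock(&inner_normal);
            raw_spin_unlock(&outer_raw);
    }

Since kfence_freelist_lock is raw and linear_map_hash_lock nests inside
it, the fix is simply to make the inner lock raw as well.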

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/mm/book3s64/hash_utils.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index df008edf7be0..6df4c6d38b66 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1981,7 +1981,7 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
 }
 
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
-static DEFINE_SPINLOCK(linear_map_hash_lock);
+static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
 
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 {
@@ -2005,10 +2005,10 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 				    mmu_linear_psize, mmu_kernel_ssize);
 
 	BUG_ON (ret < 0);
-	spin_lock(&linear_map_hash_lock);
+	raw_spin_lock(&linear_map_hash_lock);
 	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
 	linear_map_hash_slots[lmi] = ret | 0x80;
-	spin_unlock(&linear_map_hash_lock);
+	raw_spin_unlock(&linear_map_hash_lock);
 }
 
 static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -2018,14 +2018,14 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
 
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
-	spin_lock(&linear_map_hash_lock);
+	raw_spin_lock(&linear_map_hash_lock);
 	if (!(linear_map_hash_slots[lmi] & 0x80)) {
-		spin_unlock(&linear_map_hash_lock);
+		raw_spin_unlock(&linear_map_hash_lock);
 		return;
 	}
 	hidx = linear_map_hash_slots[lmi] & 0x7f;
 	linear_map_hash_slots[lmi] = 0;
-	spin_unlock(&linear_map_hash_lock);
+	raw_spin_unlock(&linear_map_hash_lock);
 	if (hidx & _PTEIDX_SECONDARY)
 		hash = ~hash;
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-- 
2.37.2



* Re: [PATCH 1/3] powerpc/64s: Add lockdep for HPTE lock
From: Guenter Roeck @ 2022-10-14  0:18 UTC
  To: Nicholas Piggin; +Cc: linuxppc-dev, Nicholas Miehlbradt

On Fri, Oct 14, 2022 at 09:07:08AM +1000, Nicholas Piggin wrote:
> Add a lockdep annotation for the HPTE bit-spinlock. Modern systems don't
> take the tlbie lock, so annotating the HPTE lock surfaces some of the
> same lockdep warnings that were being reported on ppc970. The HPTE lock
> is also not taken in exactly the same places as the tlbie lock, so the
> annotation is useful in its own right.
> 
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>

Tested-by: Guenter Roeck <linux@roeck-us.net>


* Re: [PATCH 2/3] powerpc/64s: make HPTE lock and native_tlbie_lock irq-safe
From: Guenter Roeck @ 2022-10-14  0:18 UTC
  To: Nicholas Piggin; +Cc: linuxppc-dev, Nicholas Miehlbradt

On Fri, Oct 14, 2022 at 09:07:09AM +1000, Nicholas Piggin wrote:
> With kfence enabled, there are several cases where the HPTE and TLBIE
> locks are taken from softirq context, for example:
> 
>   WARNING: inconsistent lock state
>   6.0.0-11845-g0cbbc95b12ac #1 Tainted: G                 N
>   --------------------------------
>   inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
>   swapper/0/1 [HC0[0]:SC0[0]:HE1:SE1] takes:
>   c000000002734de8 (native_tlbie_lock){+.?.}-{2:2}, at: .native_hpte_updateboltedpp+0x1a4/0x600
>   {IN-SOFTIRQ-W} state was registered at:
>     .lock_acquire+0x20c/0x520
>     ._raw_spin_lock+0x4c/0x70
>     .native_hpte_invalidate+0x62c/0x840
>     .hash__kernel_map_pages+0x450/0x640
>     .kfence_protect+0x58/0xc0
>     .kfence_guarded_free+0x374/0x5a0
>     .__slab_free+0x3d0/0x630
>     .put_cred_rcu+0xcc/0x120
>     .rcu_core+0x3c4/0x14e0
>     .__do_softirq+0x1dc/0x7dc
>     .do_softirq_own_stack+0x40/0x60
> 
> Fix this by consistently disabling irqs while taking either of these
> locks. Don't just disable bh: several of the more common paths already
> disable irqs, so disabling irqs everywhere makes the locks always
> irq-safe.
> 
> Reported-by: Guenter Roeck <linux@roeck-us.net>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>

Tested-by: Guenter Roeck <linux@roeck-us.net>


* Re: [PATCH 3/3] powerpc/64s: make linear_map_hash_lock a raw spinlock
From: Guenter Roeck @ 2022-10-14  0:18 UTC
  To: Nicholas Piggin; +Cc: linuxppc-dev, Nicholas Miehlbradt

On Fri, Oct 14, 2022 at 09:07:10AM +1000, Nicholas Piggin wrote:
> This lock is taken while the raw kfence_freelist_lock is held, so it
> must also be a raw spinlock; lockdep reports the violation when raw lock
> nesting checking (CONFIG_PROVE_RAW_LOCK_NESTING) is enabled.
> 
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>

Tested-by: Guenter Roeck <linux@roeck-us.net>


* Re: [PATCH 1/3] powerpc/64s: Add lockdep for HPTE lock
From: Michael Ellerman @ 2022-10-28 11:49 UTC
  To: linuxppc-dev, Nicholas Piggin; +Cc: Nicholas Miehlbradt, Guenter Roeck

On Fri, 14 Oct 2022 09:07:08 +1000, Nicholas Piggin wrote:
> Add a lockdep annotation for the HPTE bit-spinlock. Modern systems don't
> take the tlbie lock, so annotating the HPTE lock surfaces some of the
> same lockdep warnings that were being reported on ppc970. The HPTE lock
> is also not taken in exactly the same places as the tlbie lock, so the
> annotation is useful in its own right.
> 
> 

Applied to powerpc/fixes.

[1/3] powerpc/64s: Add lockdep for HPTE lock
      https://git.kernel.org/powerpc/c/be83d5485da549d934ec65463ea831709f2827b1
[2/3] powerpc/64s: make HPTE lock and native_tlbie_lock irq-safe
      https://git.kernel.org/powerpc/c/35159b5717fa9c6031fdd6a2193c7a3dc717ce33
[3/3] powerpc/64s: make linear_map_hash_lock a raw spinlock
      https://git.kernel.org/powerpc/c/b12eb279ff552bd67c167b0fe701ae602aa7311e

cheers

