* [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix
@ 2022-09-19  1:44 Nicholas Miehlbradt
  2022-09-19  1:44 ` [PATCH 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils Nicholas Miehlbradt
                   ` (4 more replies)
  0 siblings, 5 replies; 10+ messages in thread
From: Nicholas Miehlbradt @ 2022-09-19  1:44 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Miehlbradt

There is support for DEBUG_PAGEALLOC on hash but not on radix.
Add support on radix.

Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
---
 arch/powerpc/mm/book3s64/radix_pgtable.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index db2f3d193448..483c99bfbde5 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -30,6 +30,7 @@
 #include <asm/trace.h>
 #include <asm/uaccess.h>
 #include <asm/ultravisor.h>
+#include <asm/set_memory.h>
 
 #include <trace/events/thp.h>
 
@@ -503,6 +504,9 @@ static unsigned long __init radix_memory_block_size(void)
 {
 	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
 
+	if (debug_pagealloc_enabled())
+		return PAGE_SIZE;
+
 	/*
 	 * OPAL firmware feature is set by now. Hence we are ok
 	 * to test OPAL feature.
@@ -519,6 +523,9 @@ static unsigned long __init radix_memory_block_size(void)
 
 static unsigned long __init radix_memory_block_size(void)
 {
+	if (debug_pagealloc_enabled())
+		return PAGE_SIZE;
+
 	return 1UL * 1024 * 1024 * 1024;
 }
 
@@ -899,7 +906,14 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void radix__kernel_map_pages(struct page *page, int numpages, int enable)
 {
-	pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
+	unsigned long addr;
+
+	addr = (unsigned long)page_address(page);
+
+	if (enable)
+		set_memory_p(addr, numpages);
+	else
+		set_memory_np(addr, numpages);
 }
 #endif
 
-- 
2.34.1
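The mechanism this patch relies on is set_memory_p()/set_memory_np(), which set or clear the present bit on linear-map pages so that any stray access to a freed page faults immediately. As a rough userspace analogue of that protect/unprotect idea (illustrative only; mprotect(2) stands in for the kernel's page-table helpers):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        /* One anonymous page, standing in for a linear-map page. */
        char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        p[0] = 42;                              /* mapped: access is fine */

        /* "set_memory_np": revoke access, like clearing the present bit */
        mprotect(p, psz, PROT_NONE);
        /* reading p[0] here would fault, catching a use-after-free */

        /* "set_memory_p": restore access when the page is handed out again */
        mprotect(p, psz, PROT_READ | PROT_WRITE);
        printf("%d\n", p[0]);                   /* fine again */

        munmap(p, psz);
        return 0;
}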



* [PATCH 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils
  2022-09-19  1:44 [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Nicholas Miehlbradt
@ 2022-09-19  1:44 ` Nicholas Miehlbradt
  2022-09-19  1:44 ` [PATCH 3/4] powerpc/64s: Allow double call of kernel_[un]map_linear_page() Nicholas Miehlbradt
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 10+ messages in thread
From: Nicholas Miehlbradt @ 2022-09-19  1:44 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Miehlbradt

From: Christophe Leroy <christophe.leroy@csgroup.eu>

debug_pagealloc_enabled() is always defined and constant folds to
'false' when CONFIG_DEBUG_PAGEALLOC is not enabled.

Remove the #ifdefs; the code and associated static variables will
be optimised out by the compiler when CONFIG_DEBUG_PAGEALLOC is
not defined.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
---
 arch/powerpc/mm/book3s64/hash_utils.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index fc92613dc2bf..e63ff401a6ea 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -123,11 +123,8 @@ EXPORT_SYMBOL_GPL(mmu_slb_size);
 #ifdef CONFIG_PPC_64K_PAGES
 int mmu_ci_restrictions;
 #endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
 static u8 *linear_map_hash_slots;
 static unsigned long linear_map_hash_count;
-static DEFINE_SPINLOCK(linear_map_hash_lock);
-#endif /* CONFIG_DEBUG_PAGEALLOC */
 struct mmu_hash_ops mmu_hash_ops;
 EXPORT_SYMBOL(mmu_hash_ops);
 
@@ -427,11 +424,9 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			break;
 
 		cond_resched();
-#ifdef CONFIG_DEBUG_PAGEALLOC
 		if (debug_pagealloc_enabled() &&
 			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
-#endif /* CONFIG_DEBUG_PAGEALLOC */
 	}
 	return ret < 0 ? ret : 0;
 }
@@ -1066,7 +1061,6 @@ static void __init htab_initialize(void)
 
 	prot = pgprot_val(PAGE_KERNEL);
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 	if (debug_pagealloc_enabled()) {
 		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 		linear_map_hash_slots = memblock_alloc_try_nid(
@@ -1076,7 +1070,6 @@ static void __init htab_initialize(void)
 			panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
 			      __func__, linear_map_hash_count, &ppc64_rma_size);
 	}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 	/* create bolted the linear mapping in the hash table */
 	for_each_mem_range(i, &base, &end) {
@@ -1991,6 +1984,8 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
+static DEFINE_SPINLOCK(linear_map_hash_lock);
+
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 {
 	unsigned long hash;
-- 
2.34.1
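The commit message's constant-folding claim is easy to see in a standalone sketch: when the predicate is a compile-time constant zero, the compiler discards both the guarded code and any static object referenced only from it, with no #ifdef needed. The names below are made up for illustration (FEATURE_ENABLED stands in for debug_pagealloc_enabled() with the config off); compile with -O1 and inspect the object file to confirm 'table' is gone:

/* Stand-in for debug_pagealloc_enabled() when the option is compiled out. */
#define FEATURE_ENABLED() 0

static unsigned char table[4096];       /* analogous to linear_map_hash_slots */

void record(unsigned long idx)
{
        /* The branch is statically dead, so both this code and 'table'
         * are optimised out, exactly as if wrapped in an #ifdef. */
        if (FEATURE_ENABLED() && idx < sizeof(table))
                table[idx] = 0x80;
}

int main(void)
{
        record(1);
        return 0;
}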



* [PATCH 3/4] powerpc/64s: Allow double call of kernel_[un]map_linear_page()
  2022-09-19  1:44 [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Nicholas Miehlbradt
  2022-09-19  1:44 ` [PATCH 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils Nicholas Miehlbradt
@ 2022-09-19  1:44 ` Nicholas Miehlbradt
  2022-09-19  1:44 ` [PATCH 4/4] powerpc/64s: Enable KFENCE on book3s64 Nicholas Miehlbradt
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 10+ messages in thread
From: Nicholas Miehlbradt @ 2022-09-19  1:44 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Miehlbradt

From: Christophe Leroy <christophe.leroy@csgroup.eu>

Bail out if the page is already mapped (when mapping) or already
unmapped (when unmapping).

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
---
 arch/powerpc/mm/book3s64/hash_utils.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index e63ff401a6ea..b37412fe5930 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -2000,6 +2000,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 	if (!vsid)
 		return;
 
+	if (linear_map_hash_slots[lmi] & 0x80)
+		return;
+
 	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
 				    HPTE_V_BOLTED,
 				    mmu_linear_psize, mmu_kernel_ssize);
@@ -2019,7 +2022,10 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
 	spin_lock(&linear_map_hash_lock);
-	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
+	if (!(linear_map_hash_slots[lmi] & 0x80)) {
+		spin_unlock(&linear_map_hash_lock);
+		return;
+	}
 	hidx = linear_map_hash_slots[lmi] & 0x7f;
 	linear_map_hash_slots[lmi] = 0;
 	spin_unlock(&linear_map_hash_lock);
-- 
2.34.1
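The shape of the change is the classic idempotent map/unmap pattern: test a 'mapped' flag under the lock and return quietly instead of asserting on a repeated call. A hedged userspace rendering, with a pthread mutex standing in for linear_map_hash_lock and a bool for the 0x80 slot bit:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool mapped;                     /* stands in for slot & 0x80 */

void map_page(void)
{
        pthread_mutex_lock(&lock);
        if (mapped) {                   /* already mapped: bail out */
                pthread_mutex_unlock(&lock);
                return;
        }
        mapped = true;
        /* ... insert the translation entry here ... */
        pthread_mutex_unlock(&lock);
}

void unmap_page(void)
{
        pthread_mutex_lock(&lock);
        if (!mapped) {                  /* already unmapped: bail out */
                pthread_mutex_unlock(&lock);
                return;
        }
        mapped = false;
        /* ... invalidate the translation entry here ... */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        map_page();
        map_page();     /* second call returns harmlessly */
        unmap_page();
        unmap_page();   /* likewise, no BUG() equivalent */
        return 0;
}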



* [PATCH 4/4] powerpc/64s: Enable KFENCE on book3s64
  2022-09-19  1:44 [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Nicholas Miehlbradt
  2022-09-19  1:44 ` [PATCH 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils Nicholas Miehlbradt
  2022-09-19  1:44 ` [PATCH 3/4] powerpc/64s: Allow double call of kernel_[un]map_linear_page() Nicholas Miehlbradt
@ 2022-09-19  1:44 ` Nicholas Miehlbradt
  2022-09-19  6:20   ` Christophe Leroy
  2022-09-19  6:17 ` [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Christophe Leroy
  2022-09-19  7:00 ` Michael Ellerman
  4 siblings, 1 reply; 10+ messages in thread
From: Nicholas Miehlbradt @ 2022-09-19  1:44 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Nicholas Miehlbradt

KFENCE support was added for ppc32 in commit 90cbac0e995d
("powerpc: Enable KFENCE for PPC32").
Enable KFENCE on ppc64 architecture with hash and radix MMUs.
It uses the same mechanism as debug pagealloc to
protect/unprotect pages. All KFENCE kunit tests pass on both
MMUs.

KFENCE memory is initially allocated using memblock but is
later marked as SLAB allocated. This necessitates the change
to __pud_free to ensure that the KFENCE pages are freed
appropriately.

Based on previous work by Christophe Leroy and Jordan Niethe.

Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
---
 arch/powerpc/Kconfig                         |  2 +-
 arch/powerpc/include/asm/book3s/64/pgalloc.h |  6 ++++--
 arch/powerpc/include/asm/book3s/64/pgtable.h |  2 +-
 arch/powerpc/include/asm/kfence.h            | 18 ++++++++++++++++++
 arch/powerpc/mm/book3s64/hash_utils.c        | 10 +++++-----
 arch/powerpc/mm/book3s64/radix_pgtable.c     |  8 +++++---
 6 files changed, 34 insertions(+), 12 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a4f8a5276e5c..f7dd0f49510d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -194,7 +194,7 @@ config PPC
 	select HAVE_ARCH_KASAN			if PPC32 && PPC_PAGE_SHIFT <= 14
 	select HAVE_ARCH_KASAN			if PPC_RADIX_MMU
 	select HAVE_ARCH_KASAN_VMALLOC		if HAVE_ARCH_KASAN
-	select HAVE_ARCH_KFENCE			if PPC_BOOK3S_32 || PPC_8xx || 40x
+	select HAVE_ARCH_KFENCE			if ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if COMPAT
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index e1af0b394ceb..dd2cff53a111 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -113,9 +113,11 @@ static inline void __pud_free(pud_t *pud)
 
 	/*
 	 * Early pud pages allocated via memblock allocator
-	 * can't be directly freed to slab
+	 * can't be directly freed to slab. KFENCE pages have
+	 * both reserved and slab flags set so need to be freed
+	 * with kmem_cache_free().
 	 */
-	if (PageReserved(page))
+	if (PageReserved(page) && !PageSlab(page))
 		free_reserved_page(page);
 	else
 		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index cb9d5fd39d7f..fd5d800f2836 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1123,7 +1123,7 @@ static inline void vmemmap_remove_mapping(unsigned long start,
 }
 #endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (radix_enabled())
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
index a9846b68c6b9..33edbc312a51 100644
--- a/arch/powerpc/include/asm/kfence.h
+++ b/arch/powerpc/include/asm/kfence.h
@@ -11,11 +11,28 @@
 #include <linux/mm.h>
 #include <asm/pgtable.h>
 
+#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC64_ELF_ABI_V2)
+#define ARCH_FUNC_PREFIX "."
+#endif
+
 static inline bool arch_kfence_init_pool(void)
 {
 	return true;
 }
 
+#ifdef CONFIG_PPC64
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+	struct page *page = virt_to_page(addr);
+
+	if (protect)
+		__kernel_map_pages(page, 1, 0);
+	else
+		__kernel_map_pages(page, 1, 1);
+
+	return true;
+}
+#else
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
 	pte_t *kpte = virt_to_kpte(addr);
@@ -29,5 +46,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 
 	return true;
 }
+#endif
 
 #endif /* __ASM_POWERPC_KFENCE_H */
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index b37412fe5930..9cceaa5998a3 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -424,7 +424,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			break;
 
 		cond_resched();
-		if (debug_pagealloc_enabled() &&
+		if (debug_pagealloc_enabled_or_kfence() &&
 			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
 	}
@@ -773,7 +773,7 @@ static void __init htab_init_page_sizes(void)
 	bool aligned = true;
 	init_hpte_page_sizes();
 
-	if (!debug_pagealloc_enabled()) {
+	if (!debug_pagealloc_enabled_or_kfence()) {
 		/*
 		 * Pick a size for the linear mapping. Currently, we only
 		 * support 16M, 1M and 4K which is the default
@@ -1061,7 +1061,7 @@ static void __init htab_initialize(void)
 
 	prot = pgprot_val(PAGE_KERNEL);
 
-	if (debug_pagealloc_enabled()) {
+	if (debug_pagealloc_enabled_or_kfence()) {
 		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 		linear_map_hash_slots = memblock_alloc_try_nid(
 				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
@@ -1983,7 +1983,7 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
 	return slot;
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static DEFINE_SPINLOCK(linear_map_hash_lock);
 
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -2056,7 +2056,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 	}
 	local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
 
 void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 483c99bfbde5..217833fe4f34 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -34,6 +34,8 @@
 
 #include <trace/events/thp.h>
 
+#include <mm/mmu_decl.h>
+
 unsigned int mmu_base_pid;
 unsigned long radix_mem_block_size __ro_after_init;
 
@@ -504,7 +506,7 @@ static unsigned long __init radix_memory_block_size(void)
 {
 	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
 
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_or_kfence())
 		return PAGE_SIZE;
 
 	/*
@@ -523,7 +525,7 @@ static unsigned long __init radix_memory_block_size(void)
 
 static unsigned long __init radix_memory_block_size(void)
 {
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_or_kfence())
 		return PAGE_SIZE;
 
 	return 1UL * 1024 * 1024 * 1024;
@@ -903,7 +905,7 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
 #endif
 #endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 void radix__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long addr;
-- 
2.34.1
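The __pud_free() hunk encodes a small decision table: early memblock pages are marked Reserved, slab pages are marked Slab, and KFENCE pages end up with both flags set, so only Reserved-and-not-Slab pages may take the free_reserved_page() path. A hedged standalone model of that logic (the flag names are illustrative, not the kernel's page-flag API):

#include <stdio.h>

#define PG_RESERVED     (1u << 0)
#define PG_SLAB         (1u << 1)

static const char *free_path(unsigned int flags)
{
        /* KFENCE pages carry both flags and must go back to the slab
         * cache, so Reserved alone cannot decide the free path. */
        if ((flags & PG_RESERVED) && !(flags & PG_SLAB))
                return "free_reserved_page";
        return "kmem_cache_free";
}

int main(void)
{
        printf("memblock page: %s\n", free_path(PG_RESERVED));
        printf("slab page:     %s\n", free_path(PG_SLAB));
        printf("kfence page:   %s\n", free_path(PG_RESERVED | PG_SLAB));
        return 0;
}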



* Re: [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix
  2022-09-19  1:44 [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Nicholas Miehlbradt
                   ` (2 preceding siblings ...)
  2022-09-19  1:44 ` [PATCH 4/4] powerpc/64s: Enable KFENCE on book3s64 Nicholas Miehlbradt
@ 2022-09-19  6:17 ` Christophe Leroy
  2022-09-19  7:00 ` Michael Ellerman
  4 siblings, 0 replies; 10+ messages in thread
From: Christophe Leroy @ 2022-09-19  6:17 UTC (permalink / raw)
  To: Nicholas Miehlbradt, linuxppc-dev



On 19/09/2022 at 03:44, Nicholas Miehlbradt wrote:
> 
> There is support for DEBUG_PAGEALLOC on hash but not on radix.
> Add support on radix.
> 
> Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
> ---
>   arch/powerpc/mm/book3s64/radix_pgtable.c | 16 +++++++++++++++-
>   1 file changed, 15 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
> index db2f3d193448..483c99bfbde5 100644
> --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
> +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
> @@ -30,6 +30,7 @@
>   #include <asm/trace.h>
>   #include <asm/uaccess.h>
>   #include <asm/ultravisor.h>
> +#include <asm/set_memory.h>
> 
>   #include <trace/events/thp.h>
> 
> @@ -503,6 +504,9 @@ static unsigned long __init radix_memory_block_size(void)
>   {
>          unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
> 
> +       if (debug_pagealloc_enabled())
> +               return PAGE_SIZE;
> +
>          /*
>           * OPAL firmware feature is set by now. Hence we are ok
>           * to test OPAL feature.
> @@ -519,6 +523,9 @@ static unsigned long __init radix_memory_block_size(void)
> 
>   static unsigned long __init radix_memory_block_size(void)
>   {
> +       if (debug_pagealloc_enabled())
> +               return PAGE_SIZE;
> +
>          return 1UL * 1024 * 1024 * 1024;

While at it, maybe you can replace the above by SZ_1G

>   }
> 
> @@ -899,7 +906,14 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
>   #ifdef CONFIG_DEBUG_PAGEALLOC
>   void radix__kernel_map_pages(struct page *page, int numpages, int enable)
>   {
> -       pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
> +       unsigned long addr;
> +
> +       addr = (unsigned long)page_address(page);
> +
> +       if (enable)
> +               set_memory_p(addr, numpages);
> +       else
> +               set_memory_np(addr, numpages);
>   }
>   #endif
> 
> --
> 2.34.1
> 


* Re: [PATCH 4/4] powerpc/64s: Enable KFENCE on book3s64
  2022-09-19  1:44 ` [PATCH 4/4] powerpc/64s: Enable KFENCE on book3s64 Nicholas Miehlbradt
@ 2022-09-19  6:20   ` Christophe Leroy
  0 siblings, 0 replies; 10+ messages in thread
From: Christophe Leroy @ 2022-09-19  6:20 UTC (permalink / raw)
  To: Nicholas Miehlbradt, linuxppc-dev



On 19/09/2022 at 03:44, Nicholas Miehlbradt wrote:
> KFENCE support was added for ppc32 in commit 90cbac0e995d
> ("powerpc: Enable KFENCE for PPC32").
> Enable KFENCE on ppc64 architecture with hash and radix MMUs.
> It uses the same mechanism as debug pagealloc to
> protect/unprotect pages. All KFENCE kunit tests pass on both
> MMUs.
> 
> KFENCE memory is initially allocated using memblock but is
> later marked as SLAB allocated. This necessitates the change
> to __pud_free to ensure that the KFENCE pages are freed
> appropriately.
> 
> Based on previous work by Christophe Leroy and Jordan Niethe.
> 
> Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
> ---
>   arch/powerpc/Kconfig                         |  2 +-
>   arch/powerpc/include/asm/book3s/64/pgalloc.h |  6 ++++--
>   arch/powerpc/include/asm/book3s/64/pgtable.h |  2 +-
>   arch/powerpc/include/asm/kfence.h            | 18 ++++++++++++++++++
>   arch/powerpc/mm/book3s64/hash_utils.c        | 10 +++++-----
>   arch/powerpc/mm/book3s64/radix_pgtable.c     |  8 +++++---
>   6 files changed, 34 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index a4f8a5276e5c..f7dd0f49510d 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -194,7 +194,7 @@ config PPC
>   	select HAVE_ARCH_KASAN			if PPC32 && PPC_PAGE_SHIFT <= 14
>   	select HAVE_ARCH_KASAN			if PPC_RADIX_MMU
>   	select HAVE_ARCH_KASAN_VMALLOC		if HAVE_ARCH_KASAN
> -	select HAVE_ARCH_KFENCE			if PPC_BOOK3S_32 || PPC_8xx || 40x
> +	select HAVE_ARCH_KFENCE			if ARCH_SUPPORTS_DEBUG_PAGEALLOC
>   	select HAVE_ARCH_KGDB
>   	select HAVE_ARCH_MMAP_RND_BITS
>   	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if COMPAT
> diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
> index e1af0b394ceb..dd2cff53a111 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
> @@ -113,9 +113,11 @@ static inline void __pud_free(pud_t *pud)
>   
>   	/*
>   	 * Early pud pages allocated via memblock allocator
> -	 * can't be directly freed to slab
> +	 * can't be directly freed to slab. KFENCE pages have
> +	 * both reserved and slab flags set so need to be freed
>   	 * with kmem_cache_free().
>   	 */
> -	if (PageReserved(page))
> +	if (PageReserved(page) && !PageSlab(page))
>   		free_reserved_page(page);
>   	else
>   		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index cb9d5fd39d7f..fd5d800f2836 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -1123,7 +1123,7 @@ static inline void vmemmap_remove_mapping(unsigned long start,
>   }
>   #endif
>   
> -#ifdef CONFIG_DEBUG_PAGEALLOC
> +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
>   static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
>   {
>   	if (radix_enabled())
> diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
> index a9846b68c6b9..33edbc312a51 100644
> --- a/arch/powerpc/include/asm/kfence.h
> +++ b/arch/powerpc/include/asm/kfence.h
> @@ -11,11 +11,28 @@
>   #include <linux/mm.h>
>   #include <asm/pgtable.h>
>   
> +#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC64_ELF_ABI_V2)

Can be replaced by:

	#ifdef CONFIG_PPC64_ELF_ABI_V1

> +#define ARCH_FUNC_PREFIX "."
> +#endif
> +
>   static inline bool arch_kfence_init_pool(void)
>   {
>   	return true;
>   }
>   
> +#ifdef CONFIG_PPC64
> +static inline bool kfence_protect_page(unsigned long addr, bool protect)
> +{
> +	struct page *page = virt_to_page(addr);
> +
> +	if (protect)
> +		__kernel_map_pages(page, 1, 0);
> +	else
> +		__kernel_map_pages(page, 1, 1);

Can be:
	__kernel_map_pages(virt_to_page(addr), 1, !protect);

> +
> +	return true;
> +}
> +#else
>   static inline bool kfence_protect_page(unsigned long addr, bool protect)
>   {
>   	pte_t *kpte = virt_to_kpte(addr);
> @@ -29,5 +46,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
>   
>   	return true;
>   }
> +#endif
>   
>   #endif /* __ASM_POWERPC_KFENCE_H */
> diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
> index b37412fe5930..9cceaa5998a3 100644
> --- a/arch/powerpc/mm/book3s64/hash_utils.c
> +++ b/arch/powerpc/mm/book3s64/hash_utils.c
> @@ -424,7 +424,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
>   			break;
>   
>   		cond_resched();
> -		if (debug_pagealloc_enabled() &&
> +		if (debug_pagealloc_enabled_or_kfence() &&
>   			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
>   			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
>   	}
> @@ -773,7 +773,7 @@ static void __init htab_init_page_sizes(void)
>   	bool aligned = true;
>   	init_hpte_page_sizes();
>   
> -	if (!debug_pagealloc_enabled()) {
> +	if (!debug_pagealloc_enabled_or_kfence()) {
>   		/*
>   		 * Pick a size for the linear mapping. Currently, we only
>   		 * support 16M, 1M and 4K which is the default
> @@ -1061,7 +1061,7 @@ static void __init htab_initialize(void)
>   
>   	prot = pgprot_val(PAGE_KERNEL);
>   
> -	if (debug_pagealloc_enabled()) {
> +	if (debug_pagealloc_enabled_or_kfence()) {
>   		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
>   		linear_map_hash_slots = memblock_alloc_try_nid(
>   				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
> @@ -1983,7 +1983,7 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
>   	return slot;
>   }
>   
> -#ifdef CONFIG_DEBUG_PAGEALLOC
> +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
>   static DEFINE_SPINLOCK(linear_map_hash_lock);
>   
>   static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
> @@ -2056,7 +2056,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
>   	}
>   	local_irq_restore(flags);
>   }
> -#endif /* CONFIG_DEBUG_PAGEALLOC */
> +#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
>   
>   void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
>   				phys_addr_t first_memblock_size)
> diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
> index 483c99bfbde5..217833fe4f34 100644
> --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
> +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
> @@ -34,6 +34,8 @@
>   
>   #include <trace/events/thp.h>
>   
> +#include <mm/mmu_decl.h>
> +
>   unsigned int mmu_base_pid;
>   unsigned long radix_mem_block_size __ro_after_init;
>   
> @@ -504,7 +506,7 @@ static unsigned long __init radix_memory_block_size(void)
>   {
>   	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
>   
> -	if (debug_pagealloc_enabled())
> +	if (debug_pagealloc_enabled_or_kfence())
>   		return PAGE_SIZE;
>   
>   	/*
> @@ -523,7 +525,7 @@ static unsigned long __init radix_memory_block_size(void)
>   
>   static unsigned long __init radix_memory_block_size(void)
>   {
> -	if (debug_pagealloc_enabled())
> +	if (debug_pagealloc_enabled_or_kfence())
>   		return PAGE_SIZE;
>   
>   	return 1UL * 1024 * 1024 * 1024;
> @@ -903,7 +905,7 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
>   #endif
>   #endif
>   
> -#ifdef CONFIG_DEBUG_PAGEALLOC
> +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
>   void radix__kernel_map_pages(struct page *page, int numpages, int enable)
>   {
>   	unsigned long addr;


* Re: [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix
  2022-09-19  1:44 [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Nicholas Miehlbradt
                   ` (3 preceding siblings ...)
  2022-09-19  6:17 ` [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Christophe Leroy
@ 2022-09-19  7:00 ` Michael Ellerman
  2022-09-19  7:05   ` Christophe Leroy
  4 siblings, 1 reply; 10+ messages in thread
From: Michael Ellerman @ 2022-09-19  7:00 UTC (permalink / raw)
  To: Nicholas Miehlbradt, linuxppc-dev; +Cc: Nicholas Miehlbradt

Nicholas Miehlbradt <nicholas@linux.ibm.com> writes:
> There is support for DEBUG_PAGEALLOC on hash but not on radix.
> Add support on radix.
>
> Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
> ---
>  arch/powerpc/mm/book3s64/radix_pgtable.c | 16 +++++++++++++++-
>  1 file changed, 15 insertions(+), 1 deletion(-)
>
> diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
> index db2f3d193448..483c99bfbde5 100644
> --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
> +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
> @@ -30,6 +30,7 @@
>  #include <asm/trace.h>
>  #include <asm/uaccess.h>
>  #include <asm/ultravisor.h>
> +#include <asm/set_memory.h>
>  
>  #include <trace/events/thp.h>
>  
> @@ -503,6 +504,9 @@ static unsigned long __init radix_memory_block_size(void)
>  {
>  	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
>  
> +	if (debug_pagealloc_enabled())
> +		return PAGE_SIZE;
> +
>  	/*
>  	 * OPAL firmware feature is set by now. Hence we are ok
>  	 * to test OPAL feature.
> @@ -519,6 +523,9 @@ static unsigned long __init radix_memory_block_size(void)
>  
>  static unsigned long __init radix_memory_block_size(void)
>  {
> +	if (debug_pagealloc_enabled())
> +		return PAGE_SIZE;
> +
>  	return 1UL * 1024 * 1024 * 1024;
>  }
  
This value ends up in radix_mem_block_size, which is returned by 
pnv_memory_block_size(), which is wired up as ppc_md.memory_block_size,
and that's called by memory_block_size_bytes().

And I thought that value had to be >= MIN_MEMORY_BLOCK_SIZE.

#define MIN_MEMORY_BLOCK_SIZE     (1UL << SECTION_SIZE_BITS)
#define SECTION_SIZE_BITS       24


I would expect us to hit the panic in memory_dev_init().

So that's odd.

I suspect you need to leave radix_memory_block_size() alone, or at least
make sure you return MIN_MEMORY_BLOCK_SIZE when debug page alloc is
enabled.

We probably need a separate variable that holds the max page size used
for the linear mapping, and that would then be 1G in the normal case or
PAGE_SIZE in the debug page alloc case.

cheers
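For concreteness, a quick standalone check of the numbers quoted above (assuming the common 64K ppc64 page size):

#include <stdio.h>

#define SECTION_SIZE_BITS       24
#define MIN_MEMORY_BLOCK_SIZE   (1UL << SECTION_SIZE_BITS)     /* 16 MiB */
#define PAGE_SIZE               (64UL * 1024)                  /* 64 KiB */

int main(void)
{
        unsigned long block = PAGE_SIZE;        /* what the patch returns */

        printf("min block: %lu MiB, returned: %lu KiB\n",
               MIN_MEMORY_BLOCK_SIZE >> 20, block >> 10);
        if (block < MIN_MEMORY_BLOCK_SIZE)
                printf("-> memory_dev_init() would panic on this value\n");
        return 0;
}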


* Re: [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix
  2022-09-19  7:00 ` Michael Ellerman
@ 2022-09-19  7:05   ` Christophe Leroy
  0 siblings, 0 replies; 10+ messages in thread
From: Christophe Leroy @ 2022-09-19  7:05 UTC (permalink / raw)
  To: Michael Ellerman, Nicholas Miehlbradt, linuxppc-dev



On 19/09/2022 at 09:00, Michael Ellerman wrote:
> Nicholas Miehlbradt <nicholas@linux.ibm.com> writes:
>> There is support for DEBUG_PAGEALLOC on hash but not on radix.
>> Add support on radix.
>>
>> Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
>> ---
>>   arch/powerpc/mm/book3s64/radix_pgtable.c | 16 +++++++++++++++-
>>   1 file changed, 15 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
>> index db2f3d193448..483c99bfbde5 100644
>> --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
>> +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
>> @@ -30,6 +30,7 @@
>>   #include <asm/trace.h>
>>   #include <asm/uaccess.h>
>>   #include <asm/ultravisor.h>
>> +#include <asm/set_memory.h>
>>   
>>   #include <trace/events/thp.h>
>>   
>> @@ -503,6 +504,9 @@ static unsigned long __init radix_memory_block_size(void)
>>   {
>>   	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
>>   
>> +	if (debug_pagealloc_enabled())
>> +		return PAGE_SIZE;
>> +
>>   	/*
>>   	 * OPAL firmware feature is set by now. Hence we are ok
>>   	 * to test OPAL feature.
>> @@ -519,6 +523,9 @@ static unsigned long __init radix_memory_block_size(void)
>>   
>>   static unsigned long __init radix_memory_block_size(void)
>>   {
>> +	if (debug_pagealloc_enabled())
>> +		return PAGE_SIZE;
>> +
>>   	return 1UL * 1024 * 1024 * 1024;
>>   }
>    
> This value ends up in radix_mem_block_size, which is returned by
> pnv_memory_block_size(), which is wired up as ppc_md.memory_block_size,
> and that's called by memory_block_size_bytes().
> 
> And I thought that value had to be >= MIN_MEMORY_BLOCK_SIZE.
> 
> #define MIN_MEMORY_BLOCK_SIZE     (1UL << SECTION_SIZE_BITS)
> #define SECTION_SIZE_BITS       24
> 
> 
> I would expect us to hit the panic in memory_dev_init().
> 
> So that's odd.
> 
> I suspect you need to leave radix_memory_block_size() alone, or at least
> make sure you return MIN_MEMORY_BLOCK_SIZE when debug page alloc is
> enabled.
> 
> We probably need a separate variable that holds the max page size used
> for the linear mapping, and that would then be 1G in the normal case or
> PAGE_SIZE in the debug page alloc case.
> 

I don't know the details of PPC64, but as you mention the linear mapping, be 
aware that you don't need to map everything with pages, only data. You 
can keep text mapped by blocks; that is what is done on the 8xx and book3s/32.

Christophe
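A rough sketch of the split Christophe describes, choosing a mapping granularity per region; the sizes and helper name here are made up for illustration:

#include <stdio.h>

#define SZ_64K  (64UL * 1024)
#define SZ_1G   (1024UL * 1024 * 1024)

/* Text is never unmapped page-by-page, so it can keep the large
 * block mapping; only data needs page-granular mappings. */
static unsigned long mapping_size(int is_text, int debug_pagealloc)
{
        if (debug_pagealloc && !is_text)
                return SZ_64K;
        return SZ_1G;
}

int main(void)
{
        printf("data: %lu KiB blocks\n", mapping_size(0, 1) >> 10);
        printf("text: %lu MiB blocks\n", mapping_size(1, 1) >> 20);
        return 0;
}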


* Re: [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix
  2021-05-17  6:16 ` [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Jordan Niethe
@ 2021-06-18  7:28   ` Daniel Axtens
  0 siblings, 0 replies; 10+ messages in thread
From: Daniel Axtens @ 2021-06-18  7:28 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev; +Cc: Jordan Niethe, npiggin, aneesh.kumar

Jordan Niethe <jniethe5@gmail.com> writes:

> There is support for DEBUG_PAGEALLOC on hash but not on radix.
> Add support on radix.

Somewhat off-topic but I wonder at what point we can drop the weird !PPC
condition in mm/Kconfig.debug:

config DEBUG_PAGEALLOC
        bool "Debug page memory allocations"
        depends on DEBUG_KERNEL
        depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC

I can't figure out from git history why it exists or if it still serves
any function. Given that HIBERNATION isn't much use on Book3S systems it
probably never matters, it just bugs me a bit.

Again, nothing that has to block this series, just maybe something to
follow up at some vague and undefined point in the future!

> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index a666d561b44d..b89482aed82a 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -812,6 +822,15 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
>   * Generic functions with hash/radix callbacks
>   */
>  
> +#ifdef CONFIG_DEBUG_PAGEALLOC
> +static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
> +{
> +	if (radix_enabled())
> +		radix__kernel_map_pages(page, numpages, enable);
> +	hash__kernel_map_pages(page, numpages, enable);


Does this require an else? On radix we will call both radix__ and
hash__kernel_map_pages.

I notice we call both hash__ and radix__ in map_kernel_page under radix,
so maybe this makes sense?
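
Spelled out, the else variant in question would be (a sketch; whether it
is the right semantics is exactly the open question):

static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (radix_enabled())
		radix__kernel_map_pages(page, numpages, enable);
	else
		hash__kernel_map_pages(page, numpages, enable);
}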

I don't fully understand the mechanism by which memory removal works: it
looks like on radix you mark the page as 'absent', which I think is
enough? Then you fall through to the hash code here:

	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;

I think linear_map_hash_count will be zero unless it gets inited to a
non-zero value in htab_initialize(). I am fairly sure htab_initialize()
doesn't get called for a radix MMU. In that case you'll just `continue;`
out of every iteration of the loop, which would explain why nothing
weird would happen on radix.
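
That reading is easy to sanity-check in isolation: with
linear_map_hash_count still zero, the guard is true for every page and
the loop body never runs. A trivial standalone model (illustrative only):

#include <stdio.h>

static unsigned long linear_map_hash_count;     /* never set up on radix */

int main(void)
{
        int touched = 0;

        for (unsigned long lmi = 0; lmi < 8; lmi++) {
                if (lmi >= linear_map_hash_count)
                        continue;               /* always taken when count == 0 */
                touched++;
        }
        printf("slots touched: %d\n", touched); /* prints 0 */
        return 0;
}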

Have I missed something here?

The rest of the patch looks good to me.

Kind regards,
Daniel


* [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix
  2021-05-17  6:16 [PATCH 0/4] powerpc/64s: Enable KFENCE Jordan Niethe
@ 2021-05-17  6:16 ` Jordan Niethe
  2021-06-18  7:28   ` Daniel Axtens
  0 siblings, 1 reply; 10+ messages in thread
From: Jordan Niethe @ 2021-05-17  6:16 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Jordan Niethe, npiggin, aneesh.kumar

There is support for DEBUG_PAGEALLOC on hash but not on radix.
Add support on radix.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
 arch/powerpc/include/asm/book3s/32/pgtable.h | 10 ++++++++
 arch/powerpc/include/asm/book3s/64/hash.h    |  2 ++
 arch/powerpc/include/asm/book3s/64/pgtable.h | 19 ++++++++++++++
 arch/powerpc/include/asm/book3s/64/radix.h   |  2 ++
 arch/powerpc/include/asm/nohash/pgtable.h    | 10 ++++++++
 arch/powerpc/include/asm/set_memory.h        |  2 ++
 arch/powerpc/mm/book3s64/hash_utils.c        |  2 +-
 arch/powerpc/mm/book3s64/radix_pgtable.c     | 26 ++++++++++++++++++--
 arch/powerpc/mm/pageattr.c                   |  6 +++++
 9 files changed, 76 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 83c65845a1a9..30533d409f7f 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -417,6 +417,16 @@ static inline unsigned long pte_pfn(pte_t pte)
 }
 
 /* Generic modifiers for PTE bits */
+static inline pte_t pte_mkabsent(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_PRESENT);
+}
+
+static inline pte_t pte_mkpresent(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_PRESENT);
+}
+
 static inline pte_t pte_wrprotect(pte_t pte)
 {
 	return __pte(pte_val(pte) & ~_PAGE_RW);
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index d959b0195ad9..f6171633cdc2 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -179,6 +179,8 @@ static inline unsigned long hash__pte_update(struct mm_struct *mm,
 	return old;
 }
 
+void hash__kernel_map_pages(struct page *page, int numpages, int enable);
+
 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
  * function doesn't need to flush the hash entry
  */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index a666d561b44d..b89482aed82a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -651,6 +651,16 @@ static inline unsigned long pte_pfn(pte_t pte)
 }
 
 /* Generic modifiers for PTE bits */
+static inline pte_t pte_mkabsent(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRESENT));
+}
+
+static inline pte_t pte_mkpresent(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRESENT));
+}
+
 static inline pte_t pte_wrprotect(pte_t pte)
 {
 	if (unlikely(pte_savedwrite(pte)))
@@ -812,6 +822,15 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
  * Generic functions with hash/radix callbacks
  */
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	if (radix_enabled())
+		radix__kernel_map_pages(page, numpages, enable);
+	hash__kernel_map_pages(page, numpages, enable);
+}
+#endif
+
 static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 					   pte_t *ptep, pte_t entry,
 					   unsigned long address,
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 59cab558e2f0..d4fa28a77cc6 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -137,6 +137,8 @@ extern void radix__mark_rodata_ro(void);
 extern void radix__mark_initmem_nx(void);
 #endif
 
+void radix__kernel_map_pages(struct page *page, int numpages, int enable);
+
 extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
 					 pte_t entry, unsigned long address,
 					 int psize);
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index ac75f4ab0dba..2a57bbb5820a 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -125,6 +125,16 @@ static inline unsigned long pte_pfn(pte_t pte)	{
 	return pte_val(pte) >> PTE_RPN_SHIFT; }
 
 /* Generic modifiers for PTE bits */
+static inline pte_t pte_mkabsent(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_PRESENT);
+}
+
+static inline pte_t pte_mkpresent(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_PRESENT);
+}
+
 static inline pte_t pte_exprotect(pte_t pte)
 {
 	return __pte(pte_val(pte) & ~_PAGE_EXEC);
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
index b040094f7920..4b6dfaad4cc9 100644
--- a/arch/powerpc/include/asm/set_memory.h
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -6,6 +6,8 @@
 #define SET_MEMORY_RW	1
 #define SET_MEMORY_NX	2
 #define SET_MEMORY_X	3
+#define SET_MEMORY_EN	4
+#define SET_MEMORY_DIS	5
 
 int change_memory_attr(unsigned long addr, int numpages, long action);
 
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 96d9aa164007..5b9709075fbd 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1990,7 +1990,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 				     mmu_kernel_ssize, 0);
 }
 
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long flags, vaddr, lmi;
 	int i;
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 5fef8db3b463..2aa81b9e354a 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -16,6 +16,7 @@
 #include <linux/hugetlb.h>
 #include <linux/string_helpers.h>
 #include <linux/memory.h>
+#include <linux/set_memory.h>
 
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
@@ -330,9 +331,13 @@ static int __meminit create_physical_mapping(unsigned long start,
 static void __init radix_init_pgtable(void)
 {
 	unsigned long rts_field;
+	unsigned long size = radix_mem_block_size;
 	phys_addr_t start, end;
 	u64 i;
 
+	if (debug_pagealloc_enabled())
+		size = PAGE_SIZE;
+
 	/* We don't support slb for radix */
 	mmu_slb_size = 0;
 
@@ -352,7 +357,7 @@ static void __init radix_init_pgtable(void)
 		}
 
 		WARN_ON(create_physical_mapping(start, end,
-						radix_mem_block_size,
+						size,
 						-1, PAGE_KERNEL));
 	}
 
@@ -872,13 +877,18 @@ int __meminit radix__create_section_mapping(unsigned long start,
 					    unsigned long end, int nid,
 					    pgprot_t prot)
 {
+	unsigned long size = radix_mem_block_size;
+
+	if (debug_pagealloc_enabled())
+		size = PAGE_SIZE;
+
 	if (end >= RADIX_VMALLOC_START) {
 		pr_warn("Outside the supported range\n");
 		return -1;
 	}
 
 	return create_physical_mapping(__pa(start), __pa(end),
-				       radix_mem_block_size, nid, prot);
+				       size, nid, prot);
 }
 
 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
@@ -1165,3 +1175,15 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 
 	return 1;
 }
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void radix__kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	if (enable)
+		change_memory_attr(addr, numpages, SET_MEMORY_EN);
+	else
+		change_memory_attr(addr, numpages, SET_MEMORY_DIS);
+}
+#endif
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
index 0876216ceee6..d3db09447fa6 100644
--- a/arch/powerpc/mm/pageattr.c
+++ b/arch/powerpc/mm/pageattr.c
@@ -54,6 +54,12 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 	case SET_MEMORY_X:
 		pte = pte_mkexec(pte);
 		break;
+	case SET_MEMORY_DIS:
+		pte = pte_mkabsent(pte);
+		break;
+	case SET_MEMORY_EN:
+		pte = pte_mkpresent(pte);
+		break;
 	default:
 		WARN_ON_ONCE(1);
 		break;
-- 
2.25.1
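The pte_mkabsent()/pte_mkpresent() helpers added here reduce to toggling a single present bit in the PTE value, which is what makes SET_MEMORY_DIS/SET_MEMORY_EN cheap. A hedged standalone model (the bit position is made up for illustration):

#include <stdio.h>

typedef unsigned long pte_t;
#define _PAGE_PRESENT   (1UL << 0)      /* position illustrative only */

static pte_t pte_mkabsent(pte_t pte)  { return pte & ~_PAGE_PRESENT; }
static pte_t pte_mkpresent(pte_t pte) { return pte | _PAGE_PRESENT; }

int main(void)
{
        pte_t pte = 0xdeadb000UL | _PAGE_PRESENT;

        pte = pte_mkabsent(pte);        /* SET_MEMORY_DIS: faults on access */
        printf("present: %lu\n", pte & _PAGE_PRESENT);
        pte = pte_mkpresent(pte);       /* SET_MEMORY_EN: restore mapping */
        printf("present: %lu\n", pte & _PAGE_PRESENT);
        return 0;
}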

