* [PATCH v2 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix
From: Nicholas Miehlbradt @ 2022-09-21  2:02 UTC
  To: linuxppc-dev; +Cc: Nicholas Miehlbradt

There is support for DEBUG_PAGEALLOC on hash but not on radix.
Add support on radix.
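
For context: DEBUG_PAGEALLOC catches invalid accesses by unmapping
pages from the kernel linear map as the page allocator frees them, so
a use-after-free faults immediately. Roughly, the generic allocator
drives the arch hook like this (simplified sketch; the exact mm/
helpers may differ):

	static inline void debug_pagealloc_unmap_pages(struct page *page,
						       int numpages)
	{
		if (debug_pagealloc_enabled_static())
			__kernel_map_pages(page, numpages, 0);	/* unmap */
	}

	static inline void debug_pagealloc_map_pages(struct page *page,
						     int numpages)
	{
		if (debug_pagealloc_enabled_static())
			__kernel_map_pages(page, numpages, 1);	/* remap */
	}

On book3s64, __kernel_map_pages() dispatches to either
hash__kernel_map_pages() or radix__kernel_map_pages(); the latter is
what this patch fills in using set_memory_p()/set_memory_np().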

Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
---
v2: Revert change to radix_mem_block_size; instead set the size
in radix_init_pgtable and radix__create_section_mapping directly.
---
 arch/powerpc/mm/book3s64/radix_pgtable.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index db2f3d193448..623455c195d8 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -30,6 +30,7 @@
 #include <asm/trace.h>
 #include <asm/uaccess.h>
 #include <asm/ultravisor.h>
+#include <asm/set_memory.h>
 
 #include <trace/events/thp.h>
 
@@ -332,6 +333,10 @@ static void __init radix_init_pgtable(void)
 	unsigned long rts_field;
 	phys_addr_t start, end;
 	u64 i;
+	unsigned long size = radix_mem_block_size;
+
+	if (debug_pagealloc_enabled())
+		size = PAGE_SIZE;
 
 	/* We don't support slb for radix */
 	slb_set_size(0);
@@ -352,7 +357,7 @@ static void __init radix_init_pgtable(void)
 		}
 
 		WARN_ON(create_physical_mapping(start, end,
-						radix_mem_block_size,
+						size,
 						-1, PAGE_KERNEL));
 	}
 
@@ -844,13 +849,18 @@ int __meminit radix__create_section_mapping(unsigned long start,
 					    unsigned long end, int nid,
 					    pgprot_t prot)
 {
+	unsigned long size = radix_mem_block_size;
+
+	if (debug_pagealloc_enabled())
+		size = PAGE_SIZE;
+
 	if (end >= RADIX_VMALLOC_START) {
 		pr_warn("Outside the supported range\n");
 		return -1;
 	}
 
 	return create_physical_mapping(__pa(start), __pa(end),
-				       radix_mem_block_size, nid, prot);
+				       size, nid, prot);
 }
 
 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
@@ -899,7 +909,14 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void radix__kernel_map_pages(struct page *page, int numpages, int enable)
 {
-	pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
+	unsigned long addr;
+
+	addr = (unsigned long)page_address(page);
+
+	if (enable)
+		set_memory_p(addr, numpages);
+	else
+		set_memory_np(addr, numpages);
 }
 #endif
 
-- 
2.34.1



* [PATCH v2 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils
From: Nicholas Miehlbradt @ 2022-09-21  2:02 UTC
  To: linuxppc-dev; +Cc: Nicholas Miehlbradt

From: Christophe Leroy <christophe.leroy@csgroup.eu>

debug_pagealloc_enabled() is always defined and constant folds to
'false' when CONFIG_DEBUG_PAGEALLOC is not enabled.

Remove the #ifdefs; the code and associated static variables will
be optimised out by the compiler when CONFIG_DEBUG_PAGEALLOC is
not defined.
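
A sketch of the shape that makes this work (simplified; treat the
exact definition in include/linux/mm.h as an assumption):

	#ifdef CONFIG_DEBUG_PAGEALLOC
	extern unsigned int _debug_pagealloc_enabled_early;

	static inline bool debug_pagealloc_enabled(void)
	{
		return _debug_pagealloc_enabled_early;
	}
	#else
	static inline bool debug_pagealloc_enabled(void)
	{
		/* Constant-folds to false: any code guarded by it is dead,
		 * so the compiler drops it and the statics it references. */
		return false;
	}
	#endif

A side benefit over #ifdef is that the guarded code is still parsed
and type-checked in every configuration, so it cannot silently bitrot.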

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
---
 arch/powerpc/mm/book3s64/hash_utils.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index fc92613dc2bf..e63ff401a6ea 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -123,11 +123,8 @@ EXPORT_SYMBOL_GPL(mmu_slb_size);
 #ifdef CONFIG_PPC_64K_PAGES
 int mmu_ci_restrictions;
 #endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
 static u8 *linear_map_hash_slots;
 static unsigned long linear_map_hash_count;
-static DEFINE_SPINLOCK(linear_map_hash_lock);
-#endif /* CONFIG_DEBUG_PAGEALLOC */
 struct mmu_hash_ops mmu_hash_ops;
 EXPORT_SYMBOL(mmu_hash_ops);
 
@@ -427,11 +424,9 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			break;
 
 		cond_resched();
-#ifdef CONFIG_DEBUG_PAGEALLOC
 		if (debug_pagealloc_enabled() &&
 			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
-#endif /* CONFIG_DEBUG_PAGEALLOC */
 	}
 	return ret < 0 ? ret : 0;
 }
@@ -1066,7 +1061,6 @@ static void __init htab_initialize(void)
 
 	prot = pgprot_val(PAGE_KERNEL);
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 	if (debug_pagealloc_enabled()) {
 		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 		linear_map_hash_slots = memblock_alloc_try_nid(
@@ -1076,7 +1070,6 @@ static void __init htab_initialize(void)
 			panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
 			      __func__, linear_map_hash_count, &ppc64_rma_size);
 	}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 	/* create bolted the linear mapping in the hash table */
 	for_each_mem_range(i, &base, &end) {
@@ -1991,6 +1984,8 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
+static DEFINE_SPINLOCK(linear_map_hash_lock);
+
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 {
 	unsigned long hash;
-- 
2.34.1



* [PATCH v2 3/4] powerpc/64s: Allow double call of kernel_[un]map_linear_page()
From: Nicholas Miehlbradt @ 2022-09-21  2:02 UTC
  To: linuxppc-dev; +Cc: Nicholas Miehlbradt

From: Christophe Leroy <christophe.leroy@csgroup.eu>

If the page is already mapped (or, on the unmap path, already
unmapped), bail out.
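
The guards key off a per-page byte in linear_map_hash_slots[]. The
convention, inferred from the surrounding hash_utils.c code (the names
below are hypothetical; the kernel uses the bare 0x80/0x7f constants):

	static u8 *linear_map_hash_slots;	/* one byte per linear-map page */

	#define LMH_MAPPED	0x80	/* page currently mapped in the hash table */
	#define LMH_HIDX_MASK	0x7f	/* slot index (hidx) needed to evict the HPTE */

	static bool lmh_is_mapped(unsigned long lmi)
	{
		return linear_map_hash_slots[lmi] & LMH_MAPPED;
	}

Checking the 0x80 flag first is what makes kernel_map_linear_page()
and kernel_unmap_linear_page() safe to call twice in a row.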

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
---
 arch/powerpc/mm/book3s64/hash_utils.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index e63ff401a6ea..b37412fe5930 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -2000,6 +2000,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 	if (!vsid)
 		return;
 
+	if (linear_map_hash_slots[lmi] & 0x80)
+		return;
+
 	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
 				    HPTE_V_BOLTED,
 				    mmu_linear_psize, mmu_kernel_ssize);
@@ -2019,7 +2022,10 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
 	spin_lock(&linear_map_hash_lock);
-	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
+	if (!(linear_map_hash_slots[lmi] & 0x80)) {
+		spin_unlock(&linear_map_hash_lock);
+		return;
+	}
 	hidx = linear_map_hash_slots[lmi] & 0x7f;
 	linear_map_hash_slots[lmi] = 0;
 	spin_unlock(&linear_map_hash_lock);
-- 
2.34.1



* [PATCH v2 4/4] powerpc/64s: Enable KFENCE on book3s64
From: Nicholas Miehlbradt @ 2022-09-21  2:02 UTC
  To: linuxppc-dev; +Cc: Nicholas Miehlbradt

KFENCE support was added for ppc32 in commit 90cbac0e995d
("powerpc: Enable KFENCE for PPC32").
Enable KFENCE on the ppc64 architecture with hash and radix MMUs.
It uses the same mechanism as debug pagealloc to
protect/unprotect pages. All KFENCE kunit tests pass on both
MMUs.

KFENCE memory is initially allocated using memblock but is
later marked as SLAB allocated. This necessitates the change
to __pud_free to ensure that the KFENCE pages are freed
appropriately.

Based on previous work by Christophe Leroy and Jordan Niethe.
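
For orientation, this is roughly how core KFENCE reaches the arch hook
(simplified from mm/kfence; the real functions also warn on failure):

	/* KFENCE toggles protection on individual object/guard pages. */
	static bool kfence_protect(unsigned long addr)
	{
		return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
	}

	static bool kfence_unprotect(unsigned long addr)
	{
		return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
	}

On ppc64, kfence_protect_page() as added below just forwards to
__kernel_map_pages(), which is the sense in which KFENCE shares the
debug pagealloc mechanism.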

Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
---
v2: Refactor
---
 arch/powerpc/Kconfig                         |  2 +-
 arch/powerpc/include/asm/book3s/64/pgalloc.h |  6 ++++--
 arch/powerpc/include/asm/book3s/64/pgtable.h |  2 +-
 arch/powerpc/include/asm/kfence.h            | 15 +++++++++++++++
 arch/powerpc/mm/book3s64/hash_utils.c        | 10 +++++-----
 arch/powerpc/mm/book3s64/radix_pgtable.c     |  8 +++++---
 6 files changed, 31 insertions(+), 12 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a4f8a5276e5c..f7dd0f49510d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -194,7 +194,7 @@ config PPC
 	select HAVE_ARCH_KASAN			if PPC32 && PPC_PAGE_SHIFT <= 14
 	select HAVE_ARCH_KASAN			if PPC_RADIX_MMU
 	select HAVE_ARCH_KASAN_VMALLOC		if HAVE_ARCH_KASAN
-	select HAVE_ARCH_KFENCE			if PPC_BOOK3S_32 || PPC_8xx || 40x
+	select HAVE_ARCH_KFENCE			if ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if COMPAT
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index e1af0b394ceb..dd2cff53a111 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -113,9 +113,11 @@ static inline void __pud_free(pud_t *pud)
 
 	/*
 	 * Early pud pages allocated via memblock allocator
-	 * can't be directly freed to slab
+	 * can't be directly freed to slab. KFENCE pages have
+	 * both reserved and slab flags set so they need to be
+	 * freed with kmem_cache_free().
 	 */
-	if (PageReserved(page))
+	if (PageReserved(page) && !PageSlab(page))
 		free_reserved_page(page);
 	else
 		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index cb9d5fd39d7f..fd5d800f2836 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1123,7 +1123,7 @@ static inline void vmemmap_remove_mapping(unsigned long start,
 }
 #endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (radix_enabled())
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
index a9846b68c6b9..cff60983e88d 100644
--- a/arch/powerpc/include/asm/kfence.h
+++ b/arch/powerpc/include/asm/kfence.h
@@ -11,11 +11,25 @@
 #include <linux/mm.h>
 #include <asm/pgtable.h>
 
+#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC64_ELF_ABI_V2)
+#define ARCH_FUNC_PREFIX "."
+#endif
+
 static inline bool arch_kfence_init_pool(void)
 {
 	return true;
 }
 
+#ifdef CONFIG_PPC64
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+	struct page *page = virt_to_page(addr);
+
+	__kernel_map_pages(page, 1, !protect);
+
+	return true;
+}
+#else
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
 	pte_t *kpte = virt_to_kpte(addr);
@@ -29,5 +43,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 
 	return true;
 }
+#endif
 
 #endif /* __ASM_POWERPC_KFENCE_H */
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index b37412fe5930..9cceaa5998a3 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -424,7 +424,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			break;
 
 		cond_resched();
-		if (debug_pagealloc_enabled() &&
+		if (debug_pagealloc_enabled_or_kfence() &&
 			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
 	}
@@ -773,7 +773,7 @@ static void __init htab_init_page_sizes(void)
 	bool aligned = true;
 	init_hpte_page_sizes();
 
-	if (!debug_pagealloc_enabled()) {
+	if (!debug_pagealloc_enabled_or_kfence()) {
 		/*
 		 * Pick a size for the linear mapping. Currently, we only
 		 * support 16M, 1M and 4K which is the default
@@ -1061,7 +1061,7 @@ static void __init htab_initialize(void)
 
 	prot = pgprot_val(PAGE_KERNEL);
 
-	if (debug_pagealloc_enabled()) {
+	if (debug_pagealloc_enabled_or_kfence()) {
 		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 		linear_map_hash_slots = memblock_alloc_try_nid(
 				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
@@ -1983,7 +1983,7 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
 	return slot;
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static DEFINE_SPINLOCK(linear_map_hash_lock);
 
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -2056,7 +2056,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 	}
 	local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
 
 void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 623455c195d8..e88f9c45f34a 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -34,6 +34,8 @@
 
 #include <trace/events/thp.h>
 
+#include <mm/mmu_decl.h>
+
 unsigned int mmu_base_pid;
 unsigned long radix_mem_block_size __ro_after_init;
 
@@ -335,7 +337,7 @@ static void __init radix_init_pgtable(void)
 	u64 i;
 	unsigned long size = radix_mem_block_size;
 
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_or_kfence())
 		size = PAGE_SIZE;
 
 	/* We don't support slb for radix */
@@ -851,7 +853,7 @@ int __meminit radix__create_section_mapping(unsigned long start,
 {
 	unsigned long size = radix_mem_block_size;
 
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_or_kfence())
 		size = PAGE_SIZE;
 
 	if (end >= RADIX_VMALLOC_START) {
@@ -906,7 +908,7 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
 #endif
 #endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 void radix__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long addr;
-- 
2.34.1



* Re: [PATCH v2 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix
From: Michael Ellerman @ 2022-09-21  3:31 UTC
  To: Nicholas Miehlbradt, linuxppc-dev; +Cc: Nicholas Miehlbradt

Nicholas Miehlbradt <nicholas@linux.ibm.com> writes:
> There is support for DEBUG_PAGEALLOC on hash but not on radix.
> Add support on radix.
>
> Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
> ---
> v2: Revert change to radix_mem_block_size; instead set the size
> in radix_init_pgtable and radix__create_section_mapping directly.
> ---
>  arch/powerpc/mm/book3s64/radix_pgtable.c | 23 ++++++++++++++++++++---
>  1 file changed, 20 insertions(+), 3 deletions(-)
>
> diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
> index db2f3d193448..623455c195d8 100644
> --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
> +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
> @@ -30,6 +30,7 @@
>  #include <asm/trace.h>
>  #include <asm/uaccess.h>
>  #include <asm/ultravisor.h>
> +#include <asm/set_memory.h>
>  
>  #include <trace/events/thp.h>
>  
> @@ -332,6 +333,10 @@ static void __init radix_init_pgtable(void)
>  	unsigned long rts_field;
>  	phys_addr_t start, end;
>  	u64 i;
> +	unsigned long size = radix_mem_block_size;
> +
> +	if (debug_pagealloc_enabled())
> +		size = PAGE_SIZE;
>  
>  	/* We don't support slb for radix */
>  	slb_set_size(0);
> @@ -352,7 +357,7 @@ static void __init radix_init_pgtable(void)
>  		}
>  
>  		WARN_ON(create_physical_mapping(start, end,
> -						radix_mem_block_size,
> +						size,
>  						-1, PAGE_KERNEL));
>  	}

There are only two calls to create_physical_mapping().

Both pass the same value for size, and now both needed updating for
debug pagealloc.

Seems like create_physical_mapping() doesn't need the size passed to it
at all; it may as well just compute the right value itself.
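
One way that could look (untested sketch; assumes the rest of
create_physical_mapping()'s parameters and body stay as they are):

	static int __meminit create_physical_mapping(unsigned long start,
						     unsigned long end, int nid,
						     pgprot_t prot)
	{
		/* Decide the mapping size here rather than in each caller. */
		unsigned long max_mapping_size = radix_mem_block_size;

		if (debug_pagealloc_enabled())
			max_mapping_size = PAGE_SIZE;

		/* ... existing body, unchanged, using max_mapping_size ... */
	}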

> @@ -844,13 +849,18 @@ int __meminit radix__create_section_mapping(unsigned long start,
>  					    unsigned long end, int nid,
>  					    pgprot_t prot)
>  {
> +	unsigned long size = radix_mem_block_size;
> +
> +	if (debug_pagealloc_enabled())
> +		size = PAGE_SIZE;
> +
>  	if (end >= RADIX_VMALLOC_START) {
>  		pr_warn("Outside the supported range\n");
>  		return -1;
>  	}
>  
>  	return create_physical_mapping(__pa(start), __pa(end),
> -				       radix_mem_block_size, nid, prot);
> +				       size, nid, prot);
>  }

cheers


* Re: [PATCH v2 4/4] powerpc/64s: Enable KFENCE on book3s64
From: Christophe Leroy @ 2022-09-21  7:07 UTC
  To: Nicholas Miehlbradt, linuxppc-dev



On 21/09/2022 at 04:02, Nicholas Miehlbradt wrote:
> KFENCE support was added for ppc32 in commit 90cbac0e995d
> ("powerpc: Enable KFENCE for PPC32").
> Enable KFENCE on the ppc64 architecture with hash and radix MMUs.
> It uses the same mechanism as debug pagealloc to
> protect/unprotect pages. All KFENCE kunit tests pass on both
> MMUs.
> 
> KFENCE memory is initially allocated using memblock but is
> later marked as SLAB allocated. This necessitates the change
> to __pud_free to ensure that the KFENCE pages are freed
> appropriately.
> 
> Based on previous work by Christophe Leroy and Jordan Niethe.
> 
> Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
> ---
> v2: Refactor
> ---
>   arch/powerpc/Kconfig                         |  2 +-
>   arch/powerpc/include/asm/book3s/64/pgalloc.h |  6 ++++--
>   arch/powerpc/include/asm/book3s/64/pgtable.h |  2 +-
>   arch/powerpc/include/asm/kfence.h            | 15 +++++++++++++++
>   arch/powerpc/mm/book3s64/hash_utils.c        | 10 +++++-----
>   arch/powerpc/mm/book3s64/radix_pgtable.c     |  8 +++++---
>   6 files changed, 31 insertions(+), 12 deletions(-)
> 

> diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
> index a9846b68c6b9..cff60983e88d 100644
> --- a/arch/powerpc/include/asm/kfence.h
> +++ b/arch/powerpc/include/asm/kfence.h
> @@ -11,11 +11,25 @@
>   #include <linux/mm.h>
>   #include <asm/pgtable.h>
>   
> +#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC64_ELF_ABI_V2)

CONFIG_PPC64 && !CONFIG_PPC64_ELF_ABI_V2

is the same as

CONFIG_PPC64_ELF_ABI_V1

> +#define ARCH_FUNC_PREFIX "."
> +#endif
> +
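
i.e. the guard could simply be:

	#ifdef CONFIG_PPC64_ELF_ABI_V1
	#define ARCH_FUNC_PREFIX "."
	#endif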



Christophe

