* [PATCH 0/4] powerpc/64s: Enable KFENCE
@ 2021-05-17  6:16 Jordan Niethe
  2021-05-17  6:16 ` [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Jordan Niethe
                   ` (3 more replies)
  0 siblings, 4 replies; 10+ messages in thread
From: Jordan Niethe @ 2021-05-17  6:16 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Jordan Niethe, npiggin, aneesh.kumar

This adds support for radix to Christophe's series that enabled KFENCE on
powerpc/64s/hash:
https://lore.kernel.org/linuxppc-dev/8dfe1bd2abde26337c1d8c1ad0acfcc82185e0d5.1614868445.git.christophe.leroy@csgroup.eu/

The first patch implements DEBUG_PAGEALLOC for radix so that KFENCE can
reuse the same infrastructure.
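
At a high level the two features end up sharing one path. A rough sketch
of the call flow this series introduces (note enable == !protect):

	kfence_protect_page(addr, protect)
	  -> __kernel_map_pages(page, 1, !protect)
	       -> radix__kernel_map_pages()  (radix: set/clear _PAGE_PRESENT
	                                      via change_memory_attr())
	       -> hash__kernel_map_pages()   (hash: add/remove bolted HPTEs
	                                      for the linear mapping)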

This requires the "powerpc: Further Strict RWX support" series:
https://lore.kernel.org/linuxppc-dev/20210517032810.129949-1-jniethe5@gmail.com/ 

Christophe Leroy (3):
  powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in
    hash_utils
  powerpc/64s: Allow double call of kernel_[un]map_linear_page()
  powerpc: Enable KFENCE on BOOK3S/64

Jordan Niethe (1):
  powerpc/64s: Add DEBUG_PAGEALLOC for radix

 arch/powerpc/Kconfig                         |  2 +-
 arch/powerpc/include/asm/book3s/32/pgtable.h | 10 +++++++
 arch/powerpc/include/asm/book3s/64/hash.h    |  2 ++
 arch/powerpc/include/asm/book3s/64/pgtable.h | 19 ++++++++++++
 arch/powerpc/include/asm/book3s/64/radix.h   |  2 ++
 arch/powerpc/include/asm/kfence.h            | 19 ++++++++++++
 arch/powerpc/include/asm/nohash/pgtable.h    | 10 +++++++
 arch/powerpc/include/asm/set_memory.h        |  2 ++
 arch/powerpc/mm/book3s64/hash_utils.c        | 31 ++++++++++----------
 arch/powerpc/mm/book3s64/radix_pgtable.c     | 28 ++++++++++++++++--
 arch/powerpc/mm/pageattr.c                   |  6 ++++
 11 files changed, 113 insertions(+), 18 deletions(-)

-- 
2.25.1



* [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix
  2021-05-17  6:16 [PATCH 0/4] powerpc/64s: Enable KFENCE Jordan Niethe
@ 2021-05-17  6:16 ` Jordan Niethe
  2021-06-18  7:28   ` Daniel Axtens
  2021-05-17  6:16 ` [PATCH 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils Jordan Niethe
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 10+ messages in thread
From: Jordan Niethe @ 2021-05-17  6:16 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Jordan Niethe, npiggin, aneesh.kumar

There is support for DEBUG_PAGEALLOC on hash but not on radix. Add radix
support: map the linear mapping with PAGE_SIZE mappings when
debug_pagealloc is enabled, and implement radix__kernel_map_pages() by
setting and clearing _PAGE_PRESENT on the relevant kernel mappings.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
 arch/powerpc/include/asm/book3s/32/pgtable.h | 10 ++++++++
 arch/powerpc/include/asm/book3s/64/hash.h    |  2 ++
 arch/powerpc/include/asm/book3s/64/pgtable.h | 19 ++++++++++++++
 arch/powerpc/include/asm/book3s/64/radix.h   |  2 ++
 arch/powerpc/include/asm/nohash/pgtable.h    | 10 ++++++++
 arch/powerpc/include/asm/set_memory.h        |  2 ++
 arch/powerpc/mm/book3s64/hash_utils.c        |  2 +-
 arch/powerpc/mm/book3s64/radix_pgtable.c     | 26 ++++++++++++++++++--
 arch/powerpc/mm/pageattr.c                   |  6 +++++
 9 files changed, 76 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 83c65845a1a9..30533d409f7f 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -417,6 +417,16 @@ static inline unsigned long pte_pfn(pte_t pte)
 }
 
 /* Generic modifiers for PTE bits */
+static inline pte_t pte_mkabsent(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_PRESENT);
+}
+
+static inline pte_t pte_mkpresent(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_PRESENT);
+}
+
 static inline pte_t pte_wrprotect(pte_t pte)
 {
 	return __pte(pte_val(pte) & ~_PAGE_RW);
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index d959b0195ad9..f6171633cdc2 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -179,6 +179,8 @@ static inline unsigned long hash__pte_update(struct mm_struct *mm,
 	return old;
 }
 
+void hash__kernel_map_pages(struct page *page, int numpages, int enable);
+
 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
  * function doesn't need to flush the hash entry
  */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index a666d561b44d..b89482aed82a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -651,6 +651,16 @@ static inline unsigned long pte_pfn(pte_t pte)
 }
 
 /* Generic modifiers for PTE bits */
+static inline pte_t pte_mkabsent(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRESENT));
+}
+
+static inline pte_t pte_mkpresent(pte_t pte)
+{
+	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRESENT));
+}
+
 static inline pte_t pte_wrprotect(pte_t pte)
 {
 	if (unlikely(pte_savedwrite(pte)))
@@ -812,6 +822,15 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
  * Generic functions with hash/radix callbacks
  */
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	if (radix_enabled())
+		radix__kernel_map_pages(page, numpages, enable);
+	hash__kernel_map_pages(page, numpages, enable);
+}
+#endif
+
 static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 					   pte_t *ptep, pte_t entry,
 					   unsigned long address,
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 59cab558e2f0..d4fa28a77cc6 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -137,6 +137,8 @@ extern void radix__mark_rodata_ro(void);
 extern void radix__mark_initmem_nx(void);
 #endif
 
+void radix__kernel_map_pages(struct page *page, int numpages, int enable);
+
 extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
 					 pte_t entry, unsigned long address,
 					 int psize);
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index ac75f4ab0dba..2a57bbb5820a 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -125,6 +125,16 @@ static inline unsigned long pte_pfn(pte_t pte)	{
 	return pte_val(pte) >> PTE_RPN_SHIFT; }
 
 /* Generic modifiers for PTE bits */
+static inline pte_t pte_mkabsent(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_PRESENT);
+}
+
+static inline pte_t pte_mkpresent(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_PRESENT);
+}
+
 static inline pte_t pte_exprotect(pte_t pte)
 {
 	return __pte(pte_val(pte) & ~_PAGE_EXEC);
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
index b040094f7920..4b6dfaad4cc9 100644
--- a/arch/powerpc/include/asm/set_memory.h
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -6,6 +6,8 @@
 #define SET_MEMORY_RW	1
 #define SET_MEMORY_NX	2
 #define SET_MEMORY_X	3
+#define SET_MEMORY_EN	4
+#define SET_MEMORY_DIS	5
 
 int change_memory_attr(unsigned long addr, int numpages, long action);
 
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 96d9aa164007..5b9709075fbd 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1990,7 +1990,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 				     mmu_kernel_ssize, 0);
 }
 
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long flags, vaddr, lmi;
 	int i;
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 5fef8db3b463..2aa81b9e354a 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -16,6 +16,7 @@
 #include <linux/hugetlb.h>
 #include <linux/string_helpers.h>
 #include <linux/memory.h>
+#include <linux/set_memory.h>
 
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
@@ -330,9 +331,13 @@ static int __meminit create_physical_mapping(unsigned long start,
 static void __init radix_init_pgtable(void)
 {
 	unsigned long rts_field;
+	unsigned long size = radix_mem_block_size;
 	phys_addr_t start, end;
 	u64 i;
 
+	if (debug_pagealloc_enabled())
+		size = PAGE_SIZE;
+
 	/* We don't support slb for radix */
 	mmu_slb_size = 0;
 
@@ -352,7 +357,7 @@ static void __init radix_init_pgtable(void)
 		}
 
 		WARN_ON(create_physical_mapping(start, end,
-						radix_mem_block_size,
+						size,
 						-1, PAGE_KERNEL));
 	}
 
@@ -872,13 +877,18 @@ int __meminit radix__create_section_mapping(unsigned long start,
 					    unsigned long end, int nid,
 					    pgprot_t prot)
 {
+	unsigned long size = radix_mem_block_size;
+
+	if (debug_pagealloc_enabled())
+		size = PAGE_SIZE;
+
 	if (end >= RADIX_VMALLOC_START) {
 		pr_warn("Outside the supported range\n");
 		return -1;
 	}
 
 	return create_physical_mapping(__pa(start), __pa(end),
-				       radix_mem_block_size, nid, prot);
+				       size, nid, prot);
 }
 
 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
@@ -1165,3 +1175,15 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 
 	return 1;
 }
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void radix__kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	if (enable)
+		change_memory_attr(addr, numpages, SET_MEMORY_EN);
+	else
+		change_memory_attr(addr, numpages, SET_MEMORY_DIS);
+}
+#endif
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
index 0876216ceee6..d3db09447fa6 100644
--- a/arch/powerpc/mm/pageattr.c
+++ b/arch/powerpc/mm/pageattr.c
@@ -54,6 +54,12 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 	case SET_MEMORY_X:
 		pte = pte_mkexec(pte);
 		break;
+	case SET_MEMORY_DIS:
+		pte = pte_mkabsent(pte);
+		break;
+	case SET_MEMORY_EN:
+		pte = pte_mkpresent(pte);
+		break;
 	default:
 		WARN_ON_ONCE(1);
 		break;
-- 
2.25.1



* [PATCH 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils
  2021-05-17  6:16 [PATCH 0/4] powerpc/64s: Enable KFENCE Jordan Niethe
  2021-05-17  6:16 ` [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Jordan Niethe
@ 2021-05-17  6:16 ` Jordan Niethe
  2021-06-18  7:49   ` Daniel Axtens
  2021-05-17  6:16 ` [PATCH 3/4] powerpc/64s: Allow double call of kernel_[un]map_linear_page() Jordan Niethe
  2021-05-17  6:16 ` [PATCH 4/4] powerpc: Enable KFENCE on BOOK3S/64 Jordan Niethe
  3 siblings, 1 reply; 10+ messages in thread
From: Jordan Niethe @ 2021-05-17  6:16 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Jordan Niethe, npiggin, aneesh.kumar

From: Christophe Leroy <christophe.leroy@csgroup.eu>

debug_pagealloc_enabled() is always defined and constant folds to
'false' when CONFIG_DEBUG_PAGEALLOC is not enabled.

Remove the #ifdefs; the code and the associated static variables will
be optimised out by the compiler when CONFIG_DEBUG_PAGEALLOC is
not defined.
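
For reference, the helper follows roughly this pattern (simplified
sketch; the exact definition lives in include/linux/mm.h):

	static inline bool debug_pagealloc_enabled(void)
	{
		/*
		 * IS_ENABLED() is a compile-time constant, so with
		 * CONFIG_DEBUG_PAGEALLOC=n this folds to false and code
		 * guarded by it is discarded as dead code.
		 */
		return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
		       _debug_pagealloc_enabled_early;
	}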

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
 arch/powerpc/mm/book3s64/hash_utils.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 5b9709075fbd..d74482cce064 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -126,11 +126,8 @@ EXPORT_SYMBOL_GPL(mmu_slb_size);
 #ifdef CONFIG_PPC_64K_PAGES
 int mmu_ci_restrictions;
 #endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
 static u8 *linear_map_hash_slots;
 static unsigned long linear_map_hash_count;
-static DEFINE_SPINLOCK(linear_map_hash_lock);
-#endif /* CONFIG_DEBUG_PAGEALLOC */
 struct mmu_hash_ops mmu_hash_ops;
 EXPORT_SYMBOL(mmu_hash_ops);
 
@@ -326,11 +323,9 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			break;
 
 		cond_resched();
-#ifdef CONFIG_DEBUG_PAGEALLOC
 		if (debug_pagealloc_enabled() &&
 			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
-#endif /* CONFIG_DEBUG_PAGEALLOC */
 	}
 	return ret < 0 ? ret : 0;
 }
@@ -965,7 +960,6 @@ static void __init htab_initialize(void)
 
 	prot = pgprot_val(PAGE_KERNEL);
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 	if (debug_pagealloc_enabled()) {
 		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 		linear_map_hash_slots = memblock_alloc_try_nid(
@@ -975,7 +969,6 @@ static void __init htab_initialize(void)
 			panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
 			      __func__, linear_map_hash_count, &ppc64_rma_size);
 	}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 	/* create bolted the linear mapping in the hash table */
 	for_each_mem_range(i, &base, &end) {
@@ -1944,6 +1937,8 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
+static DEFINE_SPINLOCK(linear_map_hash_lock);
+
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 {
 	unsigned long hash;
-- 
2.25.1



* [PATCH 3/4] powerpc/64s: Allow double call of kernel_[un]map_linear_page()
  2021-05-17  6:16 [PATCH 0/4] powerpc/64s: Enable KFENCE Jordan Niethe
  2021-05-17  6:16 ` [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Jordan Niethe
  2021-05-17  6:16 ` [PATCH 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils Jordan Niethe
@ 2021-05-17  6:16 ` Jordan Niethe
  2021-05-17  6:16 ` [PATCH 4/4] powerpc: Enable KFENCE on BOOK3S/64 Jordan Niethe
  3 siblings, 0 replies; 10+ messages in thread
From: Jordan Niethe @ 2021-05-17  6:16 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Jordan Niethe, npiggin, aneesh.kumar

From: Christophe Leroy <christophe.leroy@csgroup.eu>

If the page is already mapped (when mapping) or already unmapped (when
unmapping), bail out.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
 arch/powerpc/mm/book3s64/hash_utils.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index d74482cce064..fe5cf1cf4dd5 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1953,6 +1953,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 	if (!vsid)
 		return;
 
+	if (linear_map_hash_slots[lmi] & 0x80)
+		return;
+
 	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
 				    HPTE_V_BOLTED,
 				    mmu_linear_psize, mmu_kernel_ssize);
@@ -1972,7 +1975,10 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
 	spin_lock(&linear_map_hash_lock);
-	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
+	if (!(linear_map_hash_slots[lmi] & 0x80)) {
+		spin_unlock(&linear_map_hash_lock);
+		return;
+	}
 	hidx = linear_map_hash_slots[lmi] & 0x7f;
 	linear_map_hash_slots[lmi] = 0;
 	spin_unlock(&linear_map_hash_lock);
-- 
2.25.1



* [PATCH 4/4] powerpc: Enable KFENCE on BOOK3S/64
  2021-05-17  6:16 [PATCH 0/4] powerpc/64s: Enable KFENCE Jordan Niethe
                   ` (2 preceding siblings ...)
  2021-05-17  6:16 ` [PATCH 3/4] powerpc/64s: Allow double call of kernel_[un]map_linear_page() Jordan Niethe
@ 2021-05-17  6:16 ` Jordan Niethe
  2021-06-18  8:00   ` Daniel Axtens
  2021-06-22  8:57   ` Michael Ellerman
  3 siblings, 2 replies; 10+ messages in thread
From: Jordan Niethe @ 2021-05-17  6:16 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Jordan Niethe, npiggin, aneesh.kumar

From: Christophe Leroy <christophe.leroy@csgroup.eu>

This reuses the DEBUG_PAGEALLOC logic: kfence_protect_page() is
implemented on top of __kernel_map_pages().

Tested with CONFIG_KFENCE + CONFIG_KUNIT + CONFIG_KFENCE_KUNIT_TEST on
radix and hash.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
[jpn: Handle radix]
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
 arch/powerpc/Kconfig                         |  2 +-
 arch/powerpc/include/asm/book3s/64/pgtable.h |  2 +-
 arch/powerpc/include/asm/kfence.h            | 19 +++++++++++++++++++
 arch/powerpc/mm/book3s64/hash_utils.c        | 12 ++++++------
 arch/powerpc/mm/book3s64/radix_pgtable.c     |  8 +++++---
 5 files changed, 32 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6df64d6815df..1743364d7370 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -196,7 +196,7 @@ config PPC
 	select HAVE_ARCH_KASAN			if PPC32 && PPC_PAGE_SHIFT <= 14
 	select HAVE_ARCH_KASAN_VMALLOC		if PPC32 && PPC_PAGE_SHIFT <= 14
 	select HAVE_ARCH_KGDB
-	select HAVE_ARCH_KFENCE			if PPC32
+	select HAVE_ARCH_KFENCE			if ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if COMPAT
 	select HAVE_ARCH_NVRAM_OPS
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index b89482aed82a..35300f2ee5d0 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -822,7 +822,7 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
  * Generic functions with hash/radix callbacks
  */
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (radix_enabled())
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
index a9846b68c6b9..9d388df7c1a8 100644
--- a/arch/powerpc/include/asm/kfence.h
+++ b/arch/powerpc/include/asm/kfence.h
@@ -11,11 +11,29 @@
 #include <linux/mm.h>
 #include <asm/pgtable.h>
 
+#if defined(CONFIG_PPC64) && !defined(PPC64_ELF_ABI_v2)
+#define ARCH_FUNC_PREFIX "."
+#endif
+
 static inline bool arch_kfence_init_pool(void)
 {
 	return true;
 }
 
+#ifdef CONFIG_PPC64
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+	struct page *page;
+
+	page = virt_to_page(addr);
+	if (protect)
+		__kernel_map_pages(page, 1, 0);
+	else
+		__kernel_map_pages(page, 1, 1);
+
+	return true;
+}
+#else
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
 	pte_t *kpte = virt_to_kpte(addr);
@@ -29,5 +47,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 
 	return true;
 }
+#endif
 
 #endif /* __ASM_POWERPC_KFENCE_H */
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index fe5cf1cf4dd5..fecb379426e7 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -323,8 +323,8 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			break;
 
 		cond_resched();
-		if (debug_pagealloc_enabled() &&
-			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
+		if (debug_pagealloc_enabled_or_kfence() &&
+		    (paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
 	}
 	return ret < 0 ? ret : 0;
@@ -672,7 +672,7 @@ static void __init htab_init_page_sizes(void)
 	bool aligned = true;
 	init_hpte_page_sizes();
 
-	if (!debug_pagealloc_enabled()) {
+	if (!debug_pagealloc_enabled_or_kfence()) {
 		/*
 		 * Pick a size for the linear mapping. Currently, we only
 		 * support 16M, 1M and 4K which is the default
@@ -960,7 +960,7 @@ static void __init htab_initialize(void)
 
 	prot = pgprot_val(PAGE_KERNEL);
 
-	if (debug_pagealloc_enabled()) {
+	if (debug_pagealloc_enabled_or_kfence()) {
 		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 		linear_map_hash_slots = memblock_alloc_try_nid(
 				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
@@ -1936,7 +1936,7 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
 	return slot;
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static DEFINE_SPINLOCK(linear_map_hash_lock);
 
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -2009,7 +2009,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 	}
 	local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
 
 void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 2aa81b9e354a..b984876ff1ca 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -33,6 +33,8 @@
 
 #include <trace/events/thp.h>
 
+#include <mm/mmu_decl.h>
+
 unsigned int mmu_pid_bits;
 unsigned int mmu_base_pid;
 unsigned long radix_mem_block_size __ro_after_init;
@@ -335,7 +337,7 @@ static void __init radix_init_pgtable(void)
 	phys_addr_t start, end;
 	u64 i;
 
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_or_kfence())
 		size = PAGE_SIZE;
 
 	/* We don't support slb for radix */
@@ -879,7 +881,7 @@ int __meminit radix__create_section_mapping(unsigned long start,
 {
 	unsigned long size = radix_mem_block_size;
 
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_or_kfence())
 		size = PAGE_SIZE;
 
 	if (end >= RADIX_VMALLOC_START) {
@@ -1176,7 +1178,7 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 	return 1;
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 void radix__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long addr = (unsigned long)page_address(page);
-- 
2.25.1



* Re: [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix
  2021-05-17  6:16 ` [PATCH 1/4] powerpc/64s: Add DEBUG_PAGEALLOC for radix Jordan Niethe
@ 2021-06-18  7:28   ` Daniel Axtens
  0 siblings, 0 replies; 10+ messages in thread
From: Daniel Axtens @ 2021-06-18  7:28 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev; +Cc: Jordan Niethe, npiggin, aneesh.kumar

Jordan Niethe <jniethe5@gmail.com> writes:

> There is support for DEBUG_PAGEALLOC on hash but not on radix.
> Add support on radix.

Somewhat off-topic but I wonder at what point we can drop the weird !PPC
condition in mm/Kconfig.debug:

config DEBUG_PAGEALLOC
        bool "Debug page memory allocations"
        depends on DEBUG_KERNEL
        depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC

I can't figure out from git history why it exists or if it still serves
any function. Given that HIBERNATION isn't much use on Book3S systems it
probably never matters; it just bugs me a bit.

Again, nothing that has to block this series, just maybe something to
follow up at some vague and undefined point in the future!

> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index a666d561b44d..b89482aed82a 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -812,6 +822,15 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
>   * Generic functions with hash/radix callbacks
>   */
>  
> +#ifdef CONFIG_DEBUG_PAGEALLOC
> +static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
> +{
> +	if (radix_enabled())
> +		radix__kernel_map_pages(page, numpages, enable);
> +	hash__kernel_map_pages(page, numpages, enable);


Does this require an else? On radix we will call both radix__ and
hash__kernel_map_pages.
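
i.e., if the fall-through is unintended, something like this (sketch):

	if (radix_enabled())
		radix__kernel_map_pages(page, numpages, enable);
	else
		hash__kernel_map_pages(page, numpages, enable);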

I notice we call both hash__ and radix__ in map_kernel_page under radix,
so maybe this makes sense?

I don't fully understand the mechanism by which memory removal works: it
looks like on radix you mark the page as 'absent', which I think is
enough? Then you fall through to the hash code here:

	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;

I think linear_map_hash_count will be zero unless it gets inited to a
non-zero value in htab_initialize(). I am fairly sure htab_initialize()
doesn't get called for a radix MMU. In that case you'll just `continue;`
out of every iteration of the loop, which would explain why nothing
weird would happen on radix.

Have I missed something here?

The rest of the patch looks good to me.

Kind regards,
Daniel


* Re: [PATCH 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils
  2021-05-17  6:16 ` [PATCH 2/4] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils Jordan Niethe
@ 2021-06-18  7:49   ` Daniel Axtens
  0 siblings, 0 replies; 10+ messages in thread
From: Daniel Axtens @ 2021-06-18  7:49 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev, christophe.leroy
  Cc: Jordan Niethe, npiggin, aneesh.kumar

Hi Jordan and Christophe,

> --- a/arch/powerpc/mm/book3s64/hash_utils.c
> +++ b/arch/powerpc/mm/book3s64/hash_utils.c
> @@ -126,11 +126,8 @@ EXPORT_SYMBOL_GPL(mmu_slb_size);
>  #ifdef CONFIG_PPC_64K_PAGES
>  int mmu_ci_restrictions;
>  #endif
> -#ifdef CONFIG_DEBUG_PAGEALLOC
>  static u8 *linear_map_hash_slots;
>  static unsigned long linear_map_hash_count;
> -static DEFINE_SPINLOCK(linear_map_hash_lock);
> -#endif /* CONFIG_DEBUG_PAGEALLOC */
>  struct mmu_hash_ops mmu_hash_ops;
>  EXPORT_SYMBOL(mmu_hash_ops);
>  

> @@ -1944,6 +1937,8 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
>  }
>  
>  #ifdef CONFIG_DEBUG_PAGEALLOC
> +static DEFINE_SPINLOCK(linear_map_hash_lock);
> +

I had some trouble figuring out why the spinlock has to be in the
ifdef. A bit of investigation suggests that it's only used in functions
that are only defined under CONFIG_DEBUG_PAGEALLOC - unlike the other
variables. So that makes sense.

While I was poking around, I noticed that linear_map_hash_slots is
manipulated under linear_map_hash_lock in kernel_(un)map_linear_page but
is manipulated outside the lock in htab_bolt_mapping(). Is that OK? (I
don't know when htab_bolt_mapping is called; it's possible it's only
called at times when nothing else could be happening to that array.)

Kind regards,
Daniel

>  static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
>  {
>  	unsigned long hash;
> -- 
> 2.25.1


* Re: [PATCH 4/4] powerpc: Enable KFENCE on BOOK3S/64
  2021-05-17  6:16 ` [PATCH 4/4] powerpc: Enable KFENCE on BOOK3S/64 Jordan Niethe
@ 2021-06-18  8:00   ` Daniel Axtens
  2021-06-18  8:02     ` Daniel Axtens
  2021-06-22  8:57   ` Michael Ellerman
  1 sibling, 1 reply; 10+ messages in thread
From: Daniel Axtens @ 2021-06-18  8:00 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev; +Cc: Jordan Niethe, npiggin, aneesh.kumar


> +#ifdef CONFIG_PPC64
> +static inline bool kfence_protect_page(unsigned long addr, bool protect)
> +{
> +	struct page *page;
> +
> +	page = virt_to_page(addr);
> +	if (protect)
> +		__kernel_map_pages(page, 1, 0);
> +	else
> +		__kernel_map_pages(page, 1, 1);

I lose track of the type conversions and code conventions involved, but
can we do something like __kernel_map_pages(page, 1, !!protect)?

Apart from that, this seems good.

Kind regards,
Daniel


* Re: [PATCH 4/4] powerpc: Enable KFENCE on BOOK3S/64
  2021-06-18  8:00   ` Daniel Axtens
@ 2021-06-18  8:02     ` Daniel Axtens
  0 siblings, 0 replies; 10+ messages in thread
From: Daniel Axtens @ 2021-06-18  8:02 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev; +Cc: Jordan Niethe, npiggin, aneesh.kumar

Daniel Axtens <dja@axtens.net> writes:

>> +#ifdef CONFIG_PPC64
>> +static inline bool kfence_protect_page(unsigned long addr, bool protect)
>> +{
>> +	struct page *page;
>> +
>> +	page = virt_to_page(addr);
>> +	if (protect)
>> +		__kernel_map_pages(page, 1, 0);
>> +	else
>> +		__kernel_map_pages(page, 1, 1);
>
> I lose track of the type conversions and code conventions involved, but
> can we do something like __kernel_map_pages(page, 1, !!protect)?

Ah, I missed that the if changed the truth/falsity. !protect, not
!!protect :P
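
So, something like this (sketch):

	static inline bool kfence_protect_page(unsigned long addr, bool protect)
	{
		__kernel_map_pages(virt_to_page(addr), 1, !protect);
		return true;
	}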
>
> Apart from that, this seems good.
>
> Kind regards,
> Daniel


* Re: [PATCH 4/4] powerpc: Enable KFENCE on BOOK3S/64
  2021-05-17  6:16 ` [PATCH 4/4] powerpc: Enable KFENCE on BOOK3S/64 Jordan Niethe
  2021-06-18  8:00   ` Daniel Axtens
@ 2021-06-22  8:57   ` Michael Ellerman
  1 sibling, 0 replies; 10+ messages in thread
From: Michael Ellerman @ 2021-06-22  8:57 UTC (permalink / raw)
  To: Jordan Niethe, linuxppc-dev; +Cc: aneesh.kumar, npiggin, Jordan Niethe

Jordan Niethe <jniethe5@gmail.com> writes:
> From: Christophe Leroy <christophe.leroy@csgroup.eu>
>
> This reuses the DEBUG_PAGEALLOC logic.
>
> Tested with CONFIG_KFENCE + CONFIG_KUNIT + CONFIG_KFENCE_KUNIT_TEST on
> radix and hash.
>
> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
> [jpn: Handle radix]
> Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
> ---
>  arch/powerpc/Kconfig                         |  2 +-
>  arch/powerpc/include/asm/book3s/64/pgtable.h |  2 +-
>  arch/powerpc/include/asm/kfence.h            | 19 +++++++++++++++++++
>  arch/powerpc/mm/book3s64/hash_utils.c        | 12 ++++++------
>  arch/powerpc/mm/book3s64/radix_pgtable.c     |  8 +++++---
>  5 files changed, 32 insertions(+), 11 deletions(-)

This makes lockdep very unhappy :(

  [   24.016750][    C0] ================================
  [   24.017145][    C0] WARNING: inconsistent lock state
  [   24.017600][    C0] 5.13.0-rc2-00196-g8bf29f9c76e2 #1 Not tainted
  [   24.018222][    C0] --------------------------------
  [   24.018612][    C0] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
  [   24.019146][    C0] S55runtest/104 [HC0[0]:SC1[1]:HE1:SE0] takes:
  [   24.019695][    C0] c00000000278bf50 (init_mm.page_table_lock){+.?.}-{2:2}, at: change_page_attr+0x54/0x290
  [   24.021847][    C0] {SOFTIRQ-ON-W} state was registered at:
  [   24.022353][    C0]   lock_acquire+0x128/0x600
  [   24.022941][    C0]   _raw_spin_lock+0x54/0x80
  [   24.023301][    C0]   change_page_attr+0x54/0x290
  [   24.023667][    C0]   __apply_to_page_range+0x550/0xa70
  [   24.024070][    C0]   change_memory_attr+0x7c/0x140
  [   24.024445][    C0]   bpf_prog_select_runtime+0x230/0x2a0
  [   24.024911][    C0]   bpf_migrate_filter+0x18c/0x1e0
  [   24.025310][    C0]   bpf_prog_create+0x178/0x1d0
  [   24.025681][    C0]   ptp_classifier_init+0x4c/0x80
  [   24.026090][    C0]   sock_init+0xe0/0x100
  [   24.026422][    C0]   do_one_initcall+0x88/0x4b0
  [   24.026790][    C0]   kernel_init_freeable+0x364/0x40c
  [   24.027196][    C0]   kernel_init+0x24/0x188
  [   24.027539][    C0]   ret_from_kernel_thread+0x5c/0x70
  [   24.027987][    C0] irq event stamp: 1322
  [   24.028315][    C0] hardirqs last  enabled at (1322): [<c0000000010996f4>] _raw_spin_unlock_irqrestore+0x94/0xd0
  [   24.029084][    C0] hardirqs last disabled at (1321): [<c000000001099268>] _raw_spin_lock_irqsave+0xa8/0xc0
  [   24.029813][    C0] softirqs last  enabled at (738): [<c00000000109a9b8>] __do_softirq+0x5f8/0x668
  [   24.030531][    C0] softirqs last disabled at (1271): [<c000000000159b84>] __irq_exit_rcu+0x1c4/0x1d0
  [   24.031271][    C0]
  [   24.031271][    C0] other info that might help us debug this:
  [   24.031917][    C0]  Possible unsafe locking scenario:
  [   24.031917][    C0]
  [   24.032460][    C0]        CPU0
  [   24.032720][    C0]        ----
  [   24.032980][    C0]   lock(init_mm.page_table_lock);
  [   24.033400][    C0]   <Interrupt>
  [   24.033668][    C0]     lock(init_mm.page_table_lock);
  [   24.034102][    C0]
  [   24.034102][    C0]  *** DEADLOCK ***
  [   24.034102][    C0]
  [   24.034735][    C0] 5 locks held by S55runtest/104:
  [   24.035162][    C0]  #0: c00000000a9ef098 (&tty->ldisc_sem){++++}-{0:0}, at: tty_ldisc_ref_wait+0x3c/0xa0
  [   24.035998][    C0]  #1: c00000000a9ef130 (&tty->atomic_write_lock){+.+.}-{3:3}, at: file_tty_write.constprop.0+0xd8/0x3b0
  [   24.036849][    C0]  #2: c00000000a9ef2e8 (&tty->termios_rwsem){++++}-{3:3}, at: n_tty_write+0xd0/0x6b0
  [   24.037591][    C0]  #3: c0080000001d2378 (&ldata->output_lock){+.+.}-{3:3}, at: n_tty_write+0x248/0x6b0
  [   24.038342][    C0]  #4: c000000002618448 (rcu_callback){....}-{0:0}, at: rcu_core+0x450/0x1360
  [   24.039093][    C0]
  [   24.039093][    C0] stack backtrace:
  [   24.039727][    C0] CPU: 0 PID: 104 Comm: S55runtest Not tainted 5.13.0-rc2-00196-g8bf29f9c76e2 #1
  [   24.040790][    C0] Call Trace:
  [   24.041120][    C0] [c00000000adc2be0] [c000000000940868] dump_stack+0xec/0x144 (unreliable)
  [   24.041925][    C0] [c00000000adc2c30] [c0000000001f1b38] print_usage_bug.part.0+0x24c/0x278
  [   24.042611][    C0] [c00000000adc2cd0] [c0000000001eb0c0] mark_lock+0x950/0xc00
  [   24.043186][    C0] [c00000000adc2df0] [c0000000001ebb74] __lock_acquire+0x494/0x28b0
  [   24.043794][    C0] [c00000000adc2f20] [c0000000001eeba8] lock_acquire+0x128/0x600
  [   24.044384][    C0] [c00000000adc3020] [c000000001098f64] _raw_spin_lock+0x54/0x80
  [   24.044976][    C0] [c00000000adc3050] [c00000000008aa14] change_page_attr+0x54/0x290
  [   24.045586][    C0] [c00000000adc30b0] [c0000000004347e0] __apply_to_page_range+0x550/0xa70
  [   24.046238][    C0] [c00000000adc31a0] [c00000000008accc] change_memory_attr+0x7c/0x140
  [   24.046857][    C0] [c00000000adc31e0] [c000000000099f78] radix__kernel_map_pages+0x68/0x80
  [   24.047501][    C0] [c00000000adc3200] [c0000000004a8028] kfence_protect+0x48/0x80
  [   24.048091][    C0] [c00000000adc3230] [c0000000004a84a8] kfence_guarded_free+0x448/0x590
  [   24.048718][    C0] [c00000000adc3290] [c00000000049e1b0] __slab_free+0x400/0x6c0
  [   24.049307][    C0] [c00000000adc3390] [c0000000004a471c] kmem_cache_free+0x1ac/0x4e0
  [   24.049917][    C0] [c00000000adc3450] [c000000000147a10] free_task+0x70/0xe0
  [   24.050491][    C0] [c00000000adc3480] [c000000000154084] delayed_put_task_struct+0x134/0x250
  [   24.051149][    C0] [c00000000adc34c0] [c00000000022bb94] rcu_core+0x4b4/0x1360
  [   24.051727][    C0] [c00000000adc3580] [c00000000109a5cc] __do_softirq+0x20c/0x668
  [   24.052331][    C0] [c00000000adc3680] [c000000000159b84] __irq_exit_rcu+0x1c4/0x1d0
  [   24.052937][    C0] [c00000000adc36b0] [c000000000159dd0] irq_exit+0x20/0x50
  [   24.053496][    C0] [c00000000adc36d0] [c000000000028478] timer_interrupt+0x1a8/0x520
  [   24.054111][    C0] [c00000000adc3730] [c0000000000098c4] decrementer_common_virt+0x1a4/0x1b0
  [   24.054790][    C0] --- interrupt: 900 at arch_local_irq_restore+0x118/0x180
  [   24.055373][    C0] NIP:  c0000000000164b8 LR: c000000001099700 CTR: 0000000000000000
  [   24.055991][    C0] REGS: c00000000adc37a0 TRAP: 0900   Not tainted  (5.13.0-rc2-00196-g8bf29f9c76e2)
  [   24.056678][    C0] MSR:  9000000000009033 <SF,HV,EE,ME,IR,DR,RI,LE>  CR: 28004202  XER: 20040000
  [   24.057665][    C0] CFAR: c0000000000163c4 IRQMASK: 0
  [   24.057665][    C0] GPR00: c0000000010996f4 c00000000adc3a40 c0000000027dce00 0000000000000000
  [   24.057665][    C0] GPR04: c00000000ae26dc0 0000000000000006 c00000000adc39f4 0000000000000001
  [   24.057665][    C0] GPR08: 0000000079ce0000 0000000000008002 0000000000000001 9000000000001033
  [   24.057665][    C0] GPR12: 0000000000004000 c0000000034c0000 0000000000000000 0000000000000000
  [   24.057665][    C0] GPR16: 0000000000000000 0000000000000013 c0080000001d0000 c0080000001d2310
  [   24.057665][    C0] GPR20: c00000000a9ef000 7fffffffffffffff c00000000a9ef510 c00000000ae26300
  [   24.057665][    C0] GPR24: 0000000000000000 c00000000a75d410 0000000000000010 c00000000a683b80
  [   24.057665][    C0] GPR28: 0000000000000000 0000000000000000 c00000000a683b80 0000000000000000
  [   24.063433][    C0] NIP [c0000000000164b8] arch_local_irq_restore+0x118/0x180
  [   24.063978][    C0] LR [c000000001099700] _raw_spin_unlock_irqrestore+0xa0/0xd0
  [   24.064546][    C0] --- interrupt: 900
  [   24.064854][    C0] [c00000000adc3a40] [c0000000010996f4] _raw_spin_unlock_irqrestore+0x94/0xd0 (unreliable)
  [   24.065595][    C0] [c00000000adc3a70] [c000000000a1a944] hvc_write+0xb4/0x230
  [   24.066165][    C0] [c00000000adc3ad0] [c0000000009eeb20] n_tty_write+0x1a0/0x6b0
  [   24.066744][    C0] [c00000000adc3ba0] [c0000000009e64e0] file_tty_write.constprop.0+0x190/0x3b0
  [   24.067405][    C0] [c00000000adc3c60] [c0000000004e087c] new_sync_write+0x12c/0x1d0
  [   24.067997][    C0] [c00000000adc3d00] [c0000000004e3fa0] vfs_write+0x2a0/0x4b0
  [   24.068567][    C0] [c00000000adc3d60] [c0000000004e44b4] ksys_write+0x84/0x140
  [   24.069141][    C0] [c00000000adc3db0] [c0000000000300a4] system_call_exception+0x174/0x2e0
  [   24.069785][    C0] [c00000000adc3e10] [c00000000000cb5c] system_call_common+0xec/0x278
  [   24.070404][    C0] --- interrupt: c00 at 0x7fffa8f9067c
  [   24.071243][    C0] NIP:  00007fffa8f9067c LR: 00007fffa8f8218c CTR: 0000000000000000
  [   24.071834][    C0] REGS: c00000000adc3e80 TRAP: 0c00   Not tainted  (5.13.0-rc2-00196-g8bf29f9c76e2)
  [   24.072510][    C0] MSR:  900000000280f033 <SF,HV,VEC,VSX,EE,PR,FP,ME,IR,DR,RI,LE>  CR: 20002202  XER: 00000000
  [   24.073428][    C0] IRQMASK: 0
  [   24.073428][    C0] GPR00: 0000000000000004 00007fffee2147b0 00007fffa8fd7c00 0000000000000001
  [   24.073428][    C0] GPR04: 00007fffa8fd6860 0000000000000013 0000000000000000 0000000000000000
  [   24.073428][    C0] GPR08: 0000000000000000 0000000000000000 0000000000000000 0000000000000000
  [   24.073428][    C0] GPR12: 0000000000000000 00007fffa8fd9960 0000000000000000 0000000000000000
  [   24.073428][    C0] GPR16: 0000000000000000 0000000000000000 0000000000000000 00000000100a3c28
  [   24.073428][    C0] GPR20: 00007fffee21ff98 00000000100a3be8 0000000000000000 00000000100e0d60
  [   24.073428][    C0] GPR24: 0000000000000000 00007fffa8fd6468 0000000000000020 0000000000000013
  [   24.073428][    C0] GPR28: 00007fffa8fd6860 0000000000000001 00007fffa8fd9960 0000000000000004
  [   24.079055][    C0] NIP [00007fffa8f9067c] 0x7fffa8f9067c
  [   24.079482][    C0] LR [00007fffa8f8218c] 0x7fffa8f8218c
  [   24.079901][    C0] --- interrupt: c00


AFAICS this is caused by kfence calling into change_page_attr() from
softirq context and taking init_mm.page_table_lock, while elsewhere we
take init_mm.page_table_lock with interrupts enabled.

To fix it I guess we either need to make change_page_attr() safe to call
from anywhere, or do something kfence-specific.
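
For the first option, perhaps something along these lines (untested
sketch; every other taker of init_mm.page_table_lock would need the same
treatment):

	static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
	{
		long action = (long)data;
		unsigned long flags;

		/* _irqsave so the lock is safe to take from softirq context */
		spin_lock_irqsave(&init_mm.page_table_lock, flags);

		/* ... existing SET_MEMORY_* handling unchanged ... */

		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);

		return 0;
	}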

cheers
