linuxppc-dev.lists.ozlabs.org archive mirror
* [PATCH v5 1/5] powerpc: mm: Replace p{u,m,4}d_is_leaf with p{u,m,4}d_leaf
@ 2022-11-18  0:21 Rohan McLure
  2022-11-18  0:21 ` [PATCH v5 2/5] powerpc: mm: Implement p{m,u,4}d_leaf on all platforms Rohan McLure
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Rohan McLure @ 2022-11-18  0:21 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Rohan McLure

Replace occurrences of p{u,m,4}d_is_leaf with p{u,m,4}d_leaf, as the
latter is the name used throughout all other architectures for the check
that a higher-level entry in multi-level paging contains a page
translation entry (pte).

A future patch will implement p{u,m,4}d_leaf stubs on all platforms so
that they may be referenced in generic code.
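
For context, the leaf test is what lets a generic page-table walker stop
descending once an entry maps a huge page directly instead of pointing to
a lower-level table. A minimal sketch of such a walker fragment (purely
illustrative, not taken from this series; note_huge_mapping() and
walk_pte_table() are made-up helpers):

	static void walk_one_pmd(pmd_t *pmdp, unsigned long addr)
	{
		if (!pmd_present(*pmdp))
			return;

		if (pmd_leaf(*pmdp)) {
			/* The entry itself is a (huge) page translation. */
			note_huge_mapping(addr, pmd_val(*pmdp));
			return;
		}

		/* Otherwise it points to a pte table; keep descending. */
		walk_pte_table(pmdp, addr);
	}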

Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
---
V4: New patch
V5: Previously this patch also replaced the stub definitions for *_is_leaf
with *_leaf; that is now done in a later patch
---
 arch/powerpc/kvm/book3s_64_mmu_radix.c   | 12 ++++++------
 arch/powerpc/mm/book3s64/radix_pgtable.c | 14 +++++++-------
 arch/powerpc/mm/pgtable.c                |  6 +++---
 arch/powerpc/mm/pgtable_64.c             |  6 +++---
 arch/powerpc/xmon/xmon.c                 |  6 +++---
 5 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 5d5e12f3bf86..d29f8d1d97a6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -497,7 +497,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
 	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
 		if (!pmd_present(*p))
 			continue;
-		if (pmd_is_leaf(*p)) {
+		if (pmd_leaf(*p)) {
 			if (full) {
 				pmd_clear(p);
 			} else {
@@ -526,7 +526,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
 	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
 		if (!pud_present(*p))
 			continue;
-		if (pud_is_leaf(*p)) {
+		if (pud_leaf(*p)) {
 			pud_clear(p);
 		} else {
 			pmd_t *pmd;
@@ -629,12 +629,12 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pud = pud_alloc_one(kvm->mm, gpa);
 
 	pmd = NULL;
-	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
+	if (pud && pud_present(*pud) && !pud_leaf(*pud))
 		pmd = pmd_offset(pud, gpa);
 	else if (level <= 1)
 		new_pmd = kvmppc_pmd_alloc();
 
-	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
+	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_leaf(*pmd)))
 		new_ptep = kvmppc_pte_alloc();
 
 	/* Check if we might have been invalidated; let the guest retry if so */
@@ -652,7 +652,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pud = NULL;
 	}
 	pud = pud_offset(p4d, gpa);
-	if (pud_is_leaf(*pud)) {
+	if (pud_leaf(*pud)) {
 		unsigned long hgpa = gpa & PUD_MASK;
 
 		/* Check if we raced and someone else has set the same thing */
@@ -703,7 +703,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pmd = NULL;
 	}
 	pmd = pmd_offset(pud, gpa);
-	if (pmd_is_leaf(*pmd)) {
+	if (pmd_leaf(*pmd)) {
 		unsigned long lgpa = gpa & PMD_MASK;
 
 		/* Check if we raced and someone else has set the same thing */
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index cac727b01799..8ac27e031ff4 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -205,14 +205,14 @@ static void radix__change_memory_range(unsigned long start, unsigned long end,
 		pudp = pud_alloc(&init_mm, p4dp, idx);
 		if (!pudp)
 			continue;
-		if (pud_is_leaf(*pudp)) {
+		if (pud_leaf(*pudp)) {
 			ptep = (pte_t *)pudp;
 			goto update_the_pte;
 		}
 		pmdp = pmd_alloc(&init_mm, pudp, idx);
 		if (!pmdp)
 			continue;
-		if (pmd_is_leaf(*pmdp)) {
+		if (pmd_leaf(*pmdp)) {
 			ptep = pmdp_ptep(pmdp);
 			goto update_the_pte;
 		}
@@ -762,7 +762,7 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
 		if (!pmd_present(*pmd))
 			continue;
 
-		if (pmd_is_leaf(*pmd)) {
+		if (pmd_leaf(*pmd)) {
 			if (!IS_ALIGNED(addr, PMD_SIZE) ||
 			    !IS_ALIGNED(next, PMD_SIZE)) {
 				WARN_ONCE(1, "%s: unaligned range\n", __func__);
@@ -792,7 +792,7 @@ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
 		if (!pud_present(*pud))
 			continue;
 
-		if (pud_is_leaf(*pud)) {
+		if (pud_leaf(*pud)) {
 			if (!IS_ALIGNED(addr, PUD_SIZE) ||
 			    !IS_ALIGNED(next, PUD_SIZE)) {
 				WARN_ONCE(1, "%s: unaligned range\n", __func__);
@@ -825,7 +825,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
 		if (!p4d_present(*p4d))
 			continue;
 
-		if (p4d_is_leaf(*p4d)) {
+		if (p4d_leaf(*p4d)) {
 			if (!IS_ALIGNED(addr, P4D_SIZE) ||
 			    !IS_ALIGNED(next, P4D_SIZE)) {
 				WARN_ONCE(1, "%s: unaligned range\n", __func__);
@@ -1088,7 +1088,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
 
 int pud_clear_huge(pud_t *pud)
 {
-	if (pud_is_leaf(*pud)) {
+	if (pud_leaf(*pud)) {
 		pud_clear(pud);
 		return 1;
 	}
@@ -1135,7 +1135,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
 
 int pmd_clear_huge(pmd_t *pmd)
 {
-	if (pmd_is_leaf(*pmd)) {
+	if (pmd_leaf(*pmd)) {
 		pmd_clear(pmd);
 		return 1;
 	}
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index cb2dcdb18f8e..35b9677b9553 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -387,7 +387,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 	if (p4d_none(p4d))
 		return NULL;
 
-	if (p4d_is_leaf(p4d)) {
+	if (p4d_leaf(p4d)) {
 		ret_pte = (pte_t *)p4dp;
 		goto out;
 	}
@@ -409,7 +409,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 	if (pud_none(pud))
 		return NULL;
 
-	if (pud_is_leaf(pud)) {
+	if (pud_leaf(pud)) {
 		ret_pte = (pte_t *)pudp;
 		goto out;
 	}
@@ -448,7 +448,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 		goto out;
 	}
 
-	if (pmd_is_leaf(pmd)) {
+	if (pmd_leaf(pmd)) {
 		ret_pte = (pte_t *)pmdp;
 		goto out;
 	}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 5ac1fd30341b..0604c80dae66 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
 /* 4 level page table */
 struct page *p4d_page(p4d_t p4d)
 {
-	if (p4d_is_leaf(p4d)) {
+	if (p4d_leaf(p4d)) {
 		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
 			VM_WARN_ON(!p4d_huge(p4d));
 		return pte_page(p4d_pte(p4d));
@@ -111,7 +111,7 @@ struct page *p4d_page(p4d_t p4d)
 
 struct page *pud_page(pud_t pud)
 {
-	if (pud_is_leaf(pud)) {
+	if (pud_leaf(pud)) {
 		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
 			VM_WARN_ON(!pud_huge(pud));
 		return pte_page(pud_pte(pud));
@@ -125,7 +125,7 @@ struct page *pud_page(pud_t pud)
  */
 struct page *pmd_page(pmd_t pmd)
 {
-	if (pmd_is_leaf(pmd)) {
+	if (pmd_leaf(pmd)) {
 		/*
 		 * vmalloc_to_page may be called on any vmap address (not only
 		 * vmalloc), and it uses pmd_page() etc., when huge vmap is
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index f51c882bf902..705c230dd4f5 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -3342,7 +3342,7 @@ static void show_pte(unsigned long addr)
 		return;
 	}
 
-	if (p4d_is_leaf(*p4dp)) {
+	if (p4d_leaf(*p4dp)) {
 		format_pte(p4dp, p4d_val(*p4dp));
 		return;
 	}
@@ -3356,7 +3356,7 @@ static void show_pte(unsigned long addr)
 		return;
 	}
 
-	if (pud_is_leaf(*pudp)) {
+	if (pud_leaf(*pudp)) {
 		format_pte(pudp, pud_val(*pudp));
 		return;
 	}
@@ -3370,7 +3370,7 @@ static void show_pte(unsigned long addr)
 		return;
 	}
 
-	if (pmd_is_leaf(*pmdp)) {
+	if (pmd_leaf(*pmdp)) {
 		format_pte(pmdp, pmd_val(*pmdp));
 		return;
 	}
-- 
2.37.2



* [PATCH v5 2/5] powerpc: mm: Implement p{m,u,4}d_leaf on all platforms
  2022-11-18  0:21 [PATCH v5 1/5] powerpc: mm: Replace p{u,m,4}d_is_leaf with p{u,m,4}d_leaf Rohan McLure
@ 2022-11-18  0:21 ` Rohan McLure
  2022-12-06 22:24   ` Michael Ellerman
  2022-11-18  0:21 ` [PATCH v5 3/5] powerpc: mm: Add common pud_pfn stub for " Rohan McLure
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 8+ messages in thread
From: Rohan McLure @ 2022-11-18  0:21 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Rohan McLure

The check that a higher-level entry in multi-level paging contains a page
translation entry (pte) is performed by the p{m,u,4}d_leaf stubs, which may
be specialised for each choice of mmu. In a prior commit, uses of the
catch-all stubs p{m,u,4}d_is_leaf were replaced with p{m,u,4}d_leaf.

Replace the catch-all stub definitions for p{m,u,4}d_is_leaf with
definitions for p{m,u,4}d_leaf. A future patch will assume that
p{m,u,4}d_leaf is defined on all platforms.

In particular, implement pud_leaf for Book3E-64, pmd_leaf for all Book3E
and Book3S-64 platforms, with a catch-all definition for p4d_leaf.

Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
---
v5: Split patch that replaces p{m,u,4}d_is_leaf into two patches, first
replacing callsites and afterward providing generic definition.
Remove ifndef-defines implementing p{m,u}d_leaf in favour of
implementing stubs in headers belonging to the particular platforms
needing them.
---
 arch/powerpc/include/asm/book3s/32/pgtable.h |  4 ++++
 arch/powerpc/include/asm/book3s/64/pgtable.h |  8 ++-----
 arch/powerpc/include/asm/nohash/64/pgtable.h |  5 +++++
 arch/powerpc/include/asm/nohash/pgtable.h    |  5 +++++
 arch/powerpc/include/asm/pgtable.h           | 22 ++------------------
 5 files changed, 18 insertions(+), 26 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 75823f39e042..921ae95cd939 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -242,6 +242,10 @@ static inline void pmd_clear(pmd_t *pmdp)
 	*pmdp = __pmd(0);
 }
 
+static inline bool pmd_leaf(pmd_t pmd)
+{
+	return false;
+}
 
 /*
  * When flushing the tlb entry for a page, we also need to flush the hash
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index c436d8422654..9a6db4ad3228 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1426,16 +1426,12 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
 /*
  * Like pmd_huge() and pmd_large(), but works regardless of config options
  */
-#define pmd_is_leaf pmd_is_leaf
-#define pmd_leaf pmd_is_leaf
-static inline bool pmd_is_leaf(pmd_t pmd)
+static inline bool pmd_leaf(pmd_t pmd)
 {
 	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
 }
 
-#define pud_is_leaf pud_is_leaf
-#define pud_leaf pud_is_leaf
-static inline bool pud_is_leaf(pud_t pud)
+static inline bool pud_leaf(pud_t pud)
 {
 	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
 }
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 879e9a6e5a87..34e618bb9140 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -141,6 +141,11 @@ static inline void pud_clear(pud_t *pudp)
 	*pudp = __pud(0);
 }
 
+static inline bool pud_leaf(pud_t pud)
+{
+	return false;
+}
+
 #define pud_none(pud)		(!pud_val(pud))
 #define	pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
 				 || (pud_val(pud) & PUD_BAD_BITS))
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index d9067dfc531c..455ae13822ee 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -60,6 +60,11 @@ static inline bool pte_hw_valid(pte_t pte)
 	return pte_val(pte) & _PAGE_PRESENT;
 }
 
+static inline bool pmd_leaf(pmd_t pmd)
+{
+	return false;
+}
+
 /*
  * Don't just check for any non zero bits in __PAGE_USER, since for book3e
  * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 283f40d05a4d..68a3f271aebe 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -134,29 +134,11 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
 }
 #endif
 
-#ifndef pmd_is_leaf
-#define pmd_is_leaf pmd_is_leaf
-static inline bool pmd_is_leaf(pmd_t pmd)
+#define p4d_leaf p4d_leaf
+static inline bool p4d_leaf(p4d_t p4d)
 {
 	return false;
 }
-#endif
-
-#ifndef pud_is_leaf
-#define pud_is_leaf pud_is_leaf
-static inline bool pud_is_leaf(pud_t pud)
-{
-	return false;
-}
-#endif
-
-#ifndef p4d_is_leaf
-#define p4d_is_leaf p4d_is_leaf
-static inline bool p4d_is_leaf(p4d_t p4d)
-{
-	return false;
-}
-#endif
 
 #define pmd_pgtable pmd_pgtable
 static inline pgtable_t pmd_pgtable(pmd_t pmd)
-- 
2.37.2



* [PATCH v5 3/5] powerpc: mm: Add common pud_pfn stub for all platforms
  2022-11-18  0:21 [PATCH v5 1/5] powerpc: mm: Replace p{u,m,4}d_is_leaf with p{u,m,4}d_leaf Rohan McLure
  2022-11-18  0:21 ` [PATCH v5 2/5] powerpc: mm: Implement p{m,u,4}d_leaf on all platforms Rohan McLure
@ 2022-11-18  0:21 ` Rohan McLure
  2022-11-18  0:21 ` [PATCH v5 4/5] powerpc: mm: add p{te,md,ud}_user_accessible_page helpers Rohan McLure
  2022-11-18  0:21 ` [PATCH v5 5/5] powerpc: mm: support page table check Rohan McLure
  3 siblings, 0 replies; 8+ messages in thread
From: Rohan McLure @ 2022-11-18  0:21 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Rohan McLure

Prior to this commit, pud_pfn was implemented with BUILD_BUG as the inline
function for 64-bit Book3S systems, but the call is never emitted, as its
invocations in generic code are guarded by calls to pud_devmap which return
zero on such systems. A future patch will provide support for page table
checks, whose generic code depends on a pud_pfn stub being implemented,
even though that patch will not interact with puds directly.

Remove the 64-bit Book3S stub and define pud_pfn to warn on all
platforms. pud_pfn may be defined properly on a per-platform basis
should it grow real usages in future.
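
As an illustration of the guard described above, the generic callers have
roughly the following shape (a paraphrase for illustration only, not an
exact quote of the generic mm code; the function name is made up):

	/* Sketch of a generic caller: pud_pfn() sits behind pud_devmap(). */
	static unsigned long pfn_of_devmap_pud(pud_t *pud)
	{
		if (!pud_devmap(*pud))
			return 0;	/* always the case on these platforms */
		return pud_pfn(*pud);
	}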

Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
---
V2: Remove conditional BUILD_BUG and BUG. Instead warn on usage.
V3: Replace WARN with WARN_ONCE, which should suffice to demonstrate
misuse of puds.
---
 arch/powerpc/include/asm/book3s/64/pgtable.h | 10 ----------
 arch/powerpc/include/asm/pgtable.h           | 14 ++++++++++++++
 2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 9a6db4ad3228..5fb910ab250d 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1394,16 +1394,6 @@ static inline int pgd_devmap(pgd_t pgd)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static inline int pud_pfn(pud_t pud)
-{
-	/*
-	 * Currently all calls to pud_pfn() are gated around a pud_devmap()
-	 * check so this should never be used. If it grows another user we
-	 * want to know about it.
-	 */
-	BUILD_BUG();
-	return 0;
-}
 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
 pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
 void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 68a3f271aebe..5db9fa157685 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -159,6 +159,20 @@ struct seq_file;
 void arch_report_meminfo(struct seq_file *m);
 #endif /* CONFIG_PPC64 */
 
+/*
+ * Currently only consumed by page_table_check_pud_{set,clear}. Since clears
+ * and sets to page table entries at any level are done through
+ * page_table_check_pte_{set,clear}, provide stub implementation.
+ */
+#ifndef pud_pfn
+#define pud_pfn pud_pfn
+static inline int pud_pfn(pud_t pud)
+{
+	WARN_ONCE(1, "pud: platform does not use pud entries directly");
+	return 0;
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_H */
-- 
2.37.2



* [PATCH v5 4/5] powerpc: mm: add p{te,md,ud}_user_accessible_page helpers
  2022-11-18  0:21 [PATCH v5 1/5] powerpc: mm: Replace p{u,m,4}d_is_leaf with p{u,m,4}d_leaf Rohan McLure
  2022-11-18  0:21 ` [PATCH v5 2/5] powerpc: mm: Implement p{m,u,4}d_leaf on all platforms Rohan McLure
  2022-11-18  0:21 ` [PATCH v5 3/5] powerpc: mm: Add common pud_pfn stub for " Rohan McLure
@ 2022-11-18  0:21 ` Rohan McLure
  2022-11-18  0:21 ` [PATCH v5 5/5] powerpc: mm: support page table check Rohan McLure
  3 siblings, 0 replies; 8+ messages in thread
From: Rohan McLure @ 2022-11-18  0:21 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Rohan McLure

Add the following helpers for detecting whether a page table entry
is a leaf and is accessible to user space.

 * pte_user_accessible_page
 * pmd_user_accessible_page
 * pud_user_accessible_page

Also implement missing pud_user definitions for both Book3S/nohash 64-bit
systems, and pmd_user for Book3S/nohash 32-bit systems.
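
The intended consumer is the generic page table check code, which only
tracks entries that map user-accessible memory. A simplified paraphrase of
that consumer, for illustration only (see mm/page_table_check.c for the
real implementation):

	void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte)
	{
		if (mm == &init_mm)
			return;		/* kernel mappings are not tracked */

		__page_table_check_pte_clear(mm, addr, *ptep);
		if (pte_user_accessible_page(pte))
			page_table_check_set(mm, addr, pte_pfn(pte),
					     PAGE_SIZE >> PAGE_SHIFT,
					     pte_write(pte));
	}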

Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
---
V2: Provide missing pud_user implementations, use p{u,m}d_is_leaf.
V3: Provide missing pmd_user implementations as stubs in 32-bit.
V4: Use pmd_leaf, pud_leaf, and define pmd_user for 32 Book3E with
static inline method rather than macro.
---
 arch/powerpc/include/asm/book3s/32/pgtable.h |  4 ++++
 arch/powerpc/include/asm/book3s/64/pgtable.h | 10 ++++++++++
 arch/powerpc/include/asm/nohash/32/pgtable.h |  5 +++++
 arch/powerpc/include/asm/nohash/64/pgtable.h | 10 ++++++++++
 arch/powerpc/include/asm/pgtable.h           | 15 +++++++++++++++
 5 files changed, 44 insertions(+)

diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 921ae95cd939..b2fdd3cc81de 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -515,6 +515,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 }
 
+static inline bool pmd_user(pmd_t pmd)
+{
+	return 0;
+}
 
 
 /* This low level function performs the actual PTE insertion
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 5fb910ab250d..e04e3cd378a6 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -602,6 +602,16 @@ static inline bool pte_user(pte_t pte)
 	return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
 }
 
+static inline bool pmd_user(pmd_t pmd)
+{
+	return !(pmd_raw(pmd) & cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
+static inline bool pud_user(pud_t pud)
+{
+	return !(pud_raw(pud) & cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
 #define pte_access_permitted pte_access_permitted
 static inline bool pte_access_permitted(pte_t pte, bool write)
 {
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 0d40b33184eb..94b2a53f73d5 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -209,6 +209,11 @@ static inline void pmd_clear(pmd_t *pmdp)
 	*pmdp = __pmd(0);
 }
 
+static inline bool pmd_user(pmd_t pmd)
+{
+	return false;
+}
+
 /*
  * PTE updates. This function is called whenever an existing
  * valid PTE is updated. This does -not- include set_pte_at()
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 34e618bb9140..69304159aafc 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -123,6 +123,11 @@ static inline pte_t pmd_pte(pmd_t pmd)
 	return __pte(pmd_val(pmd));
 }
 
+static inline bool pmd_user(pmd_t pmd)
+{
+	return (pmd_val(pmd) & _PAGE_USER) == _PAGE_USER;
+}
+
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define	pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
 				 || (pmd_val(pmd) & PMD_BAD_BITS))
@@ -163,6 +168,11 @@ static inline pte_t pud_pte(pud_t pud)
 	return __pte(pud_val(pud));
 }
 
+static inline bool pud_user(pud_t pud)
+{
+	return (pud_val(pud) & _PAGE_USER) == _PAGE_USER;
+}
+
 static inline pud_t pte_pud(pte_t pte)
 {
 	return __pud(pte_val(pte));
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 5db9fa157685..5227bbaa7acf 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -173,6 +173,21 @@ static inline int pud_pfn(pud_t pud)
 }
 #endif
 
+static inline bool pte_user_accessible_page(pte_t pte)
+{
+	return pte_present(pte) && pte_user(pte);
+}
+
+static inline bool pmd_user_accessible_page(pmd_t pmd)
+{
+	return pmd_leaf(pmd) && pmd_present(pmd) && pmd_user(pmd);
+}
+
+static inline bool pud_user_accessible_page(pud_t pud)
+{
+	return pud_leaf(pud) && pud_present(pud) && pud_user(pud);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_H */
-- 
2.37.2



* [PATCH v5 5/5] powerpc: mm: support page table check
  2022-11-18  0:21 [PATCH v5 1/5] powerpc: mm: Replace p{u,m,4}d_is_leaf with p{u,m,4}d_leaf Rohan McLure
                   ` (2 preceding siblings ...)
  2022-11-18  0:21 ` [PATCH v5 4/5] powerpc: mm: add p{te,md,ud}_user_accessible_page helpers Rohan McLure
@ 2022-11-18  0:21 ` Rohan McLure
  2022-12-06 23:04   ` Michael Ellerman
  3 siblings, 1 reply; 8+ messages in thread
From: Rohan McLure @ 2022-11-18  0:21 UTC (permalink / raw)
  To: linuxppc-dev; +Cc: Rohan McLure

Instrument the creation and clearing of page table mappings by invoking
page_table_check_pte_set and page_table_check_pte_clear respectively.
These calls serve as a sanity check against illegal mappings.

Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all ppc64 platforms, and for
32-bit platforms implementing Book3S.

Change pud_pfn to be a runtime bug rather than a build bug as it is
consumed by page_table_check_pud_{clear,set} which are not called.

See also:

riscv support in commit 3fee229a8eb9 ("riscv/mm: enable
ARCH_SUPPORTS_PAGE_TABLE_CHECK")
arm64 in commit 42b2547137f5 ("arm64/mm: enable
ARCH_SUPPORTS_PAGE_TABLE_CHECK")
x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table
check")

Reviewed-by: Russell Currey <ruscur@russell.cc>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
---
V2: Update spacing and types assigned to pte_update calls.
V3: Update one last pte_update call to remove __pte invocation.
---
 arch/powerpc/Kconfig                         |  1 +
 arch/powerpc/include/asm/book3s/32/pgtable.h |  9 ++++++++-
 arch/powerpc/include/asm/book3s/64/pgtable.h | 18 +++++++++++++++---
 arch/powerpc/include/asm/nohash/32/pgtable.h |  7 ++++++-
 arch/powerpc/include/asm/nohash/64/pgtable.h |  8 ++++++--
 arch/powerpc/include/asm/nohash/pgtable.h    |  1 +
 6 files changed, 37 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 699df27b0e2f..1d943a13a204 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -150,6 +150,7 @@ config PPC
 	select ARCH_STACKWALK
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_DEBUG_PAGEALLOC	if PPC_BOOK3S || PPC_8xx || 40x
+	select ARCH_SUPPORTS_PAGE_TABLE_CHECK
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF		if PPC64
 	select ARCH_USE_MEMTEST
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index b2fdd3cc81de..44703c8c590c 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -53,6 +53,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/page_table_check.h>
+
 static inline bool pte_user(pte_t pte)
 {
 	return pte_val(pte) & _PAGE_USER;
@@ -337,7 +339,11 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep)
 {
-	return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
+	pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
+
+	page_table_check_pte_clear(mm, addr, old_pte);
+
+	return old_pte;
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -529,6 +535,7 @@ static inline bool pmd_user(pmd_t pmd)
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, int percpu)
 {
+	page_table_check_pte_set(mm, addr, ptep, pte);
 #if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
 	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
 	 * helper pte_update() which does an atomic update. We need to do that
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index e04e3cd378a6..436632d04304 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -162,6 +162,8 @@
 #define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
 
 #ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
+
 /*
  * page table defines
  */
@@ -465,8 +467,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				       unsigned long addr, pte_t *ptep)
 {
-	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
-	return __pte(old);
+	pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+
+	page_table_check_pte_clear(mm, addr, old_pte);
+
+	return old_pte;
 }
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
@@ -475,11 +480,16 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 					    pte_t *ptep, int full)
 {
 	if (full && radix_enabled()) {
+		pte_t old_pte;
+
 		/*
 		 * We know that this is a full mm pte clear and
 		 * hence can be sure there is no parallel set_pte.
 		 */
-		return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+		old_pte = radix__ptep_get_and_clear_full(mm, addr, ptep, full);
+		page_table_check_pte_clear(mm, addr, old_pte);
+
+		return old_pte;
 	}
 	return ptep_get_and_clear(mm, addr, ptep);
 }
@@ -865,6 +875,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 */
 	pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
 
+	page_table_check_pte_set(mm, addr, ptep, pte);
+
 	if (radix_enabled())
 		return radix__set_pte_at(mm, addr, ptep, pte, percpu);
 	return hash__set_pte_at(mm, addr, ptep, pte, percpu);
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 94b2a53f73d5..b3b8a843a1bd 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -166,6 +166,7 @@ void unmap_kernel_page(unsigned long va);
 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
 
 #ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
 
 #define pte_clear(mm, addr, ptep) \
 	do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
@@ -310,7 +311,11 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep)
 {
-	return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
+	pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
+
+	page_table_check_pte_clear(mm, addr, old_pte);
+
+	return old_pte;
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 69304159aafc..2488da8f0deb 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -83,6 +83,7 @@
 #define H_PAGE_4K_PFN 0
 
 #ifndef __ASSEMBLY__
+#include <linux/page_table_check.h>
 /* pte_clear moved to later in this file */
 
 static inline pte_t pte_mkwrite(pte_t pte)
@@ -258,8 +259,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				       unsigned long addr, pte_t *ptep)
 {
-	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
-	return __pte(old);
+	pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+
+	page_table_check_pte_clear(mm, addr, old_pte);
+
+	return old_pte;
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 455ae13822ee..0454ed762d39 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -170,6 +170,7 @@ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, int percpu)
 {
+	page_table_check_pte_set(mm, addr, ptep, pte);
 	/* Second case is 32-bit with 64-bit PTE.  In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between.
-- 
2.37.2



* Re: [PATCH v5 2/5] powerpc: mm: Implement p{m,u,4}d_leaf on all platforms
  2022-11-18  0:21 ` [PATCH v5 2/5] powerpc: mm: Implement p{m,u,4}d_leaf on all platforms Rohan McLure
@ 2022-12-06 22:24   ` Michael Ellerman
  2022-12-06 23:30     ` Rohan McLure
  0 siblings, 1 reply; 8+ messages in thread
From: Michael Ellerman @ 2022-12-06 22:24 UTC (permalink / raw)
  To: Rohan McLure, linuxppc-dev; +Cc: Rohan McLure

Rohan McLure <rmclure@linux.ibm.com> writes:
> The check that a higher-level entry in multi-level pages contains a page
> translation entry (pte) is performed by p{m,u,4}d_leaf stubs, which may
> be specialised for each choice of mmu. In a prior commit, we replace
> uses to the catch-all stubs, p{m,u,4}d_is_leaf with p{m,u,4}d_leaf.
>
> Replace the catch-all stub definitions for p{m,u,4}d_is_leaf with
> definitions for p{m,u,4}d_leaf. A future patch will assume that
> p{m,u,4}d_leaf is defined on all platforms.
>
> In particular, implement pud_leaf for Book3E-64, pmd_leaf for all Book3E
> and Book3S-64 platforms, with a catch-all definition for p4d_leaf.
>
> Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
> ---
> v5: Split patch that replaces p{m,u,4}d_is_leaf into two patches, first
> replacing callsites and afterward providing generic definition.
> Remove ifndef-defines implementing p{m,u}d_leaf in favour of
> implementing stubs in headers belonging to the particular platforms
> needing them.
> ---
>  arch/powerpc/include/asm/book3s/32/pgtable.h |  4 ++++
>  arch/powerpc/include/asm/book3s/64/pgtable.h |  8 ++-----
>  arch/powerpc/include/asm/nohash/64/pgtable.h |  5 +++++
>  arch/powerpc/include/asm/nohash/pgtable.h    |  5 +++++
>  arch/powerpc/include/asm/pgtable.h           | 22 ++------------------
>  5 files changed, 18 insertions(+), 26 deletions(-)

I needed the delta below to prevent the generic versions being defined
and overriding our versions.

cheers

diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 44703c8c590c..117135be8cc2 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -244,6 +244,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 	*pmdp = __pmd(0);
 }
 
+#define pmd_leaf pmd_leaf
 static inline bool pmd_leaf(pmd_t pmd)
 {
 	return false;
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 436632d04304..f00aa2d203c2 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1438,11 +1438,13 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
 /*
  * Like pmd_huge() and pmd_large(), but works regardless of config options
  */
+#define pmd_leaf pmd_leaf
 static inline bool pmd_leaf(pmd_t pmd)
 {
 	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
 }
 
+#define pud_leaf pud_leaf
 static inline bool pud_leaf(pud_t pud)
 {
 	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 2488da8f0deb..d88b22c753d3 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -147,6 +147,7 @@ static inline void pud_clear(pud_t *pudp)
 	*pudp = __pud(0);
 }
 
+#define pud_leaf pud_leaf
 static inline bool pud_leaf(pud_t pud)
 {
 	return false;
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 487804f5b1d1..dfae1dbb9c3b 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -60,6 +60,7 @@ static inline bool pte_hw_valid(pte_t pte)
 	return pte_val(pte) & _PAGE_PRESENT;
 }
 
+#define pmd_leaf pmd_leaf
 static inline bool pmd_leaf(pmd_t pmd)
 {
 	return false;
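
For reference, the reason the extra #define lines matter: the generic
header's fallbacks are guarded by #ifndef, roughly as below, so without
"#define pmd_leaf pmd_leaf" the constant-0 fallback silently wins over the
arch's static inline. Paraphrased from include/linux/pgtable.h:

	/* generic fallback, used only when the arch did not define pmd_leaf */
	#ifndef pmd_leaf
	#define pmd_leaf(x)	0
	#endif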



* Re: [PATCH v5 5/5] powerpc: mm: support page table check
  2022-11-18  0:21 ` [PATCH v5 5/5] powerpc: mm: support page table check Rohan McLure
@ 2022-12-06 23:04   ` Michael Ellerman
  0 siblings, 0 replies; 8+ messages in thread
From: Michael Ellerman @ 2022-12-06 23:04 UTC (permalink / raw)
  To: Rohan McLure, linuxppc-dev; +Cc: Rohan McLure

Rohan McLure <rmclure@linux.ibm.com> writes:
> On creation and clearing of a page table mapping, instrument such calls
> by invoking page_table_check_pte_set and page_table_check_pte_clear
> respectively. These calls serve as a sanity check against illegal
> mappings.
>
> Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all ppc64, and 32-bit
> platforms implementing Book3S.
>
> Change pud_pfn to be a runtime bug rather than a build bug as it is
> consumed by page_table_check_pud_{clear,set} which are not called.
>
> See also:
>
> riscv support in commit 3fee229a8eb9 ("riscv/mm: enable
> ARCH_SUPPORTS_PAGE_TABLE_CHECK")
> arm64 in commit 42b2547137f5 ("arm64/mm: enable
> ARCH_SUPPORTS_PAGE_TABLE_CHECK")
> x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table
> check")
>
> Reviewed-by: Russell Currey <ruscur@russell.cc>
> Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
> Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>

This blows up for me when checking is enabled. This is a qemu pseries
KVM guest on a P9 host, booting Fedora 34. I haven't dug into what is
wrong yet.

cheers


[    0.600480][   T63] ------------[ cut here ]------------
[    0.600546][   T63] kernel BUG at mm/page_table_check.c:115!
[    0.600596][   T63] Oops: Exception in kernel mode, sig: 5 [#1]
[    0.600645][   T63] LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA pSeries
[    0.600703][   T63] Modules linked in:
[    0.600736][   T63] CPU: 0 PID: 63 Comm: systemd-bless-b Not tainted 6.1.0-rc2-00178-gf0c0e10f5162 #65
[    0.600803][   T63] Hardware name: IBM pSeries (emulated by qemu) POWER9 (raw) 0x4e1202 0xf000005 of:SLOF,git-5b4c5a hv:linux,kvm pSeries
[    0.600885][   T63] NIP:  c0000000004a7d58 LR: c0000000004a7d74 CTR: 0000000000000000
[    0.600942][   T63] REGS: c0000000067635e0 TRAP: 0700   Not tainted  (6.1.0-rc2-00178-gf0c0e10f5162)
[    0.601008][   T63] MSR:  8000000000029033 <SF,EE,ME,IR,DR,RI,LE>  CR: 44424420  XER: 00000003
[    0.601088][   T63] CFAR: c0000000004a7d4c IRQMASK: 0
[    0.601088][   T63] GPR00: c0000000004a7d74 c000000006763880 c000000001332500 c000000003a00408
[    0.601088][   T63] GPR04: 0000000000000001 0000000000000001 8603402f000000c0 0000000000000000
[    0.601088][   T63] GPR08: 00000000000005e0 0000000000000001 c000000002702500 0000000000002000
[    0.601088][   T63] GPR12: 00007fffbc810000 c000000002a20000 0000000040000000 0000000040000000
[    0.601088][   T63] GPR16: 0000000040000000 0000000000000000 0000000000000000 0000000040000000
[    0.601088][   T63] GPR20: ff7fffffffffefbf 0000000000000000 c000000003555a00 0000000000000001
[    0.601088][   T63] GPR24: c0000000028dda60 c00c000000020ac0 c0000000082a1100 c00c0000000bd000
[    0.601088][   T63] GPR28: 0000000000000001 c0000000026fcf60 0000000000000001 c000000003a00400
[    0.601614][   T63] NIP [c0000000004a7d58] page_table_check_set.part.0+0xc8/0x170
[    0.601675][   T63] LR [c0000000004a7d74] page_table_check_set.part.0+0xe4/0x170
[    0.601734][   T63] Call Trace:
[    0.601764][   T63] [c000000006763880] [c0000000067638c0] 0xc0000000067638c0 (unreliable)
[    0.601825][   T63] [c0000000067638c0] [c000000000087c28] set_pte_at+0x68/0x210
[    0.601884][   T63] [c000000006763910] [c000000000483fd8] __split_huge_pmd+0x7f8/0x11c0
[    0.601947][   T63] [c000000006763a20] [c000000000485908] vma_adjust_trans_huge+0x158/0x2d0
[    0.602006][   T63] [c000000006763a70] [c00000000040b5dc] __vma_adjust+0x13c/0xbe0
[    0.602067][   T63] [c000000006763b80] [c00000000040d708] __split_vma+0x158/0x270
[    0.602128][   T63] [c000000006763bd0] [c00000000040d938] do_mas_align_munmap.constprop.0+0x118/0x610
[    0.602196][   T63] [c000000006763cd0] [c000000000416228] sys_mremap+0x3c8/0x850
[    0.602255][   T63] [c000000006763e10] [c00000000002fab8] system_call_exception+0x128/0x330
[    0.602314][   T63] [c000000006763e50] [c00000000000d05c] system_call_vectored_common+0x15c/0x2ec
[    0.602384][   T63] --- interrupt: 3000 at 0x7fffbd94f86c
[    0.602425][   T63] NIP:  00007fffbd94f86c LR: 0000000000000000 CTR: 0000000000000000
[    0.602481][   T63] REGS: c000000006763e80 TRAP: 3000   Not tainted  (6.1.0-rc2-00178-gf0c0e10f5162)
[    0.602546][   T63] MSR:  800000000280f033 <SF,VEC,VSX,EE,PR,FP,ME,IR,DR,RI,LE>  CR: 44004422  XER: 00000000
[    0.602635][   T63] IRQMASK: 0
[    0.602635][   T63] GPR00: 00000000000000a3 00007ffff2c4e670 00007fffbda37000 00007fffbc400000
[    0.602635][   T63] GPR04: 0000000000410000 0000000000010000 0000000000000001 0000000000000000
[    0.602635][   T63] GPR08: 00007fffbc410000 0000000000000000 0000000000000000 0000000000000000
[    0.602635][   T63] GPR12: 0000000000000000 00007fffbc937d50 0000000000000000 0000000000000000
[    0.602635][   T63] GPR16: 0000000000000000 0000000000000000 0000000000000000 0000000000000000
[    0.602635][   T63] GPR20: 000000000000046b 00000000003fffff 00007ffff2c4e7e8 0000000000400000
[    0.602635][   T63] GPR24: 0000000000000000 0000000000000000 0000000000000480 0000000000410000
[    0.602635][   T63] GPR28: 00007fffbc400000 0000000000000000 0000000000010000 0000000000100000
[    0.603146][   T63] NIP [00007fffbd94f86c] 0x7fffbd94f86c
[    0.603186][   T63] LR [0000000000000000] 0x0
[    0.603226][   T63] --- interrupt: 3000
[    0.603256][   T63] Code: 0b090000 7c0004ac 7d201828 31290001 7d20192d 40c2fff4 7c0004ac 2c090001 7f89e378 41810008 39200000 5529063e <0b090000> e93d0000 37deffff 7fff4a14
[    0.603384][   T63] ---[ end trace 0000000000000000 ]---


* Re: [PATCH v5 2/5] powerpc: mm: Implement p{m,u,4}d_leaf on all platforms
  2022-12-06 22:24   ` Michael Ellerman
@ 2022-12-06 23:30     ` Rohan McLure
  0 siblings, 0 replies; 8+ messages in thread
From: Rohan McLure @ 2022-12-06 23:30 UTC (permalink / raw)
  To: Michael Ellerman; +Cc: linuxppc-dev

Great job spotting this. Somehow lost these throughout the revisions. Thanks.

> On 7 Dec 2022, at 9:24 am, Michael Ellerman <mpe@ellerman.id.au> wrote:
> 
> Rohan McLure <rmclure@linux.ibm.com> writes:
>> The check that a higher-level entry in multi-level pages contains a page
>> translation entry (pte) is performed by p{m,u,4}d_leaf stubs, which may
>> be specialised for each choice of mmu. In a prior commit, we replace
>> uses to the catch-all stubs, p{m,u,4}d_is_leaf with p{m,u,4}d_leaf.
>> 
>> Replace the catch-all stub definitions for p{m,u,4}d_is_leaf with
>> definitions for p{m,u,4}d_leaf. A future patch will assume that
>> p{m,u,4}d_leaf is defined on all platforms.
>> 
>> In particular, implement pud_leaf for Book3E-64, pmd_leaf for all Book3E
>> and Book3S-64 platforms, with a catch-all definition for p4d_leaf.
>> 
>> Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
>> ---
>> v5: Split patch that replaces p{m,u,4}d_is_leaf into two patches, first
>> replacing callsites and afterward providing generic definition.
>> Remove ifndef-defines implementing p{m,u}d_leaf in favour of
>> implementing stubs in headers belonging to the particular platforms
>> needing them.
>> ---
>> arch/powerpc/include/asm/book3s/32/pgtable.h |  4 ++++
>> arch/powerpc/include/asm/book3s/64/pgtable.h |  8 ++-----
>> arch/powerpc/include/asm/nohash/64/pgtable.h |  5 +++++
>> arch/powerpc/include/asm/nohash/pgtable.h    |  5 +++++
>> arch/powerpc/include/asm/pgtable.h           | 22 ++------------------
>> 5 files changed, 18 insertions(+), 26 deletions(-)
> 
> I needed the delta below to prevent the generic versions being defined
> and overriding our versions.
> 
> cheers
> 
> diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
> index 44703c8c590c..117135be8cc2 100644
> --- a/arch/powerpc/include/asm/book3s/32/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
> @@ -244,6 +244,7 @@ static inline void pmd_clear(pmd_t *pmdp)
> *pmdp = __pmd(0);
> }
> 
> +#define pmd_leaf pmd_leaf
> static inline bool pmd_leaf(pmd_t pmd)
> {
> return false;
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index 436632d04304..f00aa2d203c2 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -1438,11 +1438,13 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
> /*
>  * Like pmd_huge() and pmd_large(), but works regardless of config options
>  */
> +#define pmd_leaf pmd_leaf
> static inline bool pmd_leaf(pmd_t pmd)
> {
> return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
> }
> 
> +#define pud_leaf pud_leaf
> static inline bool pud_leaf(pud_t pud)
> {
> return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
> diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
> index 2488da8f0deb..d88b22c753d3 100644
> --- a/arch/powerpc/include/asm/nohash/64/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
> @@ -147,6 +147,7 @@ static inline void pud_clear(pud_t *pudp)
> *pudp = __pud(0);
> }
> 
> +#define pud_leaf pud_leaf
> static inline bool pud_leaf(pud_t pud)
> {
> return false;
> diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
> index 487804f5b1d1..dfae1dbb9c3b 100644
> --- a/arch/powerpc/include/asm/nohash/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/pgtable.h
> @@ -60,6 +60,7 @@ static inline bool pte_hw_valid(pte_t pte)
> return pte_val(pte) & _PAGE_PRESENT;
> }
> 
> +#define pmd_leaf pmd_leaf
> static inline bool pmd_leaf(pmd_t pmd)
> {
> return false;



