[PATCH V3] powerpc/mm: Initialize kernel pagetable memory for PTE fragments
From: Anshuman Khandual @ 2018-06-20 13:05 UTC
  To: linuxppc-dev; +Cc: mpe, aneesh.kumar, benh

Kernel pagetable pages allocated for PTE fragments never go through the
standard init sequence, which can lead to inaccurate utilization statistics
being reported through interfaces such as /proc and /sysfs. The allocated
page also misses out on pagetable lock and page flag initialization. Fix
this by making sure all pages allocated for PTE fragments, whether for a
user process or for the kernel, go through the same initialization.
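
For clarity, the allocation and free paths after this change look roughly
as follows. This is a simplified sketch of the patched functions (the
fragment-count bookkeeping is omitted; PGALLOC_GFP and the helpers are the
existing book3s64 definitions shown in the diff below):

static pte_t *__alloc_for_ptecache(struct mm_struct *mm)
{
	gfp_t gfp_mask = PGALLOC_GFP;
	struct page *page;

	/* Only user pagetables are charged to the memcg. */
	if (mm != &init_mm)
		gfp_mask |= __GFP_ACCOUNT;

	page = alloc_page(gfp_mask);
	if (!page)
		return NULL;

	/*
	 * Run the constructor unconditionally so that kernel PTE
	 * fragment pages also get their pagetable lock, page flags
	 * and accounting initialized.
	 */
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	return (pte_t *)page_address(page);
}

void pte_fragment_free(unsigned long *table)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		/* Matching unconditional destructor on the free side. */
		pgtable_page_dtor(page);
		free_unref_page(page);
	}
}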

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
Changes in V3:

- Replaced the 'kernel' argument with a direct check on init_mm, as suggested by Aneesh

Changes in V2:

- Call the destructor function during free in all cases

 arch/powerpc/include/asm/book3s/64/pgalloc.h | 12 ++++-----
 arch/powerpc/mm/pgtable-book3s64.c           | 37 +++++++++++++---------------
 2 files changed, 23 insertions(+), 26 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 01ee40f..ccb351c 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -41,9 +41,9 @@ struct vmemmap_backing {
 			pgtable_cache[(shift) - 1];	\
 		})
 
-extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
+extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long);
 extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
-extern void pte_fragment_free(unsigned long *, int);
+extern void pte_fragment_free(unsigned long *);
 extern void pmd_fragment_free(unsigned long *);
 extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
 #ifdef CONFIG_SMP
@@ -176,23 +176,23 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return (pte_t *)pte_fragment_alloc(mm, address, 1);
+	return (pte_t *)pte_fragment_alloc(mm, address);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 				      unsigned long address)
 {
-	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
+	return (pgtable_t)pte_fragment_alloc(mm, address);
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	pte_fragment_free((unsigned long *)pte, 1);
+	pte_fragment_free((unsigned long *)pte);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 {
-	pte_fragment_free((unsigned long *)ptepage, 0);
+	pte_fragment_free((unsigned long *)ptepage);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index c1f4ca4..b792f8a 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -333,25 +333,23 @@ static pte_t *get_pte_from_cache(struct mm_struct *mm)
 	return (pte_t *)ret;
 }
 
-static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
+static pte_t *__alloc_for_ptecache(struct mm_struct *mm)
 {
+	gfp_t gfp_mask = PGALLOC_GFP;
 	void *ret = NULL;
 	struct page *page;
 
-	if (!kernel) {
-		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
-		if (!page)
-			return NULL;
-		if (!pgtable_page_ctor(page)) {
-			__free_page(page);
-			return NULL;
-		}
-	} else {
-		page = alloc_page(PGALLOC_GFP);
-		if (!page)
-			return NULL;
-	}
+	if (mm != &init_mm)
+		gfp_mask |= __GFP_ACCOUNT;
 
+	page = alloc_page(gfp_mask);
+	if (!page)
+		return NULL;
+
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 
 	ret = page_address(page);
 	/*
@@ -375,7 +373,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 	return (pte_t *)ret;
 }
 
-pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
+pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
 {
 	pte_t *pte;
 
@@ -383,16 +381,15 @@ pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel
 	if (pte)
 		return pte;
 
-	return __alloc_for_ptecache(mm, kernel);
+	return __alloc_for_ptecache(mm);
 }
 
-void pte_fragment_free(unsigned long *table, int kernel)
+void pte_fragment_free(unsigned long *table)
 {
 	struct page *page = virt_to_page(table);
 
 	if (put_page_testzero(page)) {
-		if (!kernel)
-			pgtable_page_dtor(page);
+		pgtable_page_dtor(page);
 		free_unref_page(page);
 	}
 }
@@ -401,7 +398,7 @@ static inline void pgtable_free(void *table, int index)
 {
 	switch (index) {
 	case PTE_INDEX:
-		pte_fragment_free(table, 0);
+		pte_fragment_free(table);
 		break;
 	case PMD_INDEX:
 		pmd_fragment_free(table);
-- 
1.8.3.1
