* [PATCH V2 1/4] powerpc/mm: Fix crashes with PUD level hugetlb config
From: Aneesh Kumar K.V @ 2018-02-11 15:00 UTC
  To: benh, paulus, mpe, Ram Pai; +Cc: linuxppc-dev, Aneesh Kumar K.V

To support memory keys, we moved the hash PTE slot information to the second
half of the page table. This was fine with PTE entries at level 4 (PTE) and
level 3 (PMD): we already allocate larger page table pages at those levels to
accommodate the extra details. At level 4 the extra space was already there to
track 4K hash page table entry details, and at the PMD level the extra space
was allocated to track THP details.

With hugetlbfs PTEs, we used this extra space at the PMD level to store the
slot details. But we also support hugetlbfs PTEs at the PUD level, and the
PUD level page table didn't allocate the extra space. This resulted in memory
corruption.

Fix this by allocating the extra space at the PUD level when HUGETLB_PAGE is
enabled. We may need further changes to allocate larger space at the PMD level
when we enable HUGETLB_PAGE. That will be done in the next patch.
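
For reference, the "second half" layout means that the slot information for
an entry lives one full table length past the entry itself, so it can be
reached with plain pointer arithmetic on the entry pointer. A rough sketch of
the idea (slot_ptr() is a hypothetical name used only for illustration; the
real accessors are __real_pte() and pte_set_hidx() in the hunks below):

	/*
	 * table page layout: [ entries[0..N-1] | slot words[0..N-1] ]
	 * where N is 1 << INDEX_SIZE for that level.
	 */
	static inline unsigned long *slot_ptr(pte_t *ptep, int nr_entries)
	{
		/* the slot word shadowing *ptep sits nr_entries ahead */
		return (unsigned long *)(ptep + nr_entries);
	}

This is why any level that can hold hugetlb PTEs must allocate twice the
space: without the second half, the slot word lands past the allocation.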

Fixes: bf9a95f9a6481bc6e ("powerpc: Free up four 64K PTE bits in 64K backed HPTE pages")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/32/pgtable.h  |  1 +
 arch/powerpc/include/asm/book3s/64/hash-64k.h |  5 +++++
 arch/powerpc/include/asm/book3s/64/hash.h     | 10 ++++++++++
 arch/powerpc/include/asm/book3s/64/pgalloc.h  |  6 +++---
 arch/powerpc/include/asm/book3s/64/pgtable.h  |  2 ++
 arch/powerpc/include/asm/nohash/32/pgtable.h  |  1 +
 arch/powerpc/include/asm/nohash/64/pgtable.h  |  1 +
 arch/powerpc/mm/hash_utils_64.c               |  1 +
 arch/powerpc/mm/init-common.c                 |  4 ++--
 arch/powerpc/mm/pgtable-radix.c               |  1 +
 arch/powerpc/mm/pgtable_64.c                  |  2 ++
 11 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 30a155c0a6b0..c615abdce119 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -16,6 +16,7 @@
 #define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
 
 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX	PUD_INDEX_SIZE
 
 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 338b7da468ce..c08b3b032ec0 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -146,7 +146,12 @@ static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long a
 #else
 #define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
 #endif
+#ifdef CONFIG_HUGETLB_PAGE
+#define H_PUD_TABLE_SIZE	((sizeof(pud_t) << PUD_INDEX_SIZE) +	\
+				 (sizeof(unsigned long) << PUD_INDEX_SIZE))
+#else
 #define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
+#endif
 #define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 0920eff731b3..234f141fb151 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -32,6 +32,16 @@
 #else
 #define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
 #endif
+/*
+ * We not store the slot details in the second half of page table.
+ * Increase the pud level table so that hugetlb ptes can be stored
+ * at pud level.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) &&  defined(CONFIG_PPC_64K_PAGES)
+#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
+#else
+#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
+#endif
 /*
  * Define the address range of the kernel non-linear virtual area
  */
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 1fcfa425cefa..53df86d3cfce 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -93,13 +93,13 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+	return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
 		pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
-	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
+	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -115,7 +115,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 	 * ahead and flush the page walk cache
 	 */
 	flush_tlb_pgtable(tlb, address);
-        pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
+        pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 51017726d495..1c8c88e90553 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -232,11 +232,13 @@ extern unsigned long __pmd_index_size;
 extern unsigned long __pud_index_size;
 extern unsigned long __pgd_index_size;
 extern unsigned long __pmd_cache_index;
+extern unsigned long __pud_cache_index;
 #define PTE_INDEX_SIZE  __pte_index_size
 #define PMD_INDEX_SIZE  __pmd_index_size
 #define PUD_INDEX_SIZE  __pud_index_size
 #define PGD_INDEX_SIZE  __pgd_index_size
 #define PMD_CACHE_INDEX __pmd_cache_index
+#define PUD_CACHE_INDEX __pud_cache_index
 /*
  * Because of use of pte fragments and THP, size of page table
  * are not always derived out of index size above.
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 504a3c36ce5c..03bbd1149530 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -24,6 +24,7 @@ extern int icache_44x_need_flush;
 #define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
 
 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX	PUD_INDEX_SIZE
 
 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index abddf5830ad5..5c5f75d005ad 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -27,6 +27,7 @@
 #else
 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
 #endif
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
 
 /*
  * Define the address range of the kernel non-linear virtual area
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 7d07c7e17db6..cf290d415dcd 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1008,6 +1008,7 @@ void __init hash__early_init_mmu(void)
 	__pmd_index_size = H_PMD_INDEX_SIZE;
 	__pud_index_size = H_PUD_INDEX_SIZE;
 	__pgd_index_size = H_PGD_INDEX_SIZE;
+	__pud_cache_index = H_PUD_CACHE_INDEX;
 	__pmd_cache_index = H_PMD_CACHE_INDEX;
 	__pte_table_size = H_PTE_TABLE_SIZE;
 	__pmd_table_size = H_PMD_TABLE_SIZE;
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index eb8c6c8c4851..2b656e67f2ea 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -100,6 +100,6 @@ void pgtable_cache_init(void)
 	 * same size as either the pgd or pmd index except with THP enabled
 	 * on book3s 64
 	 */
-	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
-		pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
+	if (PUD_CACHE_INDEX && !PGT_CACHE(PUD_CACHE_INDEX))
+		pgtable_cache_add(PUD_CACHE_INDEX, pud_ctor);
 }
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 573a9a2ee455..27d096610369 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -535,6 +535,7 @@ void __init radix__early_init_mmu(void)
 	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
 	__pud_index_size = RADIX_PUD_INDEX_SIZE;
 	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
+	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
 	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
 	__pte_table_size = RADIX_PTE_TABLE_SIZE;
 	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index c9a623c2d8a2..a0f8928c0b86 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -82,6 +82,8 @@ unsigned long __pgd_index_size;
 EXPORT_SYMBOL(__pgd_index_size);
 unsigned long __pmd_cache_index;
 EXPORT_SYMBOL(__pmd_cache_index);
+unsigned long __pud_cache_index;
+EXPORT_SYMBOL(__pud_cache_index);
 unsigned long __pte_table_size;
 EXPORT_SYMBOL(__pte_table_size);
 unsigned long __pmd_table_size;
-- 
2.14.3

* [PATCH V2 2/4] powerpc/mm/hash64: Allocate larger PMD table if hugetlb config is enabled.
From: Aneesh Kumar K.V @ 2018-02-11 15:00 UTC
  To: benh, paulus, mpe, Ram Pai; +Cc: linuxppc-dev, Aneesh Kumar K.V

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/hash-64k.h | 2 +-
 arch/powerpc/include/asm/book3s/64/hash.h     | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index c08b3b032ec0..ee440fb3d240 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -140,7 +140,7 @@ static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long a
 }
 
 #define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined (CONFIG_HUGETLB_PAGE)
 #define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
 				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
 #else
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 234f141fb151..0851c328bea6 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -23,7 +23,8 @@
 				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
 #define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&  defined(CONFIG_PPC_64K_PAGES)
+#if (defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)) && \
+	defined(CONFIG_PPC_64K_PAGES)
 /*
  * only with hash 64k we need to use the second half of pmd page table
  * to store pointer to deposited pgtable_t
-- 
2.14.3

* [PATCH V2 3/4] powerpc/mm/hash64: Store the slot information at the right offset.
From: Aneesh Kumar K.V @ 2018-02-11 15:00 UTC
  To: benh, paulus, mpe, Ram Pai; +Cc: linuxppc-dev, Aneesh Kumar K.V

The hugetlb pte entries are at the PMD and PUD levels. Use the right offset
for them to get to the second half of the table.
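
Concretely, the offset passed down is the number of entries in the table that
contains the PTE, so that ptep + offset lands on the matching slot word in the
second half of that table. Illustrative call sites (adapted from the hunks
below):

	/* regular 64K page: the PTE sits in a PTE table */
	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);

	/* 16G hugetlb page: the PTE sits in a PUD table */
	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PUD);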

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/hash-4k.h  |  3 ++-
 arch/powerpc/include/asm/book3s/64/hash-64k.h |  9 +++++----
 arch/powerpc/include/asm/book3s/64/pgtable.h  |  2 +-
 arch/powerpc/mm/hash64_4k.c                   |  4 ++--
 arch/powerpc/mm/hash64_64k.c                  |  8 ++++----
 arch/powerpc/mm/hugetlbpage-hash64.c          | 10 +++++++---
 arch/powerpc/mm/tlb_hash64.c                  |  9 +++++++--
 7 files changed, 28 insertions(+), 17 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 949d691094a4..67c5475311ee 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -63,7 +63,8 @@ static inline int hash__hugepd_ok(hugepd_t hpd)
  * keeping the prototype consistent across the two formats.
  */
 static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
-			unsigned int subpg_index, unsigned long hidx)
+					 unsigned int subpg_index, unsigned long hidx,
+					 int offset)
 {
 	return (hidx << H_PAGE_F_GIX_SHIFT) &
 		(H_PAGE_F_SECOND | H_PAGE_F_GIX);
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index ee440fb3d240..3bcf269f8f55 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -45,7 +45,7 @@
  * generic accessors and iterators here
  */
 #define __real_pte __real_pte
-static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
 {
 	real_pte_t rpte;
 	unsigned long *hidxp;
@@ -59,7 +59,7 @@ static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
 	 */
 	smp_rmb();
 
-	hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+	hidxp = (unsigned long *)(ptep + offset);
 	rpte.hidx = *hidxp;
 	return rpte;
 }
@@ -86,9 +86,10 @@ static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
  * expected to modify the PTE bits accordingly and commit the PTE to memory.
  */
 static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
-		unsigned int subpg_index, unsigned long hidx)
+					 unsigned int subpg_index,
+					 unsigned long hidx, int offset)
 {
-	unsigned long *hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+	unsigned long *hidxp = (unsigned long *)(ptep + offset);
 
 	rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
 	*hidxp = rpte.hidx  | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 1c8c88e90553..a6b9f1d74600 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -350,7 +350,7 @@ extern unsigned long pci_io_base;
  */
 #ifndef __real_pte
 
-#define __real_pte(e,p)		((real_pte_t){(e)})
+#define __real_pte(e, p, o)		((real_pte_t){(e)})
 #define __rpte_to_pte(r)	((r).pte)
 #define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
 
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 5a69b51d08a3..d573d7d07f25 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -55,7 +55,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * need to add in 0x1 if it's a read-only user page
 	 */
 	rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 
 	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
 	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -117,7 +117,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 			return -1;
 		}
 		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
 	}
 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
 	return 0;
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 2253bbc6a599..e601d95c3b20 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -86,7 +86,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 
 	subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
 	vpn  = hpt_vpn(ea, vsid, ssize);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 	/*
 	 *None of the sub 4k page is hashed
 	 */
@@ -214,7 +214,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 		return -1;
 	}
 
-	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot);
+	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE);
 	new_pte |= H_PAGE_HASHPTE;
 
 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
@@ -262,7 +262,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
 	rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 
 	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
 	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -327,7 +327,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 		}
 
 		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
 	}
 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
 	return 0;
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 12511f5a015f..b320f5097a06 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -27,7 +27,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	unsigned long vpn;
 	unsigned long old_pte, new_pte;
 	unsigned long rflags, pa, sz;
-	long slot;
+	long slot, offset;
 
 	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
 
@@ -63,7 +63,11 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
 	rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	if (unlikely(mmu_psize == MMU_PAGE_16G))
+		offset = PTRS_PER_PUD;
+	else
+		offset = PTRS_PER_PMD;
+	rpte = __real_pte(__pte(old_pte), ptep, offset);
 
 	sz = ((1UL) << shift);
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -104,7 +108,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 			return -1;
 		}
 
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
 	}
 
 	/*
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 881ebd53ffc2..9b23f12e863c 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -51,7 +51,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	unsigned int psize;
 	int ssize;
 	real_pte_t rpte;
-	int i;
+	int i, offset;
 
 	i = batch->index;
 
@@ -67,6 +67,10 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		psize = get_slice_psize(mm, addr);
 		/* Mask the address for the correct page size */
 		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
+		if (unlikely(psize == MMU_PAGE_16G))
+			offset = PTRS_PER_PUD;
+		else
+			offset = PTRS_PER_PMD;
 #else
 		BUG();
 		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
@@ -78,6 +82,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		 * support 64k pages, this might be different from the
 		 * hardware page size encoded in the slice table. */
 		addr &= PAGE_MASK;
+		offset = PTRS_PER_PTE;
 	}
 
 
@@ -91,7 +96,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	}
 	WARN_ON(vsid == 0);
 	vpn = hpt_vpn(addr, vsid, ssize);
-	rpte = __real_pte(__pte(pte), ptep);
+	rpte = __real_pte(__pte(pte), ptep, offset);
 
 	/*
 	 * Check if we have an active batch on this CPU. If not, just
-- 
2.14.3

* [PATCH V2 4/4] powerpc/mm/hash64: memset the pagetable pages on allocation.
From: Aneesh Kumar K.V @ 2018-02-11 15:00 UTC
  To: benh, paulus, mpe, Ram Pai; +Cc: linuxppc-dev, Aneesh Kumar K.V

Now that we are using the second half of the table to store slot details and
we don't clear them in huge_pte_get_and_clear(), we need to make sure we zero
out the range on allocation. A kmem_cache constructor runs only when the slab
first creates an object, not on every allocation, so a recycled page table
page would otherwise come back with stale slot details in its second half.

Simplify this by doing the object initialization after kmem_cache_alloc() and
updating the constructors to do nothing.
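
A toy userspace analogue of the problem (illustrative only; standalone C, not
kernel code): the "constructor" zeroes an object when the cache first creates
it, but an object recycled from the freelist comes back with whatever its
previous user left behind.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define OBJ_SIZE 64

	static void *freelist;

	static void *cache_alloc(void)
	{
		if (freelist) {			/* recycled: "ctor" does not rerun */
			void *obj = freelist;
			freelist = *(void **)obj;
			return obj;
		}
		return calloc(1, OBJ_SIZE);	/* first creation: "ctor" zeroes */
	}

	static void cache_free(void *obj)
	{
		*(void **)obj = freelist;	/* first word reused as link */
		freelist = obj;
	}

	int main(void)
	{
		unsigned char *a = cache_alloc();
		memset(a, 0xaa, OBJ_SIZE);	/* user dirties the object */
		cache_free(a);

		unsigned char *b = cache_alloc();
		printf("%02x\n", b[8]);		/* prints aa: stale data survives */
		return 0;
	}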

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/32/pgalloc.h | 16 +++++++++++++
 arch/powerpc/include/asm/book3s/64/pgalloc.h | 34 +++++++++++++++++++++++-----
 arch/powerpc/include/asm/nohash/pgalloc.h    | 16 +++++++++++++
 arch/powerpc/mm/init-common.c                | 15 ------------
 4 files changed, 60 insertions(+), 21 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 5073cc75f1c8..9f5c411bce1b 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -139,4 +139,20 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 	pgtable_page_dtor(table);
 	pgtable_free_tlb(tlb, page_address(table), 0);
 }
+
+static inline void pgd_ctor(void *addr)
+{
+	memset(addr, 0, PGD_TABLE_SIZE);
+}
+
+static inline void pud_ctor(void *addr)
+{
+	memset(addr, 0, PUD_TABLE_SIZE);
+}
+
+static inline void pmd_ctor(void *addr)
+{
+	memset(addr, 0, PMD_TABLE_SIZE);
+}
+
 #endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 53df86d3cfce..d6ee7563b09d 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -73,10 +73,13 @@ static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
+	pgd_t *pgd;
 	if (radix_enabled())
 		return radix__pgd_alloc(mm);
-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
-		pgtable_gfp_flags(mm, GFP_KERNEL));
+	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+			       pgtable_gfp_flags(mm, GFP_KERNEL));
+	memset(pgd, 0, PGD_TABLE_SIZE);
+	return pgd;
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -93,8 +96,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
-		pgtable_gfp_flags(mm, GFP_KERNEL));
+	pud_t *pud;
+	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
+			       pgtable_gfp_flags(mm, GFP_KERNEL));
+	memset(pud, 0, PUD_TABLE_SIZE);
+	return pud;
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -120,8 +126,12 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-		pgtable_gfp_flags(mm, GFP_KERNEL));
+	pmd_t *pmd;
+	pmd = kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+			       pgtable_gfp_flags(mm, GFP_KERNEL));
+	memset(pmd, 0, PMD_TABLE_SIZE);
+	return pmd;
+
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -218,4 +228,16 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 
 #define check_pgt_cache()	do { } while (0)
 
+static inline void pgd_ctor(void *addr)
+{
+}
+
+static inline void pud_ctor(void *addr)
+{
+}
+
+static inline void pmd_ctor(void *addr)
+{
+}
+
 #endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h
index 0634f2949438..df3be548ff97 100644
--- a/arch/powerpc/include/asm/nohash/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/pgalloc.h
@@ -21,4 +21,20 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
 #else
 #include <asm/nohash/32/pgalloc.h>
 #endif
+
+static inline void pgd_ctor(void *addr)
+{
+	memset(addr, 0, PGD_TABLE_SIZE);
+}
+
+static inline void pud_ctor(void *addr)
+{
+	memset(addr, 0, PUD_TABLE_SIZE);
+}
+
+static inline void pmd_ctor(void *addr)
+{
+	memset(addr, 0, PMD_TABLE_SIZE);
+}
+
 #endif /* _ASM_POWERPC_NOHASH_PGALLOC_H */
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index 2b656e67f2ea..f92dd8cee3c5 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -25,21 +25,6 @@
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 
-static void pgd_ctor(void *addr)
-{
-	memset(addr, 0, PGD_TABLE_SIZE);
-}
-
-static void pud_ctor(void *addr)
-{
-	memset(addr, 0, PUD_TABLE_SIZE);
-}
-
-static void pmd_ctor(void *addr)
-{
-	memset(addr, 0, PMD_TABLE_SIZE);
-}
-
 struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
 EXPORT_SYMBOL_GPL(pgtable_cache);	/* used by kvm_hv module */
 
-- 
2.14.3

* Re: [PATCH V2 1/4] powerpc/mm: Fix crashes with PUD level hugetlb config
From: Ram Pai @ 2018-02-11 22:24 UTC
  To: Aneesh Kumar K.V; +Cc: benh, paulus, mpe, linuxppc-dev

On Sun, Feb 11, 2018 at 08:30:06PM +0530, Aneesh Kumar K.V wrote:
....snip...

> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
> index 0920eff731b3..234f141fb151 100644
> --- a/arch/powerpc/include/asm/book3s/64/hash.h
> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
> @@ -32,6 +32,16 @@
>  #else
>  #define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
>  #endif
> +/*
> + * We not store the slot details in the second half of page table.

s/not//
We store....


Reviewed-by: Ram Pai <linuxram@us.ibm.com>

* Re: [PATCH V2 2/4] powerpc/mm/hash64: Allocate larger PMD table if hugetlb config is enabled.
From: Ram Pai @ 2018-02-11 22:28 UTC
  To: Aneesh Kumar K.V; +Cc: benh, paulus, mpe, linuxppc-dev

On Sun, Feb 11, 2018 at 08:30:07PM +0530, Aneesh Kumar K.V wrote:
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> ---
>  arch/powerpc/include/asm/book3s/64/hash-64k.h | 2 +-
>  arch/powerpc/include/asm/book3s/64/hash.h     | 3 ++-
>  2 files changed, 3 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
> index c08b3b032ec0..ee440fb3d240 100644
> --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
> +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
> @@ -140,7 +140,7 @@ static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long a
>  }
> 
>  #define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined (CONFIG_HUGETLB_PAGE)
>  #define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
>  				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
>  #else
> diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
> index 234f141fb151..0851c328bea6 100644
> --- a/arch/powerpc/include/asm/book3s/64/hash.h
> +++ b/arch/powerpc/include/asm/book3s/64/hash.h
> @@ -23,7 +23,8 @@
>  				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
>  #define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
> 
> -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&  defined(CONFIG_PPC_64K_PAGES)
> +#if (defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)) && \
> +	defined(CONFIG_PPC_64K_PAGES)
>  /*
>   * only with hash 64k we need to use the second half of pmd page table
>   * to store pointer to deposited pgtable_t

A small nitpick: the definitions of H_PMD_CACHE_INDEX and
H_PUD_CACHE_INDEX (introduced in the previous patch)
can be nested under #ifdef CONFIG_PPC_64K_PAGES, as sketched below.
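
One shape the suggested nesting could take, combining both patches (a sketch
of the suggestion only, not what was merged):

	#ifdef CONFIG_PPC_64K_PAGES
	#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
	#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
	#else
	#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
	#endif
	#ifdef CONFIG_HUGETLB_PAGE
	#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
	#else
	#define H_PUD_CACHE_INDEX	H_PUD_INDEX_SIZE
	#endif
	#else /* !CONFIG_PPC_64K_PAGES */
	#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
	#define H_PUD_CACHE_INDEX	H_PUD_INDEX_SIZE
	#endif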


Reviewed-by: Ram Pai <linuxram@us.ibm.com>

* Re: [PATCH V2 3/4] powerpc/mm/hash64: Store the slot information at the right offset.
From: Ram Pai @ 2018-02-11 22:39 UTC
  To: Aneesh Kumar K.V; +Cc: benh, paulus, mpe, linuxppc-dev

On Sun, Feb 11, 2018 at 08:30:08PM +0530, Aneesh Kumar K.V wrote:
> The hugetlb pte entries are at the PMD and PUD levels. Use the right offset
> for them to get to the second half of the table.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> ---
>  arch/powerpc/include/asm/book3s/64/hash-4k.h  |  3 ++-
>  arch/powerpc/include/asm/book3s/64/hash-64k.h |  9 +++++----
>  arch/powerpc/include/asm/book3s/64/pgtable.h  |  2 +-
>  arch/powerpc/mm/hash64_4k.c                   |  4 ++--
>  arch/powerpc/mm/hash64_64k.c                  |  8 ++++----
>  arch/powerpc/mm/hugetlbpage-hash64.c          | 10 +++++++---
>  arch/powerpc/mm/tlb_hash64.c                  |  9 +++++++--
>  7 files changed, 28 insertions(+), 17 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
> index 949d691094a4..67c5475311ee 100644

....snip...

> diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
> index 881ebd53ffc2..9b23f12e863c 100644
> --- a/arch/powerpc/mm/tlb_hash64.c
> +++ b/arch/powerpc/mm/tlb_hash64.c
> @@ -51,7 +51,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
>  	unsigned int psize;
>  	int ssize;
>  	real_pte_t rpte;
> -	int i;
> +	int i, offset;
> 
>  	i = batch->index;
> 
> @@ -67,6 +67,10 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
>  		psize = get_slice_psize(mm, addr);
>  		/* Mask the address for the correct page size */
>  		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
> +		if (unlikely(psize == MMU_PAGE_16G))
> +			offset = PTRS_PER_PUD;
> +		else
> +			offset = PTRS_PER_PMD;

I prefer to encapsulate this in some function/macro, somewhere in hugetlb.h,
that returns the offset given the mmu_psize (see the sketch below). But no
big deal.
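
Such a helper might look something like this (hypothetical name and
placement, e.g. somewhere in the hash/hugetlb headers):

	/* hypothetical: map a hugepage psize to its second-half offset */
	static inline int hugetlb_hidx_offset(int mmu_psize)
	{
		if (unlikely(mmu_psize == MMU_PAGE_16G))
			return PTRS_PER_PUD;	/* 16G pages sit in a PUD table */
		return PTRS_PER_PMD;		/* other hugepages sit in a PMD table */
	}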


Reviewed-by: Ram Pai <linuxram@us.ibm.com>

RP

* Re: [V2,1/4] powerpc/mm: Fix crashes with PUD level hugetlb config
From: Michael Ellerman @ 2018-02-14  5:43 UTC
  To: Aneesh Kumar K.V, benh, paulus, Ram Pai; +Cc: linuxppc-dev, Aneesh Kumar K.V

On Sun, 2018-02-11 at 15:00:06 UTC, "Aneesh Kumar K.V" wrote:
> To support memory keys, we moved the hash PTE slot information to the second
> half of the page table.

....snip...

> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> Reviewed-by: Ram Pai <linuxram@us.ibm.com>

Applied to powerpc fixes, thanks.

https://git.kernel.org/powerpc/c/fae2211697c9490414e974431051f7

cheers

* Re: [V2, 2/4] powerpc/mm/hash64: Allocate larger PMD table if hugetlb config is enabled.
From: Michael Ellerman @ 2018-02-14  5:43 UTC
  To: Aneesh Kumar K.V, benh, paulus, Ram Pai; +Cc: linuxppc-dev, Aneesh Kumar K.V

On Sun, 2018-02-11 at 15:00:07 UTC, "Aneesh Kumar K.V" wrote:
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> Reviewed-by: Ram Pai <linuxram@us.ibm.com>

Applied to powerpc fixes, thanks.

https://git.kernel.org/powerpc/c/4a7aa4fecbbf94b5c6fae8acccc983

cheers

* Re: [V2, 3/4] powerpc/mm/hash64: Store the slot information at the right offset.
From: Michael Ellerman @ 2018-02-14  5:43 UTC
  To: Aneesh Kumar K.V, benh, paulus, Ram Pai; +Cc: linuxppc-dev, Aneesh Kumar K.V

On Sun, 2018-02-11 at 15:00:08 UTC, "Aneesh Kumar K.V" wrote:
> The hugetlb pte entries are at the PMD and PUD levels. Use the right offset
> for them to get to the second half of the table.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> Reviewed-by: Ram Pai <linuxram@us.ibm.com>

Applied to powerpc fixes, thanks.

https://git.kernel.org/powerpc/c/ff31e105464d8c8c97301964682702

cheers
