* [PATCH v3 1/6] powerpc/mm: update ptep_set_access_flag to not do full mm tlb flush
@ 2016-11-21 18:33 Aneesh Kumar K.V
  2016-11-21 18:33 ` [PATCH v3 2/6] powerpc/mm: Rename hugetlb-radix.h to hugetlb.h Aneesh Kumar K.V
                   ` (5 more replies)
  0 siblings, 6 replies; 11+ messages in thread
From: Aneesh Kumar K.V @ 2016-11-21 18:33 UTC (permalink / raw)
  To: benh, paulus, mpe; +Cc: linuxppc-dev, Aneesh Kumar K.V

When we are updating a pte, we only need to flush the tlb mapping for
that pte. Right now we do a full mm flush because we don't track the
page size. Update the interface to track the page size and use it to
do the right tlb flush.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/32/pgtable.h |  4 +++-
 arch/powerpc/include/asm/book3s/64/pgtable.h |  7 +++++--
 arch/powerpc/include/asm/book3s/64/radix.h   | 14 +++++++-------
 arch/powerpc/include/asm/nohash/32/pgtable.h |  4 +++-
 arch/powerpc/include/asm/nohash/64/pgtable.h |  4 +++-
 arch/powerpc/mm/pgtable-book3s64.c           |  3 ++-
 arch/powerpc/mm/pgtable-radix.c              | 16 ++++++++++++++++
 arch/powerpc/mm/pgtable.c                    | 10 ++++++++--
 arch/powerpc/mm/tlb-radix.c                  | 15 ---------------
 9 files changed, 47 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 6b8b2d57fdc8..0713626e9189 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -224,7 +224,9 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 
 
 static inline void __ptep_set_access_flags(struct mm_struct *mm,
-					   pte_t *ptep, pte_t entry)
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address,
+					   unsigned long pg_sz)
 {
 	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 86870c11917b..46d739457d68 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -580,10 +580,13 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
  */
 
 static inline void __ptep_set_access_flags(struct mm_struct *mm,
-					   pte_t *ptep, pte_t entry)
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address,
+					   unsigned long pg_sz)
 {
 	if (radix_enabled())
-		return radix__ptep_set_access_flags(mm, ptep, entry);
+		return radix__ptep_set_access_flags(mm, ptep, entry,
+						    address, pg_sz);
 	return hash__ptep_set_access_flags(ptep, entry);
 }
 
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 2a46dea8e1b1..279b2f68e00f 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -110,6 +110,7 @@
 #define RADIX_PUD_TABLE_SIZE	(sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
 #define RADIX_PGD_TABLE_SIZE	(sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)
 
+extern int radix_get_mmu_psize(unsigned long pg_sz);
 static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
 					       unsigned long set)
 {
@@ -167,7 +168,9 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
  * function doesn't need to invalidate tlb.
  */
 static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
-						pte_t *ptep, pte_t entry)
+						pte_t *ptep, pte_t entry,
+						unsigned long address,
+						unsigned long pg_sz)
 {
 
 	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
@@ -175,6 +178,7 @@ static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
 
 	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
 
+		int psize;
 		unsigned long old_pte, new_pte;
 
 		old_pte = __radix_pte_update(ptep, ~0, 0);
@@ -183,12 +187,8 @@ static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
 		 * new value of pte
 		 */
 		new_pte = old_pte | set;
-
-		/*
-		 * For now let's do heavy pid flush
-		 * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
-		 */
-		radix__flush_tlb_mm(mm);
+		psize = radix_get_mmu_psize(pg_sz);
+		radix__flush_tlb_page_psize(mm, address, psize);
 
 		__radix_pte_update(ptep, 0, new_pte);
 	} else
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index c219ef7be53b..24ee66bf7223 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -268,7 +268,9 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 
 
 static inline void __ptep_set_access_flags(struct mm_struct *mm,
-					   pte_t *ptep, pte_t entry)
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address,
+					   unsigned long pg_sz)
 {
 	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 653a1838469d..86d49dc60ec6 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -301,7 +301,9 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
  * function doesn't need to flush the hash entry
  */
 static inline void __ptep_set_access_flags(struct mm_struct *mm,
-					   pte_t *ptep, pte_t entry)
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address,
+					   unsigned long pg_sz)
 {
 	unsigned long bits = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index f4f437cbabf1..5c7c501b7cae 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -35,7 +35,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 #endif
 	changed = !pmd_same(*(pmdp), entry);
 	if (changed) {
-		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp), pmd_pte(entry));
+		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp), pmd_pte(entry),
+					address, HPAGE_PMD_SIZE);
 		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 688b54517655..416918005395 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -222,6 +222,22 @@ static int __init get_idx_from_shift(unsigned int shift)
 	return idx;
 }
 
+int radix_get_mmu_psize(unsigned long page_size)
+{
+	int psize;
+
+	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
+		psize = mmu_virtual_psize;
+	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
+		psize = MMU_PAGE_2M;
+	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
+		psize = MMU_PAGE_1G;
+	else
+		return -1;
+	return psize;
+}
+
+
 static int __init radix_dt_scan_page_sizes(unsigned long node,
 					   const char *uname, int depth,
 					   void *data)
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 911fdfb63ec1..a9ec0d8f1bcf 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -219,12 +219,18 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 			  pte_t *ptep, pte_t entry, int dirty)
 {
 	int changed;
+	unsigned long pg_sz;
+
 	entry = set_access_flags_filter(entry, vma, dirty);
 	changed = !pte_same(*(ptep), entry);
 	if (changed) {
-		if (!is_vm_hugetlb_page(vma))
+		if (!is_vm_hugetlb_page(vma)) {
+			pg_sz = PAGE_SIZE;
 			assert_pte_locked(vma->vm_mm, address);
-		__ptep_set_access_flags(vma->vm_mm, ptep, entry);
+		} else
+			pg_sz = huge_page_size(hstate_vma(vma));
+		__ptep_set_access_flags(vma->vm_mm, ptep, entry,
+					address, pg_sz);
 		flush_tlb_page(vma, address);
 	}
 	return changed;
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 3493cf4e0452..81a9f6390f64 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -282,21 +282,6 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 }
 EXPORT_SYMBOL(radix__flush_tlb_range);
 
-static int radix_get_mmu_psize(int page_size)
-{
-	int psize;
-
-	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
-		psize = mmu_virtual_psize;
-	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
-		psize = MMU_PAGE_2M;
-	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
-		psize = MMU_PAGE_1G;
-	else
-		return -1;
-	return psize;
-}
-
 void radix__tlb_flush(struct mmu_gather *tlb)
 {
 	int psize = 0;
-- 
2.10.2
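
The helper added here, radix_get_mmu_psize(), simply translates a byte
size into the MMU page-size index the flush routines expect, so the DD1
workaround can flush one va/page-size pair via
radix__flush_tlb_page_psize() instead of flushing the whole PID with
radix__flush_tlb_mm(). A standalone userspace sketch of that lookup
(illustrative only; the shifts 16/21/30 are the 64K/2M/1G radix page
sizes, and the enum values stand in for the kernel's real MMU_PAGE_*
indices):

	#include <stdio.h>

	enum { MMU_PAGE_64K, MMU_PAGE_2M, MMU_PAGE_1G };

	/* mirrors the size -> psize mapping done by radix_get_mmu_psize() */
	static int demo_get_mmu_psize(unsigned long page_size)
	{
		if (page_size == (1UL << 16))	/* 64K: mmu_virtual_psize */
			return MMU_PAGE_64K;
		if (page_size == (1UL << 21))	/* 2M */
			return MMU_PAGE_2M;
		if (page_size == (1UL << 30))	/* 1G */
			return MMU_PAGE_1G;
		return -1;			/* unsupported size */
	}

	int main(void)
	{
		printf("64K -> %d\n", demo_get_mmu_psize(1UL << 16));
		printf("2M  -> %d\n", demo_get_mmu_psize(1UL << 21));
		printf("1G  -> %d\n", demo_get_mmu_psize(1UL << 30));
		return 0;
	}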


* [PATCH v3 2/6] powerpc/mm: Rename hugetlb-radix.h to hugetlb.h
  2016-11-21 18:33 [PATCH v3 1/6] powerpc/mm: update ptep_set_access_flag to not do full mm tlb flush Aneesh Kumar K.V
@ 2016-11-21 18:33 ` Aneesh Kumar K.V
  2016-11-21 18:33 ` [PATCH v3 3/6] powerpc/mm/hugetlb: Handle hugepage size supported by hash config Aneesh Kumar K.V
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 11+ messages in thread
From: Aneesh Kumar K.V @ 2016-11-21 18:33 UTC (permalink / raw)
  To: benh, paulus, mpe; +Cc: linuxppc-dev, Aneesh Kumar K.V

We will start moving some book3s-specific hugetlb functions into it.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/{hugetlb-radix.h => hugetlb.h} | 4 ++--
 arch/powerpc/include/asm/hugetlb.h                                | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
 rename arch/powerpc/include/asm/book3s/64/{hugetlb-radix.h => hugetlb.h} (90%)

diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
similarity index 90%
rename from arch/powerpc/include/asm/book3s/64/hugetlb-radix.h
rename to arch/powerpc/include/asm/book3s/64/hugetlb.h
index c45189aa7476..499268045306 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_RADIX_H
-#define _ASM_POWERPC_BOOK3S_64_HUGETLB_RADIX_H
+#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
+#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
 /*
  * For radix we want generic code to handle hugetlb. But then if we want
  * both hash and radix to be enabled together we need to workaround the
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index c5517f463ec7..c03e0a3dd4d8 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -9,7 +9,7 @@ extern struct kmem_cache *hugepte_cache;
 
 #ifdef CONFIG_PPC_BOOK3S_64
 
-#include <asm/book3s/64/hugetlb-radix.h>
+#include <asm/book3s/64/hugetlb.h>
 /*
  * This should work for other subarchs too. But right now we use the
  * new format only for 64bit book3s
-- 
2.10.2


* [PATCH v3 3/6] powerpc/mm/hugetlb: Handle hugepage size supported by hash config
  2016-11-21 18:33 [PATCH v3 1/6] powerpc/mm: update ptep_set_access_flag to not do full mm tlb flush Aneesh Kumar K.V
  2016-11-21 18:33 ` [PATCH v3 2/6] powerpc/mm: Rename hugetlb-radix.h to hugetlb.h Aneesh Kumar K.V
@ 2016-11-21 18:33 ` Aneesh Kumar K.V
  2016-11-21 18:33 ` [PATCH v3 4/6] powerpc/mm/hugetlb: Switch hugetlb update to use huge_pte_update Aneesh Kumar K.V
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 11+ messages in thread
From: Aneesh Kumar K.V @ 2016-11-21 18:33 UTC (permalink / raw)
  To: benh, paulus, mpe; +Cc: linuxppc-dev, Aneesh Kumar K.V

With the hash page table config, we support 16MB and 16GB hugepage
sizes. Update hstate_get_psize to handle 16M and 16G as well.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/hugetlb.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 499268045306..d9c283f95e05 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -21,6 +21,10 @@ static inline int hstate_get_psize(struct hstate *hstate)
 		return MMU_PAGE_2M;
 	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
 		return MMU_PAGE_1G;
+	else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
+		return MMU_PAGE_16M;
+	else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
+		return MMU_PAGE_16G;
 	else {
 		WARN(1, "Wrong huge page shift\n");
 		return mmu_virtual_psize;
-- 
2.10.2


* [PATCH v3 4/6] powerpc/mm/hugetlb: Switch hugetlb update to use huge_pte_update
  2016-11-21 18:33 [PATCH v3 1/6] powerpc/mm: update ptep_set_access_flag to not do full mm tlb flush Aneesh Kumar K.V
  2016-11-21 18:33 ` [PATCH v3 2/6] powerpc/mm: Rename hugetlb-radix.h to hugetlb.h Aneesh Kumar K.V
  2016-11-21 18:33 ` [PATCH v3 3/6] powerpc/mm/hugetlb: Handle hugepage size supported by hash config Aneesh Kumar K.V
@ 2016-11-21 18:33 ` Aneesh Kumar K.V
  2016-11-22  2:41   ` Michael Ellerman
  2016-11-21 18:33 ` [PATCH v3 5/6] powerpc/mm: update pte_update to not do full mm tlb flush Aneesh Kumar K.V
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 11+ messages in thread
From: Aneesh Kumar K.V @ 2016-11-21 18:33 UTC (permalink / raw)
  To: benh, paulus, mpe; +Cc: linuxppc-dev, Aneesh Kumar K.V

We want to switch pte_update to use a va-based tlb flush. In order to do
that we need to track the page size. With hugetlb we currently don't have
the page size available in these functions, so switch hugetlb to use
separate update functions. In a later patch we will update the hugetlb
functions to take a vm_area_struct, from which we can derive the page
size. After that we will switch this back to use pte_update.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/hugetlb.h | 42 ++++++++++++++++++++++++++++
 arch/powerpc/include/asm/book3s/64/pgtable.h |  9 ------
 arch/powerpc/include/asm/hugetlb.h           |  2 +-
 3 files changed, 43 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index d9c283f95e05..9a64f356a8e8 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -30,4 +30,46 @@ static inline int hstate_get_psize(struct hstate *hstate)
 		return mmu_virtual_psize;
 	}
 }
+
+static inline unsigned long huge_pte_update(struct mm_struct *mm, unsigned long addr,
+					    pte_t *ptep, unsigned long clr,
+					    unsigned long set)
+{
+	if (radix_enabled()) {
+		unsigned long old_pte;
+
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+
+			unsigned long new_pte;
+
+			old_pte = __radix_pte_update(ptep, ~0, 0);
+			asm volatile("ptesync" : : : "memory");
+			/*
+			 * new value of pte
+			 */
+			new_pte = (old_pte | set) & ~clr;
+			/*
+			 * For now let's do heavy pid flush
+			 * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
+			 */
+			radix__flush_tlb_mm(mm);
+
+			__radix_pte_update(ptep, 0, new_pte);
+		} else
+			old_pte = __radix_pte_update(ptep, clr, set);
+		asm volatile("ptesync" : : : "memory");
+		return old_pte;
+	}
+	return hash__pte_update(mm, addr, ptep, clr, set, true);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
+		return;
+
+	huge_pte_update(mm, addr, ptep, _PAGE_WRITE, 0);
+}
+
 #endif
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 46d739457d68..ef2eef1ba99a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -346,15 +346,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
 }
 
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
-					   unsigned long addr, pte_t *ptep)
-{
-	if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
-		return;
-
-	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
-}
-
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				       unsigned long addr, pte_t *ptep)
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index c03e0a3dd4d8..058d6311de87 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -136,7 +136,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr, pte_t *ptep)
 {
 #ifdef CONFIG_PPC64
-	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
+	return __pte(huge_pte_update(mm, addr, ptep, ~0UL, 0));
 #else
 	return __pte(pte_update(ptep, ~0UL, 0));
 #endif
-- 
2.10.2


* [PATCH v3 5/6] powerpc/mm: update pte_update to not do full mm tlb flush
  2016-11-21 18:33 [PATCH v3 1/6] powerpc/mm: update ptep_set_access_flag to not do full mm tlb flush Aneesh Kumar K.V
                   ` (2 preceding siblings ...)
  2016-11-21 18:33 ` [PATCH v3 4/6] powerpc/mm/hugetlb: Switch hugetlb update to use huge_pte_update Aneesh Kumar K.V
@ 2016-11-21 18:33 ` Aneesh Kumar K.V
  2016-11-21 18:33 ` [PATCH v3 6/6] powerpc/mm: Batch tlb flush when invalidating pte entries Aneesh Kumar K.V
  2016-11-22  2:55 ` [PATCH v3 1/6] powerpc/mm: update ptep_set_access_flag to not do full mm tlb flush Balbir Singh
  5 siblings, 0 replies; 11+ messages in thread
From: Aneesh Kumar K.V @ 2016-11-21 18:33 UTC (permalink / raw)
  To: benh, paulus, mpe; +Cc: linuxppc-dev, Aneesh Kumar K.V

When we are updating a pte, we only need to flush the tlb mapping for
that pte. Right now we do a full mm flush because we don't track the
page size. Update the interface to track the page size and use it to
do the right tlb flush.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h | 16 ++++++++++------
 arch/powerpc/include/asm/book3s/64/radix.h   | 19 ++++++++-----------
 arch/powerpc/mm/pgtable-radix.c              |  2 +-
 3 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index ef2eef1ba99a..09869ad37aba 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -301,12 +301,16 @@ extern unsigned long pci_io_base;
 
 static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep, unsigned long clr,
-				       unsigned long set, int huge)
+				       unsigned long set,
+				       unsigned long pg_sz)
 {
+	bool huge = (pg_sz != PAGE_SIZE);
+
 	if (radix_enabled())
-		return radix__pte_update(mm, addr, ptep, clr, set, huge);
+		return radix__pte_update(mm, addr, ptep, clr, set, pg_sz);
 	return hash__pte_update(mm, addr, ptep, clr, set, huge);
 }
+
 /*
  * For hash even if we have _PAGE_ACCESSED = 0, we do a pte_update.
  * We currently remove entries from the hashtable regardless of whether
@@ -324,7 +328,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 
 	if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
 		return 0;
-	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, PAGE_SIZE);
 	return (old & _PAGE_ACCESSED) != 0;
 }
 
@@ -343,21 +347,21 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 	if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
 		return;
 
-	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, PAGE_SIZE);
 }
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				       unsigned long addr, pte_t *ptep)
 {
-	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
+	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, PAGE_SIZE);
 	return __pte(old);
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 			     pte_t * ptep)
 {
-	pte_update(mm, addr, ptep, ~0UL, 0, 0);
+	pte_update(mm, addr, ptep, ~0UL, 0, PAGE_SIZE);
 }
 
 static inline int pte_write(pte_t pte)
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 279b2f68e00f..aec6e8ee6e27 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -129,15 +129,16 @@ static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
 
 
 static inline unsigned long radix__pte_update(struct mm_struct *mm,
-					unsigned long addr,
-					pte_t *ptep, unsigned long clr,
-					unsigned long set,
-					int huge)
+					      unsigned long addr,
+					      pte_t *ptep, unsigned long clr,
+					      unsigned long set,
+					      unsigned long pg_sz)
 {
 	unsigned long old_pte;
 
 	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
 
+		int psize;
 		unsigned long new_pte;
 
 		old_pte = __radix_pte_update(ptep, ~0, 0);
@@ -146,18 +147,14 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
 		 * new value of pte
 		 */
 		new_pte = (old_pte | set) & ~clr;
-
-		/*
-		 * For now let's do heavy pid flush
-		 * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
-		 */
-		radix__flush_tlb_mm(mm);
+		psize = radix_get_mmu_psize(pg_sz);
+		radix__flush_tlb_page_psize(mm, addr, psize);
 
 		__radix_pte_update(ptep, 0, new_pte);
 	} else
 		old_pte = __radix_pte_update(ptep, clr, set);
 	asm volatile("ptesync" : : : "memory");
-	if (!huge)
+	if (pg_sz == PAGE_SIZE)
 		assert_pte_locked(mm, addr);
 
 	return old_pte;
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 416918005395..2fc7336619b3 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -486,7 +486,7 @@ unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long add
 	assert_spin_locked(&mm->page_table_lock);
 #endif
 
-	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
+	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, HPAGE_PMD_SIZE);
 	trace_hugepage_update(addr, old, clr, set);
 
 	return old;
-- 
2.10.2


* [PATCH v3 6/6] powerpc/mm: Batch tlb flush when invalidating pte entries
  2016-11-21 18:33 [PATCH v3 1/6] powerpc/mm: update ptep_set_access_flag to not do full mm tlb flush Aneesh Kumar K.V
                   ` (3 preceding siblings ...)
  2016-11-21 18:33 ` [PATCH v3 5/6] powerpc/mm: update pte_update to not do full mm tlb flush Aneesh Kumar K.V
@ 2016-11-21 18:33 ` Aneesh Kumar K.V
  2016-11-21 20:40   ` Benjamin Herrenschmidt
  2016-11-22  2:55 ` [PATCH v3 1/6] powerpc/mm: update ptep_set_access_flag to not do full mm tlb flush Balbir Singh
  5 siblings, 1 reply; 11+ messages in thread
From: Aneesh Kumar K.V @ 2016-11-21 18:33 UTC (permalink / raw)
  To: benh, paulus, mpe; +Cc: linuxppc-dev, Aneesh Kumar K.V

This improves the task-exit case by batching tlb invalidates.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/radix.h | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index aec6e8ee6e27..83c77323a769 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -142,15 +142,21 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
 		unsigned long new_pte;
 
 		old_pte = __radix_pte_update(ptep, ~0, 0);
-		asm volatile("ptesync" : : : "memory");
 		/*
 		 * new value of pte
 		 */
 		new_pte = (old_pte | set) & ~clr;
-		psize = radix_get_mmu_psize(pg_sz);
-		radix__flush_tlb_page_psize(mm, addr, psize);
-
-		__radix_pte_update(ptep, 0, new_pte);
+		/*
+		 * If we are trying to clear the pte, we can skip
+		 * the below sequence and batch the tlb flush. The
+		 * tlb flush batching is done by mmu gather code
+		 */
+		if (new_pte) {
+			asm volatile("ptesync" : : : "memory");
+			psize = radix_get_mmu_psize(pg_sz);
+			radix__flush_tlb_page_psize(mm, addr, psize);
+			__radix_pte_update(ptep, 0, new_pte);
+		}
 	} else
 		old_pte = __radix_pte_update(ptep, clr, set);
 	asm volatile("ptesync" : : : "memory");
-- 
2.10.2
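
Why this helps task exit: teardown clears ptes with clr = ~0UL and
set = 0, so new_pte = (old_pte | set) & ~clr evaluates to zero, the
ptesync/flush/rewrite sequence is skipped entirely, and invalidation is
left to the batched mmu_gather flush. A standalone check of that
arithmetic (illustrative only, not kernel code):

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		/* arbitrary pte value; pte_clear() passes clr = ~0UL, set = 0 */
		unsigned long old_pte = 0x8000000000000105UL;
		unsigned long clr = ~0UL, set = 0;
		unsigned long new_pte = (old_pte | set) & ~clr;

		assert(new_pte == 0);	/* clearing always yields 0 */
		printf("new_pte = %#lx\n", new_pte);
		return 0;
	}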


* Re: [PATCH v3 6/6] powerpc/mm: Batch tlb flush when invalidating pte entries
  2016-11-21 18:33 ` [PATCH v3 6/6] powerpc/mm: Batch tlb flush when invalidating pte entries Aneesh Kumar K.V
@ 2016-11-21 20:40   ` Benjamin Herrenschmidt
  2016-11-22  2:21     ` Aneesh Kumar K.V
  0 siblings, 1 reply; 11+ messages in thread
From: Benjamin Herrenschmidt @ 2016-11-21 20:40 UTC (permalink / raw)
  To: Aneesh Kumar K.V, paulus, mpe; +Cc: linuxppc-dev

On Tue, 2016-11-22 at 00:03 +0530, Aneesh Kumar K.V wrote:
> +               /*
> +                * If we are trying to clear the pte, we can skip
> +                * the below sequence and batch the tlb flush. The
> +                * tlb flush batching is done by mmu gather code
> +                */
> +               if (new_pte) {
> +                       asm volatile("ptesync" : : : "memory");
> +                       psize = radix_get_mmu_psize(pg_sz);
> +                       radix__flush_tlb_page_psize(mm, addr, psize);
> +                       __radix_pte_update(ptep, 0, new_pte);
> +               }
>         } else
>                 old_pte = __radix_pte_update(ptep, clr, set);

Can you check the valid bit? What if we are just setting a swap PTE
on top of an invalid one, for example?

Should the above case be limited to both old and new being valid?

Cheers,
Ben.


* Re: [PATCH v3 6/6] powerpc/mm: Batch tlb flush when invalidating pte entries
  2016-11-21 20:40   ` Benjamin Herrenschmidt
@ 2016-11-22  2:21     ` Aneesh Kumar K.V
  0 siblings, 0 replies; 11+ messages in thread
From: Aneesh Kumar K.V @ 2016-11-22  2:21 UTC (permalink / raw)
  To: Benjamin Herrenschmidt, paulus, mpe; +Cc: linuxppc-dev

Benjamin Herrenschmidt <benh@kernel.crashing.org> writes:

> On Tue, 2016-11-22 at 00:03 +0530, Aneesh Kumar K.V wrote:
>> +               /*
>> +                * If we are trying to clear the pte, we can skip
>> +                * the below sequence and batch the tlb flush. The
>> +                * tlb flush batching is done by mmu gather code
>> +                */
>> +               if (new_pte) {
>> +                       asm volatile("ptesync" : : : "memory");
>> +                       psize = radix_get_mmu_psize(pg_sz);
>> +                       radix__flush_tlb_page_psize(mm, addr, psize);
>> +                       __radix_pte_update(ptep, 0, new_pte);
>> +               }
>>         } else
>>                 old_pte = __radix_pte_update(ptep, clr, set);
>
> Can you check the valid bit? What if we are just setting a swap PTE
> on top of an invalid one, for example?
>
> Should the above case be limited to both old and new being valid?

We should use set_pte_at if the old pte is not valid, and that doesn't
involve a tlb flush.

-aneesh
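
Put together, that would narrow the flush-and-rewrite sequence to the
case where one valid translation replaces another, roughly (an
illustrative sketch, not part of the posted series; it assumes
_PAGE_PRESENT is the radix valid bit):

	if ((old_pte & _PAGE_PRESENT) && (new_pte & _PAGE_PRESENT)) {
		/* replacing one valid translation with another:
		 * flush the old entry before installing the new one */
		asm volatile("ptesync" : : : "memory");
		psize = radix_get_mmu_psize(pg_sz);
		radix__flush_tlb_page_psize(mm, addr, psize);
		__radix_pte_update(ptep, 0, new_pte);
	} else if (new_pte)
		/* old pte was not valid (e.g. a swap entry), so there
		 * is no stale translation to flush; a plain install,
		 * as set_pte_at() would do, is enough */
		__radix_pte_update(ptep, 0, new_pte);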


* Re: [PATCH v3 4/6] powerpc/mm/hugetlb: Switch hugetlb update to use huge_pte_update
  2016-11-21 18:33 ` [PATCH v3 4/6] powerpc/mm/hugetlb: Switch hugetlb update to use huge_pte_update Aneesh Kumar K.V
@ 2016-11-22  2:41   ` Michael Ellerman
  0 siblings, 0 replies; 11+ messages in thread
From: Michael Ellerman @ 2016-11-22  2:41 UTC (permalink / raw)
  To: Aneesh Kumar K.V, benh, paulus; +Cc: linuxppc-dev, Aneesh Kumar K.V

"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> writes:

> We want to switch pte_update to use a va-based tlb flush. In order to do
> that we need to track the page size. With hugetlb we currently don't have
> the page size available in these functions, so switch hugetlb to use
> separate update functions. In a later patch we will update the hugetlb
> functions to take a vm_area_struct, from which we can derive the page
> size. After that we will switch this back to use pte_update.

This breaks corenet64_smp_defconfig:

In file included from ../arch/powerpc/include/asm/page.h:294:0,
                 from ../arch/powerpc/include/asm/thread_info.h:34,
                 from ../include/linux/thread_info.h:58,
                 from ../include/asm-generic/preempt.h:4,
                 from ./arch/powerpc/include/generated/asm/preempt.h:1,
                 from ../include/linux/preempt.h:59,
                 from ../include/linux/spinlock.h:50,
                 from ../include/linux/mmzone.h:7,
                 from ../include/linux/gfp.h:5,
                 from ../arch/powerpc/mm/pgtable.c:25:
../arch/powerpc/include/asm/hugetlb.h: In function ‘huge_ptep_get_and_clear’:
../arch/powerpc/include/asm/hugetlb.h:139:15: error: implicit declaration of function ‘huge_pte_update’ [-Werror=implicit-function-declaration]
  return __pte(huge_pte_update(mm, addr, ptep, ~0UL, 0));
               ^
../arch/powerpc/include/asm/pgtable-types.h:6:30: note: in definition of macro ‘__pte’
 #define __pte(x) ((pte_t) { (x) })


And many more.

cheers


* Re: [PATCH v3 1/6] powerpc/mm: update ptep_set_access_flag to not do full mm tlb flush
  2016-11-21 18:33 [PATCH v3 1/6] powerpc/mm: update ptep_set_access_flag to not do full mm tlb flush Aneesh Kumar K.V
                   ` (4 preceding siblings ...)
  2016-11-21 18:33 ` [PATCH v3 6/6] powerpc/mm: Batch tlb flush when invalidating pte entries Aneesh Kumar K.V
@ 2016-11-22  2:55 ` Balbir Singh
  2016-11-22  5:57   ` Aneesh Kumar K.V
  5 siblings, 1 reply; 11+ messages in thread
From: Balbir Singh @ 2016-11-22  2:55 UTC (permalink / raw)
  To: Aneesh Kumar K.V, benh, paulus, mpe; +Cc: linuxppc-dev



On 22/11/16 05:33, Aneesh Kumar K.V wrote:
> When we are updating a pte, we only need to flush the tlb mapping for
> that pte. Right now we do a full mm flush because we don't track the
> page size. Update the interface to track the page size and use it to
> do the right tlb flush.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> ---
>  arch/powerpc/include/asm/book3s/32/pgtable.h |  4 +++-
>  arch/powerpc/include/asm/book3s/64/pgtable.h |  7 +++++--
>  arch/powerpc/include/asm/book3s/64/radix.h   | 14 +++++++-------
>  arch/powerpc/include/asm/nohash/32/pgtable.h |  4 +++-
>  arch/powerpc/include/asm/nohash/64/pgtable.h |  4 +++-
>  arch/powerpc/mm/pgtable-book3s64.c           |  3 ++-
>  arch/powerpc/mm/pgtable-radix.c              | 16 ++++++++++++++++
>  arch/powerpc/mm/pgtable.c                    | 10 ++++++++--
>  arch/powerpc/mm/tlb-radix.c                  | 15 ---------------
>  9 files changed, 47 insertions(+), 30 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
> index 6b8b2d57fdc8..0713626e9189 100644
> --- a/arch/powerpc/include/asm/book3s/32/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
> @@ -224,7 +224,9 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
>  
>  
>  static inline void __ptep_set_access_flags(struct mm_struct *mm,
> -					   pte_t *ptep, pte_t entry)
> +					   pte_t *ptep, pte_t entry,
> +					   unsigned long address,
> +					   unsigned long pg_sz)

I wonder if the change can be limited in scope by passing the struct vma
instead of the struct mm as the first argument, and not passing pg_sz (I
don't like that name at all; page_size, please, if we decide to do it).
The mm can then be taken from vma->vm_mm.
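
That would reduce the signature to something like this (a sketch of the
suggested interface, not code from this series):

	static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
						   pte_t *ptep, pte_t entry,
						   unsigned long address)
	{
		struct mm_struct *mm = vma->vm_mm;
		unsigned long page_size = is_vm_hugetlb_page(vma) ?
				huge_page_size(hstate_vma(vma)) : PAGE_SIZE;
		/* ... existing body, using mm and page_size ... */
	}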


Balbir Singh.


* Re: [PATCH v3 1/6] powerpc/mm: update ptep_set_access_flag to not do full mm tlb flush
  2016-11-22  2:55 ` [PATCH v3 1/6] powerpc/mm: update ptep_set_access_flag to not do full mm tlb flush Balbir Singh
@ 2016-11-22  5:57   ` Aneesh Kumar K.V
  0 siblings, 0 replies; 11+ messages in thread
From: Aneesh Kumar K.V @ 2016-11-22  5:57 UTC (permalink / raw)
  To: Balbir Singh, benh, paulus, mpe; +Cc: linuxppc-dev

Balbir Singh <bsingharora@gmail.com> writes:

> On 22/11/16 05:33, Aneesh Kumar K.V wrote:
>> When we are updating a pte, we only need to flush the tlb mapping for
>> that pte. Right now we do a full mm flush because we don't track the
>> page size. Update the interface to track the page size and use it to
>> do the right tlb flush.
>> 
>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
>> ---
>>  arch/powerpc/include/asm/book3s/32/pgtable.h |  4 +++-
>>  arch/powerpc/include/asm/book3s/64/pgtable.h |  7 +++++--
>>  arch/powerpc/include/asm/book3s/64/radix.h   | 14 +++++++-------
>>  arch/powerpc/include/asm/nohash/32/pgtable.h |  4 +++-
>>  arch/powerpc/include/asm/nohash/64/pgtable.h |  4 +++-
>>  arch/powerpc/mm/pgtable-book3s64.c           |  3 ++-
>>  arch/powerpc/mm/pgtable-radix.c              | 16 ++++++++++++++++
>>  arch/powerpc/mm/pgtable.c                    | 10 ++++++++--
>>  arch/powerpc/mm/tlb-radix.c                  | 15 ---------------
>>  9 files changed, 47 insertions(+), 30 deletions(-)
>> 
>> diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
>> index 6b8b2d57fdc8..0713626e9189 100644
>> --- a/arch/powerpc/include/asm/book3s/32/pgtable.h
>> +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
>> @@ -224,7 +224,9 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
>>  
>>  
>>  static inline void __ptep_set_access_flags(struct mm_struct *mm,
>> -					   pte_t *ptep, pte_t entry)
>> +					   pte_t *ptep, pte_t entry,
>> +					   unsigned long address,
>> +					   unsigned long pg_sz)
>
> I wonder if the change can be limited in scope by passing the struct vma instead
> of struct mm as the first argument and not passing pg_sz (I don't like that
> name at all, page_size if we decide to do it, please). Then extract the mm from
> vma->mm
>

https://lkml.kernel.org/r/20161114152020.4608-1-aneesh.kumar@linux.vnet.ibm.com

That patch series goes on top of this series. The idea of splitting it
in two is to make it easier to backport this series to older kernels if
needed.


-aneesh

