* [PATCH 1/4] mm/hugetlb: Simplify hugetlb unmap
@ 2016-06-02  9:39 Aneesh Kumar K.V
  2016-06-02  9:39 ` [PATCH 2/4] mm: Change the interface for __tlb_remove_page Aneesh Kumar K.V
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Aneesh Kumar K.V @ 2016-06-02  9:39 UTC (permalink / raw)
  To: akpm, mpe; +Cc: linux-mm, linux-kernel, Aneesh Kumar K.V

For hugetlb, as with THP (and unlike regular pages), we do the TLB
flush after dropping the ptl. Because of that, we don't need to track
force_flush the way we do now. Instead we can simply call
tlb_remove_page(), which will do the flush if needed.

No functionality change in this patch.
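
A condensed sketch of the resulting per-page loop (illustration only,
the actual change is in the diff below):

	for (; address < end; address += sz) {
		...
		ptl = huge_pte_lock(h, mm, ptep);
		/* clear the pte and update rmap/counters under the ptl */
		pte = huge_ptep_get_and_clear(mm, address, ptep);
		page_remove_rmap(page, true);
		spin_unlock(ptl);
		/*
		 * Safe to do outside the ptl: tlb_remove_page() calls
		 * tlb_flush_mmu() itself when the gather batch is full.
		 */
		tlb_remove_page(tlb, page);
	}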

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 mm/hugetlb.c | 54 +++++++++++++++++++++---------------------------------
 1 file changed, 21 insertions(+), 33 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e4168484f249..8dd91cd5571c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3138,7 +3138,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    unsigned long start, unsigned long end,
 			    struct page *ref_page)
 {
-	int force_flush = 0;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
 	pte_t *ptep;
@@ -3157,19 +3156,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	tlb_start_vma(tlb, vma);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	address = start;
-again:
 	for (; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
 
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, &address, ptep))
-			goto unlock;
+		if (huge_pmd_unshare(mm, &address, ptep)) {
+			spin_unlock(ptl);
+			continue;
+		}
 
 		pte = huge_ptep_get(ptep);
-		if (huge_pte_none(pte))
-			goto unlock;
+		if (huge_pte_none(pte)) {
+			spin_unlock(ptl);
+			continue;
+		}
 
 		/*
 		 * Migrating hugepage or HWPoisoned hugepage is already
@@ -3177,7 +3179,8 @@ again:
 		 */
 		if (unlikely(!pte_present(pte))) {
 			huge_pte_clear(mm, address, ptep);
-			goto unlock;
+			spin_unlock(ptl);
+			continue;
 		}
 
 		page = pte_page(pte);
@@ -3187,9 +3190,10 @@ again:
 		 * are about to unmap is the actual page of interest.
 		 */
 		if (ref_page) {
-			if (page != ref_page)
-				goto unlock;
-
+			if (page != ref_page) {
+				spin_unlock(ptl);
+				continue;
+			}
 			/*
 			 * Mark the VMA as having unmapped its page so that
 			 * future faults in this VMA will fail rather than
@@ -3205,30 +3209,14 @@ again:
 
 		hugetlb_count_sub(pages_per_huge_page(h), mm);
 		page_remove_rmap(page, true);
-		force_flush = !__tlb_remove_page(tlb, page);
-		if (force_flush) {
-			address += sz;
-			spin_unlock(ptl);
-			break;
-		}
-		/* Bail out after unmapping reference page if supplied */
-		if (ref_page) {
-			spin_unlock(ptl);
-			break;
-		}
-unlock:
+
 		spin_unlock(ptl);
-	}
-	/*
-	 * mmu_gather ran out of room to batch pages, we break out of
-	 * the PTE lock to avoid doing the potential expensive TLB invalidate
-	 * and page-free while holding it.
-	 */
-	if (force_flush) {
-		force_flush = 0;
-		tlb_flush_mmu(tlb);
-		if (address < end && !ref_page)
-			goto again;
+		tlb_remove_page(tlb, page);
+		/*
+		 * Bail out after unmapping reference page if supplied
+		 */
+		if (ref_page)
+			break;
 	}
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	tlb_end_vma(tlb, vma);
-- 
2.7.4


* [PATCH 2/4] mm: Change the interface for __tlb_remove_page
  2016-06-02  9:39 [PATCH 1/4] mm/hugetlb: Simplify hugetlb unmap Aneesh Kumar K.V
@ 2016-06-02  9:39 ` Aneesh Kumar K.V
  2016-06-02  9:39 ` [PATCH 3/4] mm/mmu_gather: Track page size with mmu gather and force flush if page size change Aneesh Kumar K.V
  2016-06-02  9:39 ` [PATCH 4/4] powerpc/mm/radix: Implement tlb mmu gather flush efficiently Aneesh Kumar K.V
  2 siblings, 0 replies; 6+ messages in thread
From: Aneesh Kumar K.V @ 2016-06-02  9:39 UTC (permalink / raw)
  To: akpm, mpe; +Cc: linux-mm, linux-kernel, Aneesh Kumar K.V

This updates the generic and arch-specific implementations of
__tlb_remove_page() to return true if the caller needs to do a TLB
flush. That means that when __tlb_remove_page() indicates a flush is
needed, the page we tried to remove must be tracked and queued again
after the flush; we need to track it because we have already set the
pte to none and cannot simply loop back.

This change is made so that we can force a tlb_flush when flushing a
range that consists of different page sizes. Architectures like ppc64
can do a range-based TLB flush and need to track the page size for
that. When we try to remove a huge page, we will force a TLB flush and
start a new mmu gather.
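
The new calling convention for a batching user is, in essence (sketch;
the generic tlb_remove_page() below does exactly this):

	if (__tlb_remove_page(tlb, page)) {
		/*
		 * Batch was full and the page was NOT queued: flush,
		 * re-adjust the gather range, then queue the same page.
		 */
		tlb_flush_mmu(tlb);
		__tlb_adjust_range(tlb, tlb->addr);
		__tlb_remove_page(tlb, page);
	}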

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/arm/include/asm/tlb.h  | 11 +++++++----
 arch/ia64/include/asm/tlb.h | 13 ++++++++-----
 arch/s390/include/asm/tlb.h |  4 ++--
 arch/sh/include/asm/tlb.h   |  2 +-
 arch/um/include/asm/tlb.h   |  2 +-
 include/asm-generic/tlb.h   | 18 ++++++++++++++++--
 mm/memory.c                 | 20 ++++++++++++++------
 7 files changed, 49 insertions(+), 21 deletions(-)

diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 3cadb726ec88..45dea952b0e6 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -209,17 +209,20 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 		tlb_flush(tlb);
 }
 
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
+	if (tlb->nr == tlb->max)
+		return true;
 	tlb->pages[tlb->nr++] = page;
-	VM_BUG_ON(tlb->nr > tlb->max);
-	return tlb->max - tlb->nr;
+	return false;
 }
 
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (!__tlb_remove_page(tlb, page))
+	if (__tlb_remove_page(tlb, page)) {
 		tlb_flush_mmu(tlb);
+		__tlb_remove_page(tlb, page);
+	}
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 39d64e0df1de..85005ab513e9 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -205,17 +205,18 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  * must be delayed until after the TLB has been flushed (see comments at the beginning of
  * this file).
  */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
+	if (tlb->nr == tlb->max)
+		return true;
+
 	tlb->need_flush = 1;
 
 	if (!tlb->nr && tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 
 	tlb->pages[tlb->nr++] = page;
-	VM_BUG_ON(tlb->nr > tlb->max);
-
-	return tlb->max - tlb->nr;
+	return false;
 }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -235,8 +236,10 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (!__tlb_remove_page(tlb, page))
+	if (__tlb_remove_page(tlb, page)) {
 		tlb_flush_mmu(tlb);
+		__tlb_remove_page(tlb, page);
+	}
 }
 
 /*
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 7a92e69c50bc..6b98cb3601d5 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -87,10 +87,10 @@ static inline void tlb_finish_mmu(struct mmu_gather *tlb,
  * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
  * has already been freed, so just do free_page_and_swap_cache.
  */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	free_page_and_swap_cache(page);
-	return 1; /* avoid calling tlb_flush_mmu */
+	return false; /* avoid calling tlb_flush_mmu */
 }
 
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 62f80d2a9df9..3dec5e0734f5 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -101,7 +101,7 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	free_page_and_swap_cache(page);
-	return 1; /* avoid calling tlb_flush_mmu */
+	return false; /* avoid calling tlb_flush_mmu */
 }
 
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 16eb63fac57d..c6638f8e5e90 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -102,7 +102,7 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 	free_page_and_swap_cache(page);
-	return 1; /* avoid calling tlb_flush_mmu */
+	return false; /* avoid calling tlb_flush_mmu */
 }
 
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 9dbb739cafa0..2ac8fe202e9a 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -107,6 +107,11 @@ struct mmu_gather {
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
 	unsigned int		batch_count;
+	/*
+	 * __tlb_adjust_range  will track the new addr here,
+	 * that that we can adjust the range after the flush
+	 */
+	unsigned long addr;
 };
 
 #define HAVE_GENERIC_MMU_GATHER
@@ -115,7 +120,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 							unsigned long end);
-int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
+bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
 
 /* tlb_remove_page
  *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
@@ -123,8 +128,11 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
  */
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (!__tlb_remove_page(tlb, page))
+	if (__tlb_remove_page(tlb, page)) {
 		tlb_flush_mmu(tlb);
+		__tlb_adjust_range(tlb, tlb->addr);
+		__tlb_remove_page(tlb, page);
+	}
 }
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
@@ -132,6 +140,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 {
 	tlb->start = min(tlb->start, address);
 	tlb->end = max(tlb->end, address + PAGE_SIZE);
+	/*
+	 * Track the last address with which we adjusted the range. This
+	 * will be used later to adjust again after a mmu_flush due to
+	 * failed __tlb_remove_page
+	 */
+	tlb->addr = address;
 }
 
 static inline void __tlb_reset_range(struct mmu_gather *tlb)
diff --git a/mm/memory.c b/mm/memory.c
index 15322b73636b..a01db5bc756b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -292,23 +292,24 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
  *	handling the additional races in SMP caused by other CPUs caching valid
  *	mappings in their TLBs. Returns the number of free page slots left.
  *	When out of page slots we must call tlb_flush_mmu().
+ *returns true if the caller should flush.
  */
-int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	struct mmu_gather_batch *batch;
 
 	VM_BUG_ON(!tlb->end);
 
 	batch = tlb->active;
-	batch->pages[batch->nr++] = page;
 	if (batch->nr == batch->max) {
 		if (!tlb_next_batch(tlb))
-			return 0;
+			return true;
 		batch = tlb->active;
 	}
 	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
 
-	return batch->max - batch->nr;
+	batch->pages[batch->nr++] = page;
+	return false;
 }
 
 #endif /* HAVE_GENERIC_MMU_GATHER */
@@ -1109,6 +1110,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	pte_t *start_pte;
 	pte_t *pte;
 	swp_entry_t entry;
+	struct page *pending_page = NULL;
 
 again:
 	init_rss_vec(rss);
@@ -1160,8 +1162,9 @@ again:
 			page_remove_rmap(page, false);
 			if (unlikely(page_mapcount(page) < 0))
 				print_bad_pte(vma, addr, ptent, page);
-			if (unlikely(!__tlb_remove_page(tlb, page))) {
+			if (unlikely(__tlb_remove_page(tlb, page))) {
 				force_flush = 1;
+				pending_page = page;
 				addr += PAGE_SIZE;
 				break;
 			}
@@ -1202,7 +1205,12 @@ again:
 	if (force_flush) {
 		force_flush = 0;
 		tlb_flush_mmu_free(tlb);
-
+		if (pending_page) {
+			/* remove the page with new size */
+			__tlb_adjust_range(tlb, tlb->addr);
+			__tlb_remove_page(tlb, pending_page);
+			pending_page = NULL;
+		}
 		if (addr != end)
 			goto again;
 	}
-- 
2.7.4


* [PATCH 3/4] mm/mmu_gather: Track page size with mmu gather and force flush if page size change
  2016-06-02  9:39 [PATCH 1/4] mm/hugetlb: Simplify hugetlb unmap Aneesh Kumar K.V
  2016-06-02  9:39 ` [PATCH 2/4] mm: Change the interface for __tlb_remove_page Aneesh Kumar K.V
@ 2016-06-02  9:39 ` Aneesh Kumar K.V
  2016-06-02  9:39 ` [PATCH 4/4] powerpc/mm/radix: Implement tlb mmu gather flush efficiently Aneesh Kumar K.V
  2 siblings, 0 replies; 6+ messages in thread
From: Aneesh Kumar K.V @ 2016-06-02  9:39 UTC (permalink / raw)
  To: akpm, mpe; +Cc: linux-mm, linux-kernel, Aneesh Kumar K.V

This allows an architecture that needs special handling for different
page sizes when flushing the TLB to implement it in the mmu gather.
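
The core of the idea, as done in __tlb_remove_page_size() further down
(sketch only):

	if (!tlb->page_size)
		tlb->page_size = page_size;
	else if (tlb->page_size != page_size)
		return true;	/* size changed: caller must flush and retry */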

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/arm/include/asm/tlb.h  | 18 +++++++++++++++
 arch/ia64/include/asm/tlb.h | 18 +++++++++++++++
 arch/s390/include/asm/tlb.h | 18 +++++++++++++++
 arch/sh/include/asm/tlb.h   | 18 +++++++++++++++
 arch/um/include/asm/tlb.h   | 18 +++++++++++++++
 include/asm-generic/tlb.h   | 56 +++++++++++++++++++++++++++++++++------------
 mm/huge_memory.c            |  2 +-
 mm/hugetlb.c                |  2 +-
 mm/memory.c                 | 13 ++++++++---
 9 files changed, 144 insertions(+), 19 deletions(-)

diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 45dea952b0e6..1e25cd80589e 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -225,6 +225,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	}
 }
 
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+					 struct page *page)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	return tlb_remove_page(tlb, page);
+}
+
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	unsigned long addr)
 {
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 85005ab513e9..77e541cf0e5d 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -242,6 +242,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	}
 }
 
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+					 struct page *page)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	return tlb_remove_page(tlb, page);
+}
+
 /*
  * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
  * PTE, not just those pointing to (normal) physical memory.
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 6b98cb3601d5..15711de10403 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -98,6 +98,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	free_page_and_swap_cache(page);
 }
 
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+					 struct page *page)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	return tlb_remove_page(tlb, page);
+}
+
 /*
  * pte_free_tlb frees a pte table and clears the CRSTE for the
  * page table from the tlb.
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 3dec5e0734f5..025cdb1032f6 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -109,6 +109,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	__tlb_remove_page(tlb, page);
 }
 
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+					 struct page *page)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	return tlb_remove_page(tlb, page);
+}
+
 #define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
 #define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
 #define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index c6638f8e5e90..821ff0acfe17 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -110,6 +110,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	__tlb_remove_page(tlb, page);
 }
 
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+					 struct page *page)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	return tlb_remove_page(tlb, page);
+}
+
 /**
  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
  *
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 2ac8fe202e9a..3ca36c111b47 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,6 +112,7 @@ struct mmu_gather {
 	 * that that we can adjust the range after the flush
 	 */
 	unsigned long addr;
+	int page_size;
 };
 
 #define HAVE_GENERIC_MMU_GATHER
@@ -120,25 +121,16 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 							unsigned long end);
-bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
-
-/* tlb_remove_page
- *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
- *	required.
- */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	if (__tlb_remove_page(tlb, page)) {
-		tlb_flush_mmu(tlb);
-		__tlb_adjust_range(tlb, tlb->addr);
-		__tlb_remove_page(tlb, page);
-	}
-}
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+				   int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address)
 {
 	tlb->start = min(tlb->start, address);
+	/*
+	 * IS it enough to update the range by PAGE_SIZE ?
+	 */
 	tlb->end = max(tlb->end, address + PAGE_SIZE);
 	/*
 	 * Track the last address with which we adjusted the range. This
@@ -148,6 +140,42 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 	tlb->addr = address;
 }
 
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	if (__tlb_remove_page_size(tlb, page, page_size)) {
+		tlb_flush_mmu(tlb);
+		tlb->page_size = page_size;
+		__tlb_adjust_range(tlb, tlb->addr);
+		__tlb_remove_page_size(tlb, page, page_size);
+	}
+}
+
+static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
+/* tlb_remove_page
+ *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
+ *	required.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+/*
+ * Used on reset
+ */
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
+{
+	/* active->nr should be zero when we call this */
+	VM_BUG_ON_PAGE(tlb->active->nr, page);
+	tlb->page_size = PAGE_SIZE;
+	__tlb_adjust_range(tlb, tlb->addr);
+	return __tlb_remove_page(tlb, page);
+}
+
 static inline void __tlb_reset_range(struct mmu_gather *tlb)
 {
 	if (tlb->fullmm) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9ed58530f695..a5711093a829 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1694,7 +1694,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
 		atomic_long_dec(&tlb->mm->nr_ptes);
 		spin_unlock(ptl);
-		tlb_remove_page(tlb, page);
+		tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
 	}
 	return 1;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8dd91cd5571c..3495c519583d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3211,7 +3211,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		page_remove_rmap(page, true);
 
 		spin_unlock(ptl);
-		tlb_remove_page(tlb, page);
+		tlb_remove_page_size(tlb, page, huge_page_size(h));
 		/*
 		 * Bail out after unmapping reference page if supplied
 		 */
diff --git a/mm/memory.c b/mm/memory.c
index a01db5bc756b..c2e7ea955f06 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -233,6 +233,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
 #endif
+	tlb->page_size = 0;
 
 	__tlb_reset_range(tlb);
 }
@@ -294,12 +295,19 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
  *	When out of page slots we must call tlb_flush_mmu().
  *returns true if the caller should flush.
  */
-bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
 {
 	struct mmu_gather_batch *batch;
 
 	VM_BUG_ON(!tlb->end);
 
+	if (!tlb->page_size)
+		tlb->page_size = page_size;
+	else {
+		if (page_size != tlb->page_size)
+			return true;
+	}
+
 	batch = tlb->active;
 	if (batch->nr == batch->max) {
 		if (!tlb_next_batch(tlb))
@@ -1207,8 +1215,7 @@ again:
 		tlb_flush_mmu_free(tlb);
 		if (pending_page) {
 			/* remove the page with new size */
-			__tlb_adjust_range(tlb, tlb->addr);
-			__tlb_remove_page(tlb, pending_page);
+			__tlb_remove_pte_page(tlb, pending_page);
 			pending_page = NULL;
 		}
 		if (addr != end)
-- 
2.7.4


* [PATCH 4/4] powerpc/mm/radix: Implement tlb mmu gather flush efficiently
  2016-06-02  9:39 [PATCH 1/4] mm/hugetlb: Simplify hugetlb unmap Aneesh Kumar K.V
  2016-06-02  9:39 ` [PATCH 2/4] mm: Change the interface for __tlb_remove_page Aneesh Kumar K.V
  2016-06-02  9:39 ` [PATCH 3/4] mm/mmu_gather: Track page size with mmu gather and force flush if page size change Aneesh Kumar K.V
@ 2016-06-02  9:39 ` Aneesh Kumar K.V
  2016-06-02 20:12   ` Andrew Morton
  2 siblings, 1 reply; 6+ messages in thread
From: Aneesh Kumar K.V @ 2016-06-02  9:39 UTC (permalink / raw)
  To: akpm, mpe; +Cc: linux-mm, linux-kernel, Aneesh Kumar K.V

Now that we track page size in mmu_gather, we can use address based
tlbie format when doing a tlb_flush(). We don't do this if we are
invalidating the full address space.
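
In essence the flush decision becomes (sketch; the real change is in
the diff below):

	int psize = radix_get_mmu_psize(tlb->page_size);

	if (psize == -1 || tlb->fullmm || tlb->need_flush_all)
		radix__flush_tlb_mm(mm);	/* unknown size or full teardown */
	else
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);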

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/mm/tlb-radix.c | 28 +++++++++++++++++++++++++++-
 1 file changed, 27 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 6f06b0b04d71..e581a521a87e 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -261,11 +261,37 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 }
 EXPORT_SYMBOL(radix__flush_tlb_range);
 
+static int radix_get_mmu_psize(int page_size)
+{
+	int psize;
+
+	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
+		psize = mmu_virtual_psize;
+	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
+		psize = MMU_PAGE_2M;
+	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
+		psize = MMU_PAGE_1G;
+	else
+		return -1;
+	return psize;
+}
 
 void radix__tlb_flush(struct mmu_gather *tlb)
 {
+	int psize = 0;
 	struct mm_struct *mm = tlb->mm;
-	radix__flush_tlb_mm(mm);
+	int page_size = tlb->page_size;
+
+	psize = radix_get_mmu_psize(page_size);
+	if (psize == -1)
+		/* unknown page size */
+		goto flush_mm;
+
+	if (!tlb->fullmm && !tlb->need_flush_all)
+		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
+	else
+flush_mm:
+		radix__flush_tlb_mm(mm);
 }
 /*
  * flush the page walk cache for the address
-- 
2.7.4


* Re: [PATCH 4/4] powerpc/mm/radix: Implement tlb mmu gather flush efficiently
  2016-06-02  9:39 ` [PATCH 4/4] powerpc/mm/radix: Implement tlb mmu gather flush efficiently Aneesh Kumar K.V
@ 2016-06-02 20:12   ` Andrew Morton
  2016-06-03  2:56     ` Aneesh Kumar K.V
  0 siblings, 1 reply; 6+ messages in thread
From: Andrew Morton @ 2016-06-02 20:12 UTC (permalink / raw)
  To: Aneesh Kumar K.V; +Cc: mpe, linux-mm, linux-kernel, Benjamin Herrenschmidt

On Thu,  2 Jun 2016 15:09:49 +0530 "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> wrote:

> Now that we track page size in mmu_gather, we can use address based
> tlbie format when doing a tlb_flush(). We don't do this if we are
> invalidating the full address space.
> 
> ...
>
>  void radix__tlb_flush(struct mmu_gather *tlb)
>  {
> +	int psize = 0;
>  	struct mm_struct *mm = tlb->mm;
> -	radix__flush_tlb_mm(mm);
> +	int page_size = tlb->page_size;
> +
> +	psize = radix_get_mmu_psize(page_size);
> +	if (psize == -1)
> +		/* unknown page size */
> +		goto flush_mm;
> +
> +	if (!tlb->fullmm && !tlb->need_flush_all)
> +		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
> +	else
> +flush_mm:
> +		radix__flush_tlb_mm(mm);

That's kinda ugly.  What about

void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);

	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	else
		radix__flush_tlb_mm(mm);
}

?

We lost the comment, but that can be neatly addressed by documenting
radix_get_mmu_psize() (of course!).  Please send along a comment to do
this and I'll add it in.
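
Something along these lines, perhaps (just a sketch, not necessarily
the wording to merge):

	/*
	 * Map a linear page size (4K/64K/2M/1G) to the corresponding radix
	 * MMU page-size index.  Returns -1 if the size is not one we can
	 * flush by range, in which case the caller falls back to flushing
	 * the whole mm.
	 */
	static int radix_get_mmu_psize(int page_size)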

--- a/arch/powerpc/mm/tlb-radix.c~powerpc-mm-radix-implement-tlb-mmu-gather-flush-efficiently-fix
+++ a/arch/powerpc/mm/tlb-radix.c
@@ -265,13 +265,9 @@ void radix__tlb_flush(struct mmu_gather
 	int page_size = tlb->page_size;
 
 	psize = radix_get_mmu_psize(page_size);
-	if (psize == -1)
-		/* unknown page size */
-		goto flush_mm;
 
-	if (!tlb->fullmm && !tlb->need_flush_all)
+	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
 		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
 	else
-flush_mm:
 		radix__flush_tlb_mm(mm);
 }
_

I'll await feedback from the other PPC developers before doing anything
further on this patchset.

hm, no ppc mailing lists were cc'ed.  Regrettable.


* Re: [PATCH 4/4] powerpc/mm/radix: Implement tlb mmu gather flush efficiently
  2016-06-02 20:12   ` Andrew Morton
@ 2016-06-03  2:56     ` Aneesh Kumar K.V
  0 siblings, 0 replies; 6+ messages in thread
From: Aneesh Kumar K.V @ 2016-06-03  2:56 UTC (permalink / raw)
  To: Andrew Morton; +Cc: mpe, linux-mm, linux-kernel, Benjamin Herrenschmidt

Andrew Morton <akpm@linux-foundation.org> writes:

> On Thu,  2 Jun 2016 15:09:49 +0530 "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> wrote:
>
>> Now that we track page size in mmu_gather, we can use address based
>> tlbie format when doing a tlb_flush(). We don't do this if we are
>> invalidating the full address space.
>> 
>> ...
>>
>>  void radix__tlb_flush(struct mmu_gather *tlb)
>>  {
>> +	int psize = 0;
>>  	struct mm_struct *mm = tlb->mm;
>> -	radix__flush_tlb_mm(mm);
>> +	int page_size = tlb->page_size;
>> +
>> +	psize = radix_get_mmu_psize(page_size);
>> +	if (psize == -1)
>> +		/* unknown page size */
>> +		goto flush_mm;
>> +
>> +	if (!tlb->fullmm && !tlb->need_flush_all)
>> +		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
>> +	else
>> +flush_mm:
>> +		radix__flush_tlb_mm(mm);
>
> That's kinda ugly.  What about
>
> void radix__tlb_flush(struct mmu_gather *tlb)
> {
> 	int psize = 0;
> 	struct mm_struct *mm = tlb->mm;
> 	int page_size = tlb->page_size;
>
> 	psize = radix_get_mmu_psize(page_size);
>
> 	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
> 		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
> 	else
> 		radix__flush_tlb_mm(mm);
> }
>
> ?
>
> We lost the comment, but that can be neatly addressed by documenting
> radix_get_mmu_psize() (of course!).  Please send along a comment to do
> this and I'll add it in.


I will update the patch. But this patch (patch 4) needs to go through
the powerpc tree, because radix__flush_tlb_range_psize is not yet
upstream. As I mentioned in the previous thread, if you can take
patches 1 to 3, that will enable wider testing w.r.t. other archs, and
the ppc64-related changes can go later via the powerpc tree?

>
> --- a/arch/powerpc/mm/tlb-radix.c~powerpc-mm-radix-implement-tlb-mmu-gather-flush-efficiently-fix
> +++ a/arch/powerpc/mm/tlb-radix.c
> @@ -265,13 +265,9 @@ void radix__tlb_flush(struct mmu_gather
>  	int page_size = tlb->page_size;
>
>  	psize = radix_get_mmu_psize(page_size);
> -	if (psize == -1)
> -		/* unknown page size */
> -		goto flush_mm;
>
> -	if (!tlb->fullmm && !tlb->need_flush_all)
> +	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
>  		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
>  	else
> -flush_mm:
>  		radix__flush_tlb_mm(mm);
>  }
> _
>
> I'll await feedback from the other PPC developers before doing anything
> further on this patchset.
>
> hm, no ppc mailing lists were cc'ed.  Regrettable.

I missed that. I can resend the series with ppc-devel added to Cc?

-aneesh

