All of lore.kernel.org
 help / color / mirror / Atom feed
* [RFC PATCH 1/4] mm/hugetlb: Simplify hugetlb unmap
@ 2016-05-30  5:44 ` Aneesh Kumar K.V
  0 siblings, 0 replies; 22+ messages in thread
From: Aneesh Kumar K.V @ 2016-05-30  5:44 UTC (permalink / raw)
  To: akpm, linux-arch; +Cc: linux-mm, linux-kernel, Aneesh Kumar K.V

For hugetlb, as with THP (and unlike regular pages), we do the TLB flush
after dropping the ptl. Because of that, we don't need to track
force_flush as we do now. Instead we can simply call tlb_remove_page(),
which will do the flush if needed.

No functionality change in this patch.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 mm/hugetlb.c | 54 +++++++++++++++++++++---------------------------------
 1 file changed, 21 insertions(+), 33 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e4168484f249..8dd91cd5571c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3138,7 +3138,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    unsigned long start, unsigned long end,
 			    struct page *ref_page)
 {
-	int force_flush = 0;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
 	pte_t *ptep;
@@ -3157,19 +3156,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	tlb_start_vma(tlb, vma);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	address = start;
-again:
 	for (; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
 
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, &address, ptep))
-			goto unlock;
+		if (huge_pmd_unshare(mm, &address, ptep)) {
+			spin_unlock(ptl);
+			continue;
+		}
 
 		pte = huge_ptep_get(ptep);
-		if (huge_pte_none(pte))
-			goto unlock;
+		if (huge_pte_none(pte)) {
+			spin_unlock(ptl);
+			continue;
+		}
 
 		/*
 		 * Migrating hugepage or HWPoisoned hugepage is already
@@ -3177,7 +3179,8 @@ again:
 		 */
 		if (unlikely(!pte_present(pte))) {
 			huge_pte_clear(mm, address, ptep);
-			goto unlock;
+			spin_unlock(ptl);
+			continue;
 		}
 
 		page = pte_page(pte);
@@ -3187,9 +3190,10 @@ again:
 		 * are about to unmap is the actual page of interest.
 		 */
 		if (ref_page) {
-			if (page != ref_page)
-				goto unlock;
-
+			if (page != ref_page) {
+				spin_unlock(ptl);
+				continue;
+			}
 			/*
 			 * Mark the VMA as having unmapped its page so that
 			 * future faults in this VMA will fail rather than
@@ -3205,30 +3209,14 @@ again:
 
 		hugetlb_count_sub(pages_per_huge_page(h), mm);
 		page_remove_rmap(page, true);
-		force_flush = !__tlb_remove_page(tlb, page);
-		if (force_flush) {
-			address += sz;
-			spin_unlock(ptl);
-			break;
-		}
-		/* Bail out after unmapping reference page if supplied */
-		if (ref_page) {
-			spin_unlock(ptl);
-			break;
-		}
-unlock:
+
 		spin_unlock(ptl);
-	}
-	/*
-	 * mmu_gather ran out of room to batch pages, we break out of
-	 * the PTE lock to avoid doing the potential expensive TLB invalidate
-	 * and page-free while holding it.
-	 */
-	if (force_flush) {
-		force_flush = 0;
-		tlb_flush_mmu(tlb);
-		if (address < end && !ref_page)
-			goto again;
+		tlb_remove_page(tlb, page);
+		/*
+		 * Bail out after unmapping reference page if supplied
+		 */
+		if (ref_page)
+			break;
 	}
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	tlb_end_vma(tlb, vma);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 22+ messages in thread
[parent not found: <001701d1ba44$b9c0d560$2d428020$@alibaba-inc.com>]

end of thread, other threads:[~2016-06-02  8:21 UTC | newest]

Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-05-30  5:44 [RFC PATCH 1/4] mm/hugetlb: Simplify hugetlb unmap Aneesh Kumar K.V
2016-05-30  5:44 ` Aneesh Kumar K.V
2016-05-30  5:44 ` [RFC PATCH 2/4] mm: Change the interface for __tlb_remove_page Aneesh Kumar K.V
2016-05-30  5:44   ` Aneesh Kumar K.V
2016-05-30  5:44 ` [RFC PATCH 3/4] mm/mmu_gather: Track page size with mmu gather and force flush if page size change Aneesh Kumar K.V
2016-05-30  5:44   ` Aneesh Kumar K.V
2016-05-30  5:44 ` [RFC PATCH 4/4] powerpc/mm/radix: Implement tlb mmu gather flush efficiently Aneesh Kumar K.V
2016-05-30  5:44   ` Aneesh Kumar K.V
2016-06-01 22:34 ` [RFC PATCH 1/4] mm/hugetlb: Simplify hugetlb unmap Andrew Morton
2016-06-01 22:34   ` Andrew Morton
2016-06-02  8:21   ` Aneesh Kumar K.V
2016-06-02  8:21     ` Aneesh Kumar K.V
     [not found] <001701d1ba44$b9c0d560$2d428020$@alibaba-inc.com>
2016-05-30  8:07 ` [RFC PATCH 2/4] mm: Change the interface for __tlb_remove_page Hillf Danton
2016-05-30  8:07   ` Hillf Danton
2016-05-30 15:34   ` Aneesh Kumar K.V
2016-05-30 15:34     ` Aneesh Kumar K.V
2016-05-31  3:52     ` Hillf Danton
2016-05-31  3:52       ` Hillf Danton
2016-05-31  6:50       ` Aneesh Kumar K.V
2016-05-31  6:50         ` Aneesh Kumar K.V
2016-05-31  7:26         ` Hillf Danton
2016-05-31  7:26           ` Hillf Danton

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.