From: Qi Zheng <zhengqi.arch@bytedance.com>
To: akpm@linux-foundation.org, tglx@linutronix.de,
	hannes@cmpxchg.org, mhocko@kernel.org, vdavydov.dev@gmail.com
Cc: linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, songmuchun@bytedance.com,
	Qi Zheng <zhengqi.arch@bytedance.com>
Subject: [PATCH 7/7] mm: use mmu_gather to free PTE page table
Date: Sun, 18 Jul 2021 12:30:33 +0800
Message-ID: <20210718043034.76431-8-zhengqi.arch@bytedance.com>
In-Reply-To: <20210718043034.76431-1-zhengqi.arch@bytedance.com>

In unmap_region() and other paths that already carry an
mmu_gather, we can reuse @tlb to batch the freeing of PTE
page tables, which reduces the number of TLB flushes.
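
To illustrate the intended calling convention, here is a sketch
(not part of this patch; example_zap_one_pmd() is a made-up name):

	static void example_zap_one_pmd(struct mmu_gather *tlb,
					struct vm_area_struct *vma,
					pmd_t *pmd, unsigned long addr)
	{
		/* ... zap the PTEs under the PTE lock, as zap_pte_range() does ... */

		/*
		 * Drop the reference pinned earlier (e.g. by pte_try_get()).
		 * If the refcount hits zero, free_pte_table() sees a
		 * non-NULL @tlb and hands the page table to pte_free_tlb(),
		 * so it is freed after the batched TLB flush (RCU-deferred
		 * where the architecture selects MMU_GATHER_RCU_TABLE_FREE).
		 */
		pte_put_tlb(tlb, vma->vm_mm, pmd, addr);
	}

Callers that have no mmu_gather at hand keep using pte_put(), which
now goes through pte_put_many_tlb(NULL, ...) and falls back to an
immediate flush_tlb_range() plus an RCU-deferred free in
free_pte_table().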

Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
---
 arch/x86/Kconfig        |  2 +-
 include/linux/pte_ref.h | 32 +++++++++++++++++++++++++++-----
 mm/madvise.c            |  4 ++--
 mm/memory.c             |  4 ++--
 mm/mmu_gather.c         | 40 +++++++++++++++++-----------------------
 mm/pte_ref.c            | 12 +++++++++---
 6 files changed, 58 insertions(+), 36 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 761ad0830dd6..1f873c2d5f51 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -233,7 +233,7 @@ config X86
 	select HAVE_PCI
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
-	select MMU_GATHER_RCU_TABLE_FREE		if PARAVIRT
+	select MMU_GATHER_RCU_TABLE_FREE		if PARAVIRT || FREE_USER_PTE
 	select HAVE_POSIX_CPU_TIMERS_TASK_WORK
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE		if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
diff --git a/include/linux/pte_ref.h b/include/linux/pte_ref.h
index f4d20faab684..bf6cd09b67d7 100644
--- a/include/linux/pte_ref.h
+++ b/include/linux/pte_ref.h
@@ -18,7 +18,8 @@ int __pte_alloc_try_get(struct mm_struct *mm, pmd_t *pmd);
 int __pte_alloc_get(struct mm_struct *mm, pmd_t *pmd);
 
 #ifdef CONFIG_FREE_USER_PTE
-void free_pte_table(struct mm_struct *mm, pmd_t *pmdp, unsigned long addr);
+void free_pte_table(struct mmu_gather *tlb, struct mm_struct *mm, pmd_t *pmdp,
+		    unsigned long addr);
 
 static inline void pte_ref_init(pgtable_t pte, pmd_t *pmd, int count)
 {
@@ -62,7 +63,6 @@ static inline bool pte_get_unless_zero(pmd_t *pmdp)
 {
 	pgtable_t pte = pmd_pgtable(*pmdp);
 
-	VM_BUG_ON(!PageTable(pte));
 	return atomic_inc_not_zero(&pte->pte_refcount);
 }
 
@@ -97,8 +97,8 @@ static inline bool pte_try_get(pmd_t *pmdp)
  * i_mmap_lock or when parallel threads are excluded by other means
  * which can make @pmdp entry stable.
  */
-static inline void pte_put_many(struct mm_struct *mm, pmd_t *pmdp,
-				unsigned long addr, unsigned int nr)
+static inline void pte_put_many_tlb(struct mmu_gather *tlb, struct mm_struct *mm,
+				    pmd_t *pmdp, unsigned long addr, unsigned int nr)
 {
 	pgtable_t pte = pmd_pgtable(*pmdp);
 
@@ -106,7 +106,19 @@ static inline void pte_put_many(struct mm_struct *mm, pmd_t *pmdp,
 	VM_BUG_ON(pmd_devmap_trans_unstable(pmdp));
 	VM_BUG_ON(pte->pmd != pmdp);
 	if (atomic_sub_and_test(nr, &pte->pte_refcount))
-		free_pte_table(mm, pmdp, addr & PMD_MASK);
+		free_pte_table(tlb, mm, pmdp, addr & PMD_MASK);
+}
+
+static inline void pte_put_tlb(struct mmu_gather *tlb, struct mm_struct *mm,
+			       pmd_t *pmdp, unsigned long addr)
+{
+	pte_put_many_tlb(tlb, mm, pmdp, addr, 1);
+}
+
+static inline void pte_put_many(struct mm_struct *mm, pmd_t *pmdp,
+				unsigned long addr, unsigned int nr)
+{
+	pte_put_many_tlb(NULL, mm, pmdp, addr, nr);
 }
 
 static inline void pte_put(struct mm_struct *mm, pmd_t *pmdp, unsigned long addr)
@@ -180,6 +192,16 @@ static inline bool pte_try_get(pmd_t *pmdp)
 	return true;
 }
 
+static inline void pte_put_many_tlb(struct mmu_gather *tlb, struct mm_struct *mm,
+				    pmd_t *pmdp, unsigned long addr, unsigned int nr)
+{
+}
+
+static inline void pte_put_tlb(struct mmu_gather *tlb, struct mm_struct *mm,
+			       pmd_t *pmdp, unsigned long addr)
+{
+}
+
 static inline void pte_put_many(struct mm_struct *mm, pmd_t *pmdp,
 				unsigned long addr, unsigned int value)
 {
diff --git a/mm/madvise.c b/mm/madvise.c
index 0e849bbf268b..117bc5aad0f8 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -477,7 +477,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(orig_pte, ptl);
-	pte_put(vma->vm_mm, pmd, start);
+	pte_put_tlb(tlb, vma->vm_mm, pmd, start);
 	if (pageout)
 		reclaim_pages(&page_list);
 	cond_resched();
@@ -709,7 +709,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(orig_pte, ptl);
 	if (nr_put)
-		pte_put_many(mm, pmd, start, nr_put);
+		pte_put_many_tlb(tlb, mm, pmd, start, nr_put);
 	cond_resched();
 next:
 	return 0;
diff --git a/mm/memory.c b/mm/memory.c
index c8ee0074c730..4f49e8135000 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1438,7 +1438,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	}
 
 	if (nr_put)
-		pte_put_many(mm, pmd, start, nr_put);
+		pte_put_many_tlb(tlb, mm, pmd, start, nr_put);
 
 	return addr;
 }
@@ -1485,7 +1485,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 			goto next;
 
 		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
-		pte_put(tlb->mm, pmd, addr);
+		pte_put_tlb(tlb, tlb->mm, pmd, addr);
 next:
 		cond_resched();
 	} while (pmd++, addr = next, addr != end);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 1b9837419bf9..1bd9fa889421 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -134,42 +134,42 @@ static void __tlb_remove_table_free(struct mmu_table_batch *batch)
  *
  */
 
-static void tlb_remove_table_smp_sync(void *arg)
+static void tlb_remove_table_rcu(struct rcu_head *head)
 {
-	/* Simply deliver the interrupt */
+	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
 }
 
-static void tlb_remove_table_sync_one(void)
+static void tlb_remove_table_free(struct mmu_table_batch *batch)
 {
-	/*
-	 * This isn't an RCU grace period and hence the page-tables cannot be
-	 * assumed to be actually RCU-freed.
-	 *
-	 * It is however sufficient for software page-table walkers that rely on
-	 * IRQ disabling.
-	 */
-	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+	call_rcu(&batch->rcu, tlb_remove_table_rcu);
 }
 
-static void tlb_remove_table_rcu(struct rcu_head *head)
+static void tlb_remove_table_one_rcu(struct rcu_head *head)
 {
-	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
+	struct page *page = container_of(head, struct page, rcu_head);
+
+	__tlb_remove_table(page);
 }
 
-static void tlb_remove_table_free(struct mmu_table_batch *batch)
+static void tlb_remove_table_one(void *table)
 {
-	call_rcu(&batch->rcu, tlb_remove_table_rcu);
+	pgtable_t page = (pgtable_t)table;
+
+	call_rcu(&page->rcu_head, tlb_remove_table_one_rcu);
 }
 
 #else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */
 
-static void tlb_remove_table_sync_one(void) { }
-
 static void tlb_remove_table_free(struct mmu_table_batch *batch)
 {
 	__tlb_remove_table_free(batch);
 }
 
+static void tlb_remove_table_one(void *table)
+{
+	__tlb_remove_table(table);
+}
+
 #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
 
 /*
@@ -187,12 +187,6 @@ static inline void tlb_table_invalidate(struct mmu_gather *tlb)
 	}
 }
 
-static void tlb_remove_table_one(void *table)
-{
-	tlb_remove_table_sync_one();
-	__tlb_remove_table(table);
-}
-
 static void tlb_table_flush(struct mmu_gather *tlb)
 {
 	struct mmu_table_batch **batch = &tlb->batch;
diff --git a/mm/pte_ref.c b/mm/pte_ref.c
index 7fd3d687a9cd..92fb73c35d81 100644
--- a/mm/pte_ref.c
+++ b/mm/pte_ref.c
@@ -10,6 +10,7 @@
 #include <linux/pte_ref.h>
 #include <linux/hugetlb.h>
 #include <asm/tlbflush.h>
+#include <asm/tlb.h>
 
 #ifdef CONFIG_DEBUG_VM
 static void pte_free_debug(pmd_t pmd)
@@ -34,7 +35,8 @@ static void pte_free_rcu(struct rcu_head *rcu)
 	__free_page(page);
 }
 
-void free_pte_table(struct mm_struct *mm, pmd_t *pmdp, unsigned long addr)
+void free_pte_table(struct mmu_gather *tlb, struct mm_struct *mm,
+		    pmd_t *pmdp, unsigned long addr)
 {
 	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
 	spinlock_t *ptl;
@@ -45,9 +47,13 @@ void free_pte_table(struct mm_struct *mm, pmd_t *pmdp, unsigned long addr)
 	spin_unlock(ptl);
 
 	pte_free_debug(pmd);
-	flush_tlb_range(&vma, addr, addr + PMD_SIZE);
+	if (!tlb) {
+		flush_tlb_range(&vma, addr, addr + PMD_SIZE);
+		call_rcu(&pmd_pgtable(pmd)->rcu_head, pte_free_rcu);
+	} else {
+		pte_free_tlb(tlb, pmd_pgtable(pmd), addr);
+	}
 	mm_dec_nr_ptes(mm);
-	call_rcu(&pmd_pgtable(pmd)->rcu_head, pte_free_rcu);
 }
 
 static inline void __pte_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
-- 
2.11.0

Thread overview: 27+ messages
2021-07-18  4:30 [PATCH 0/7] Free user PTE page table pages Qi Zheng
2021-07-18  4:30 ` [PATCH 1/7] mm: fix the deadlock in finish_fault() Qi Zheng
2021-07-18 21:28   ` Kirill A. Shutemov
2021-07-19  9:53     ` Qi Zheng
2021-07-20 23:14       ` Andrew Morton
2021-07-21  2:21         ` Qi Zheng
2021-07-18  4:30 ` [PATCH 2/7] mm: introduce pte_install() helper Qi Zheng
2021-07-18 21:31   ` Kirill A. Shutemov
2021-07-19 10:20     ` Qi Zheng
2021-07-18  4:30 ` [PATCH 3/7] mm: remove redundant smp_wmb() Qi Zheng
2021-07-27 13:39   ` Muchun Song
2021-07-18  4:30 ` [PATCH 4/7] mm: rework the parameter of lock_page_or_retry() Qi Zheng
2021-07-18  4:30 ` [PATCH 5/7] mm: free user PTE page table pages Qi Zheng
2021-07-18  6:19   ` Mika Penttilä
2021-07-19 12:56     ` Qi Zheng
2021-07-19 13:55       ` Mika Penttilä
2021-07-19 14:12         ` Qi Zheng
2021-07-19 14:17           ` Mika Penttilä
2021-07-18 22:01   ` Kirill A. Shutemov
2021-07-19 13:55     ` Qi Zheng
2021-07-18  4:30 ` [PATCH 6/7] mm: defer freeing PTE page table for a grace period Qi Zheng
2021-07-18  4:30 ` [PATCH 7/7] mm: use mmu_gather to free PTE page table Qi Zheng [this message]
2021-07-19  7:34 ` [PATCH 0/7] Free user PTE page table pages David Hildenbrand
2021-07-19 11:28   ` David Hildenbrand
2021-07-19 12:42     ` Muchun Song
2021-07-19 13:30       ` Muchun Song
2021-07-20  4:00     ` Qi Zheng
