From: "Zach O'Keefe" <zokeefe@google.com>
To: Alex Shi <alex.shi@linux.alibaba.com>,
	David Hildenbrand <david@redhat.com>,
	 David Rientjes <rientjes@google.com>,
	Matthew Wilcox <willy@infradead.org>,
	 Michal Hocko <mhocko@suse.com>,
	Pasha Tatashin <pasha.tatashin@soleen.com>,
	 Peter Xu <peterx@redhat.com>, SeongJae Park <sj@kernel.org>,
	Song Liu <songliubraving@fb.com>,
	 Vlastimil Babka <vbabka@suse.cz>, Yang Shi <shy828301@gmail.com>,
	Zi Yan <ziy@nvidia.com>,
	 linux-mm@kvack.org
Cc: Andrea Arcangeli <aarcange@redhat.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	 Arnd Bergmann <arnd@arndb.de>,
	Axel Rasmussen <axelrasmussen@google.com>,
	 Chris Kennelly <ckennelly@google.com>,
	Chris Zankel <chris@zankel.net>, Helge Deller <deller@gmx.de>,
	 Hugh Dickins <hughd@google.com>,
	Ivan Kokshaysky <ink@jurassic.park.msu.ru>,
	 "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
	Jens Axboe <axboe@kernel.dk>,
	 "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
	Matt Turner <mattst88@gmail.com>,
	 Max Filippov <jcmvbkbc@gmail.com>,
	Miaohe Lin <linmiaohe@huawei.com>,
	 Minchan Kim <minchan@kernel.org>,
	Patrick Xia <patrickx@google.com>,
	 Pavel Begunkov <asml.silence@gmail.com>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	 "Zach O'Keefe" <zokeefe@google.com>
Subject: [PATCH v4 09/13] mm/khugepaged: rename prefix of shared collapse functions
Date: Mon,  2 May 2022 11:17:10 -0700
Message-ID: <20220502181714.3483177-10-zokeefe@google.com>
In-Reply-To: <20220502181714.3483177-1-zokeefe@google.com>

The following functions/tracepoints are shared between khugepaged and
madvise collapse contexts.  Replace the "khugepaged_" prefix with the
generic "hpage_collapse_" prefix in such cases:

huge_memory:mm_khugepaged_scan_pmd -> huge_memory:mm_hpage_collapse_scan_pmd
khugepaged_test_exit() -> hpage_collapse_test_exit()
khugepaged_scan_abort() -> hpage_collapse_scan_abort()
khugepaged_scan_pmd() -> hpage_collapse_scan_pmd()
khugepaged_find_target_node() -> hpage_collapse_find_target_node()

Signed-off-by: Zach O'Keefe <zokeefe@google.com>
---
 include/trace/events/huge_memory.h |  2 +-
 mm/khugepaged.c                    | 72 ++++++++++++++++--------------
 2 files changed, 39 insertions(+), 35 deletions(-)

diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 55392bf30a03..fb6c73632ff3 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -48,7 +48,7 @@ SCAN_STATUS
 #define EM(a, b)	{a, b},
 #define EMe(a, b)	{a, b}
 
-TRACE_EVENT(mm_khugepaged_scan_pmd,
+TRACE_EVENT(mm_hpage_collapse_scan_pmd,
 
 	TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
 		 int referenced, int none_or_zero, int status, int unmapped),
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 3ba2c570da5e..9f1b7e9e78c2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -96,7 +96,7 @@ struct collapse_control {
 	/* Num pages scanned per node */
 	int node_load[MAX_NUMNODES];
 
-	/* Last target selected in khugepaged_find_target_node() */
+	/* Last target selected in hpage_collapse_find_target_node() */
 	int last_target_node;
 
 	struct page *hpage;
@@ -453,7 +453,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
 	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 }
 
-static inline int khugepaged_test_exit(struct mm_struct *mm)
+static inline int hpage_collapse_test_exit(struct mm_struct *mm)
 {
 	return atomic_read(&mm->mm_users) == 0;
 }
@@ -502,7 +502,7 @@ int __khugepaged_enter(struct mm_struct *mm)
 		return -ENOMEM;
 
 	/* __khugepaged_exit() must not run from under us */
-	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
+	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
 		free_mm_slot(mm_slot);
 		return 0;
@@ -566,11 +566,10 @@ void __khugepaged_exit(struct mm_struct *mm)
 	} else if (mm_slot) {
 		/*
 		 * This is required to serialize against
-		 * khugepaged_test_exit() (which is guaranteed to run
-		 * under mmap sem read mode). Stop here (after we
-		 * return all pagetables will be destroyed) until
-		 * khugepaged has finished working on the pagetables
-		 * under the mmap_lock.
+		 * hpage_collapse_test_exit() (which is guaranteed to run
+		 * under mmap sem read mode). Stop here (after we return all
+		 * pagetables will be destroyed) until khugepaged has finished
+		 * working on the pagetables under the mmap_lock.
 		 */
 		mmap_write_lock(mm);
 		mmap_write_unlock(mm);
@@ -807,7 +806,7 @@ static void khugepaged_alloc_sleep(void)
 	remove_wait_queue(&khugepaged_wait, &wait);
 }
 
-static bool khugepaged_scan_abort(int nid, struct collapse_control *cc)
+static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
 {
 	int i;
 
@@ -854,7 +853,7 @@ static bool alloc_hpage(gfp_t gfp, int node, struct collapse_control *cc)
 }
 
 #ifdef CONFIG_NUMA
-static int khugepaged_find_target_node(struct collapse_control *cc)
+static int hpage_collapse_find_target_node(struct collapse_control *cc)
 {
 	int nid, target_node = 0, max_value = 0;
 
@@ -901,7 +900,7 @@ static bool khugepaged_alloc_page(gfp_t gfp, int node,
 	return alloc_hpage(gfp, node, cc);
 }
 #else
-static int khugepaged_find_target_node(struct collapse_control *cc)
+static int hpage_collapse_find_target_node(struct collapse_control *cc)
 {
 	return 0;
 }
@@ -981,7 +980,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 	struct vm_area_struct *vma;
 	unsigned long hstart, hend;
 
-	if (unlikely(khugepaged_test_exit(mm)))
+	if (unlikely(hpage_collapse_test_exit(mm)))
 		return SCAN_ANY_PROCESS;
 
 	*vmap = vma = find_vma(mm, address);
@@ -1025,7 +1024,7 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
 
 /*
  * Bring missing pages in from swap, to complete THP collapse.
- * Only done if khugepaged_scan_pmd believes it is worthwhile.
+ * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
  *
  * Called and returns without pte mapped or spinlocks held,
  * but with mmap_lock held to protect against vma changes.
@@ -1092,7 +1091,7 @@ static int alloc_charge_hpage(struct mm_struct *mm, struct collapse_control *cc)
 	const struct cpumask *cpumask;
 #endif
 	gfp_t gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
-	int node = khugepaged_find_target_node(cc);
+	int node = hpage_collapse_find_target_node(cc);
 
 #ifdef CONFIG_NUMA
 	/* sched to specified node before huge page memory copy */
@@ -1262,9 +1261,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	return result;
 }
 
-static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
-			       unsigned long address, bool *mmap_locked,
-			       struct collapse_control *cc)
+static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+				   struct vm_area_struct *vma,
+				   unsigned long address, bool *mmap_locked,
+				   struct collapse_control *cc)
 {
 	pmd_t *pmd;
 	pte_t *pte, *_pte;
@@ -1358,7 +1358,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * hit record.
 		 */
 		node = page_to_nid(page);
-		if (khugepaged_scan_abort(node, cc)) {
+		if (hpage_collapse_scan_abort(node, cc)) {
 			result = SCAN_SCAN_ABORT;
 			goto out_unmap;
 		}
@@ -1423,8 +1423,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 					    unmapped, cc);
 	}
 out:
-	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
-				     none_or_zero, result, unmapped);
+	trace_mm_hpage_collapse_scan_pmd(mm, page, writable, referenced,
+					 none_or_zero, result, unmapped);
 	return result;
 }
 
@@ -1434,7 +1434,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 
 	lockdep_assert_held(&khugepaged_mm_lock);
 
-	if (khugepaged_test_exit(mm)) {
+	if (hpage_collapse_test_exit(mm)) {
 		/* free mm_slot */
 		hash_del(&mm_slot->hash);
 		list_del(&mm_slot->mm_node);
@@ -1605,7 +1605,7 @@ static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
 	if (!mmap_write_trylock(mm))
 		return;
 
-	if (unlikely(khugepaged_test_exit(mm)))
+	if (unlikely(hpage_collapse_test_exit(mm)))
 		goto out;
 
 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
@@ -1668,7 +1668,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 			 * it'll always mapped in small page size for uffd-wp
 			 * registered ranges.
 			 */
-			if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma))
+			if (!hpage_collapse_test_exit(mm) &&
+			    !userfaultfd_wp(vma))
 				collapse_and_free_pmd(mm, vma, addr, pmd);
 			mmap_write_unlock(mm);
 		} else {
@@ -2094,7 +2095,7 @@ static int khugepaged_scan_file(struct mm_struct *mm,
 		}
 
 		node = page_to_nid(page);
-		if (khugepaged_scan_abort(node, cc)) {
+		if (hpage_collapse_scan_abort(node, cc)) {
 			result = SCAN_SCAN_ABORT;
 			break;
 		}
@@ -2183,7 +2184,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	vma = NULL;
 	if (unlikely(!mmap_read_trylock(mm)))
 		goto breakouterloop_mmap_lock;
-	if (likely(!khugepaged_test_exit(mm)))
+	if (likely(!hpage_collapse_test_exit(mm)))
 		vma = find_vma(mm, khugepaged_scan.address);
 
 	progress++;
@@ -2191,7 +2192,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 		unsigned long hstart, hend;
 
 		cond_resched();
-		if (unlikely(khugepaged_test_exit(mm))) {
+		if (unlikely(hpage_collapse_test_exit(mm))) {
 			progress++;
 			break;
 		}
@@ -2217,7 +2218,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 			bool mmap_locked;
 
 			cond_resched();
-			if (unlikely(khugepaged_test_exit(mm)))
+			if (unlikely(hpage_collapse_test_exit(mm)))
 				goto breakouterloop;
 
 			VM_BUG_ON(khugepaged_scan.address < hstart ||
@@ -2234,9 +2235,10 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 							      cc);
 				fput(file);
 			} else {
-				result = khugepaged_scan_pmd(mm, vma,
-							     khugepaged_scan.address,
-							     &mmap_locked, cc);
+				result = hpage_collapse_scan_pmd(mm, vma,
+								 khugepaged_scan.address,
+								 &mmap_locked,
+								 cc);
 			}
 			if (result == SCAN_SUCCEED)
 				++khugepaged_pages_collapsed;
@@ -2260,7 +2262,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	 * Release the current mm_slot if this mm is about to die, or
 	 * if we scanned all vmas of this mm.
 	 */
-	if (khugepaged_test_exit(mm) || !vma) {
+	if (hpage_collapse_test_exit(mm) || !vma) {
 		/*
 		 * Make sure that if mm_users is reaching zero while
 		 * khugepaged runs here, khugepaged_exit will find
@@ -2500,7 +2502,8 @@ static int madvise_collapse_errno(enum scan_result r)
 static int madvise_alloc_charge_hpage(struct mm_struct *mm,
 				      struct collapse_control *cc)
 {
-	if (!alloc_hpage(GFP_TRANSHUGE, khugepaged_find_target_node(cc), cc))
+	if (!alloc_hpage(GFP_TRANSHUGE, hpage_collapse_find_target_node(cc),
+			 cc))
 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
 	if (unlikely(mem_cgroup_charge(page_folio(cc->hpage), mm,
 				       GFP_TRANSHUGE)))
@@ -2547,13 +2550,14 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 		cond_resched();
 		result = SCAN_FAIL;
 
-		if (unlikely(khugepaged_test_exit(mm))) {
+		if (unlikely(hpage_collapse_test_exit(mm))) {
 			result = SCAN_ANY_PROCESS;
 			break;
 		}
 
 		memset(cc.node_load, 0, sizeof(cc.node_load));
-		result = khugepaged_scan_pmd(mm, vma, addr, &mmap_locked, &cc);
+		result = hpage_collapse_scan_pmd(mm, vma, addr, &mmap_locked,
+						 &cc);
 		if (!mmap_locked)
 			*prev = NULL;  /* tell madvise we dropped mmap_lock */
 
-- 
2.36.0.464.gb9c8b46e94-goog


