From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-mm@kvack.org, Hugh Dickins <hughd@google.com>
Subject: [PATCH 24/28] mm: Move page->deferred_list to folio->_deferred_list
Date: Wed, 11 Jan 2023 14:29:10 +0000
Message-ID: <20230111142915.1001531-25-willy@infradead.org>
In-Reply-To: <20230111142915.1001531-1-willy@infradead.org>

Remove the entire block of definitions for the second tail page and
add the deferred list to struct folio.  This moves _deferred_list to a
different offset within struct folio because I don't see a need to
include the padding.

This lets us use list_for_each_entry_safe() in deferred_split_scan()
and avoid a number of calls to compound_head().
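For illustration, here is the shape of the loop before and after this
change (a minimal sketch; the complete version is in the diff below):

	/* Before: the list nodes are tail-page fields, so each entry
	 * has to be converted back to its head page by hand. */
	list_for_each_safe(pos, next, &ds_queue->split_queue) {
		page = list_entry((void *)pos, struct page, deferred_list);
		page = compound_head(page);
		/* ... work on the head page ... */
	}

	/* After: _deferred_list is a struct folio member, so the
	 * type-safe helper iterates folios directly and no calls to
	 * compound_head() are needed. */
	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
							_deferred_list) {
		/* ... work on the folio ... */
	}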

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/huge_mm.h  |  9 ++++-----
 include/linux/mm_types.h | 14 ++++++++------
 mm/huge_memory.c         | 32 +++++++++++++++-----------------
 3 files changed, 27 insertions(+), 28 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a1341fdcf666..aacfcb02606f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -295,11 +295,10 @@ static inline bool thp_migration_supported(void)
 
 static inline struct list_head *page_deferred_list(struct page *page)
 {
-	/*
-	 * See organization of tail pages of compound page in
-	 * "struct page" definition.
-	 */
-	return &page[2].deferred_list;
+	struct folio *folio = (struct folio *)page;
+
+	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
+	return &folio->_deferred_list;
 }
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4b8aa0f8f9fe..c464205cf7ea 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -141,12 +141,6 @@ struct page {
 		struct {	/* Tail pages of compound page */
 			unsigned long compound_head;	/* Bit zero is set */
 		};
-		struct {	/* Second tail page of transparent huge page */
-			unsigned long _compound_pad_1;	/* compound_head */
-			unsigned long _compound_pad_2;
-			/* For both global and memcg */
-			struct list_head deferred_list;
-		};
 		struct {	/* Second tail page of hugetlb page */
 			unsigned long _hugetlb_pad_1;	/* compound_head */
 			void *hugetlb_subpool;
@@ -302,6 +296,7 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page)
  * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
  * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
  * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
+ * @_deferred_list: Folios to be split under memory pressure.
  *
  * A folio is a physically, virtually and logically contiguous set
  * of bytes.  It is a power-of-two in size, and it is aligned to that
@@ -366,6 +361,13 @@ struct folio {
 			void *_hugetlb_cgroup;
 			void *_hugetlb_cgroup_rsvd;
 			void *_hugetlb_hwpoison;
+	/* private: the union with struct page is transitional */
+		};
+		struct {
+			unsigned long _flags_2a;
+			unsigned long _head_2a;
+	/* public: */
+			struct list_head _deferred_list;
 	/* private: the union with struct page is transitional */
 		};
 		struct page __page_2;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bfa960f012fa..a4138daaa0b8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2756,9 +2756,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	/* Prevent deferred_split_scan() touching ->_refcount */
 	spin_lock(&ds_queue->split_queue_lock);
 	if (folio_ref_freeze(folio, 1 + extra_pins)) {
-		if (!list_empty(page_deferred_list(&folio->page))) {
+		if (!list_empty(&folio->_deferred_list)) {
 			ds_queue->split_queue_len--;
-			list_del(page_deferred_list(&folio->page));
+			list_del(&folio->_deferred_list);
 		}
 		spin_unlock(&ds_queue->split_queue_lock);
 		if (mapping) {
@@ -2873,8 +2873,8 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 	struct pglist_data *pgdata = NODE_DATA(sc->nid);
 	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
 	unsigned long flags;
-	LIST_HEAD(list), *pos, *next;
-	struct page *page;
+	LIST_HEAD(list);
+	struct folio *folio, *next;
 	int split = 0;
 
 #ifdef CONFIG_MEMCG
@@ -2884,14 +2884,13 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	/* Take pin on all head pages to avoid freeing them under us */
-	list_for_each_safe(pos, next, &ds_queue->split_queue) {
-		page = list_entry((void *)pos, struct page, deferred_list);
-		page = compound_head(page);
-		if (get_page_unless_zero(page)) {
-			list_move(page_deferred_list(page), &list);
+	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
+							_deferred_list) {
+		if (folio_try_get(folio)) {
+			list_move(&folio->_deferred_list, &list);
 		} else {
-			/* We lost race with put_compound_page() */
-			list_del_init(page_deferred_list(page));
+			/* We lost race with folio_put() */
+			list_del_init(&folio->_deferred_list);
 			ds_queue->split_queue_len--;
 		}
 		if (!--sc->nr_to_scan)
@@ -2899,16 +2898,15 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 	}
 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 
-	list_for_each_safe(pos, next, &list) {
-		page = list_entry((void *)pos, struct page, deferred_list);
-		if (!trylock_page(page))
+	list_for_each_entry_safe(folio, next, &list, _deferred_list) {
+		if (!folio_trylock(folio))
 			goto next;
 		/* split_huge_page() removes page from list on success */
-		if (!split_huge_page(page))
+		if (!split_folio(folio))
 			split++;
-		unlock_page(page);
+		folio_unlock(folio);
 next:
-		put_page(page);
+		folio_put(folio);
 	}
 
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-- 
2.35.1
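
As an aside on the VM_BUG_ON_FOLIO() added to page_deferred_list():
_deferred_list overlays memory in the second tail page, so only folios
of order 2 or above have anywhere to store it.  A rough sketch of the
layout assumption (first-tail-page field names elided; the page-2 names
are those from the struct folio hunk above):

	/*
	 * struct folio is a view over the compound page's struct pages:
	 *
	 *   page 0:  flags, lru, mapping, index, ...  <- the folio itself
	 *   page 1:  first tail page fields (_flags_1, ...)
	 *   page 2:  _flags_2a, _head_2a, _deferred_list
	 *
	 * An order-0 or order-1 folio has no second tail page, hence
	 * the folio_order(folio) >= 2 requirement asserted in
	 * page_deferred_list().
	 */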



