From: Ryan Roberts <ryan.roberts@arm.com>
To: Zi Yan <ziy@nvidia.com>
Cc: "Pankaj Raghav (Samsung)" <kernel@pankajraghav.com>,
	linux-mm@kvack.org,
	"Matthew Wilcox (Oracle)" <willy@infradead.org>,
	"David Hildenbrand" <david@redhat.com>,
	"Yang Shi" <shy828301@gmail.com>, "Yu Zhao" <yuzhao@google.com>,
	"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
	"Michal Koutný" <mkoutny@suse.com>,
	"Roman Gushchin" <roman.gushchin@linux.dev>,
	"Zach O'Keefe" <zokeefe@google.com>,
	"Hugh Dickins" <hughd@google.com>,
	"Luis Chamberlain" <mcgrof@kernel.org>,
	"Andrew Morton" <akpm@linux-foundation.org>,
	linux-kernel@vger.kernel.org, cgroups@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, linux-kselftest@vger.kernel.org
Subject: Re: [PATCH v5 7/8] mm: thp: split huge page to any lower order pages
Date: Wed, 28 Feb 2024 15:44:44 +0000	[thread overview]
Message-ID: <408df79a-130e-43cd-a21a-9b3a2ddef617@arm.com> (raw)
In-Reply-To: <BD595F60-38C4-42B1-8EB9-9AE5B413C3F9@nvidia.com>

On 28/02/2024 15:42, Zi Yan wrote:
> On 28 Feb 2024, at 3:23, Ryan Roberts wrote:
> 
>> Hi Zi,
>>
>>
>> On 26/02/2024 20:55, Zi Yan wrote:
>>> From: Zi Yan <ziy@nvidia.com>
>>>
>>> To split a THP into pages of any lower order, we need to reform the THP's
>>> subpages into compound pages of the given order and add page refcounts
>>> based on the new page order. We also need to reinitialize
>>> page_deferred_list after removing the page from the split_queue;
>>> otherwise a subsequent split will see list corruption when checking
>>> page_deferred_list again.
>>>
>>> Note: Anonymous order-1 folio is not supported because _deferred_list,
>>> which is used by partially mapped folios, is stored in subpage 2 and an
>>> order-1 folio only has subpage 0 and 1. File-backed order-1 folios are
>>> fine, since they do not use _deferred_list.
>>>
>>> Signed-off-by: Zi Yan <ziy@nvidia.com>
>>> ---
>>>  include/linux/huge_mm.h |  21 +++++---
>>>  mm/huge_memory.c        | 110 +++++++++++++++++++++++++++++++---------
>>>  2 files changed, 99 insertions(+), 32 deletions(-)
>>>
>>> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
>>> index 5adb86af35fc..de0c89105076 100644
>>> --- a/include/linux/huge_mm.h
>>> +++ b/include/linux/huge_mm.h
>>> @@ -265,10 +265,11 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
>>>
>>>  void folio_prep_large_rmappable(struct folio *folio);
>>>  bool can_split_folio(struct folio *folio, int *pextra_pins);
>>> -int split_huge_page_to_list(struct page *page, struct list_head *list);
>>> +int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>>> +		unsigned int new_order);
>>>  static inline int split_huge_page(struct page *page)
>>>  {
>>> -	return split_huge_page_to_list(page, NULL);
>>> +	return split_huge_page_to_list_to_order(page, NULL, 0);
>>>  }
>>>  void deferred_split_folio(struct folio *folio);
>>>
>>> @@ -422,7 +423,8 @@ can_split_folio(struct folio *folio, int *pextra_pins)
>>>  	return false;
>>>  }
>>>  static inline int
>>> -split_huge_page_to_list(struct page *page, struct list_head *list)
>>> +split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>>> +		unsigned int new_order)
>>>  {
>>>  	return 0;
>>>  }
>>> @@ -519,17 +521,20 @@ static inline bool thp_migration_supported(void)
>>>  }
>>>  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
>>>
>>> -static inline int split_folio_to_list(struct folio *folio,
>>> -		struct list_head *list)
>>> +static inline int split_folio_to_list_to_order(struct folio *folio,
>>> +		struct list_head *list, int new_order)
>>>  {
>>> -	return split_huge_page_to_list(&folio->page, list);
>>> +	return split_huge_page_to_list_to_order(&folio->page, list, new_order);
>>>  }
>>>
>>> -static inline int split_folio(struct folio *folio)
>>> +static inline int split_folio_to_order(struct folio *folio, int new_order)
>>>  {
>>> -	return split_folio_to_list(folio, NULL);
>>> +	return split_folio_to_list_to_order(folio, NULL, new_order);
>>>  }
>>>
>>> +#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
>>> +#define split_folio(f) split_folio_to_order(f, 0)
>>> +
>>>  /*
>>>   * archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to
>>>   * limitations in the implementation like arm64 MTE can override this to
>>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>>> index b2df788c11fa..8b47a96a28f9 100644
>>> --- a/mm/huge_memory.c
>>> +++ b/mm/huge_memory.c
>>> @@ -2770,7 +2770,6 @@ static void lru_add_page_tail(struct page *head, struct page *tail,
>>>  		struct lruvec *lruvec, struct list_head *list)
>>>  {
>>>  	VM_BUG_ON_PAGE(!PageHead(head), head);
>>> -	VM_BUG_ON_PAGE(PageCompound(tail), head);
>>>  	VM_BUG_ON_PAGE(PageLRU(tail), head);
>>>  	lockdep_assert_held(&lruvec->lru_lock);
>>>
>>> @@ -2791,7 +2790,8 @@ static void lru_add_page_tail(struct page *head, struct page *tail,
>>>  }
>>>
>>>  static void __split_huge_page_tail(struct folio *folio, int tail,
>>> -		struct lruvec *lruvec, struct list_head *list)
>>> +		struct lruvec *lruvec, struct list_head *list,
>>> +		unsigned int new_order)
>>>  {
>>>  	struct page *head = &folio->page;
>>>  	struct page *page_tail = head + tail;
>>> @@ -2861,10 +2861,15 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
>>>  	 * which needs correct compound_head().
>>>  	 */
>>>  	clear_compound_head(page_tail);
>>> +	if (new_order) {
>>> +		prep_compound_page(page_tail, new_order);
>>> +		folio_prep_large_rmappable(new_folio);
>>> +	}
>>>
>>>  	/* Finally unfreeze refcount. Additional reference from page cache. */
>>> -	page_ref_unfreeze(page_tail, 1 + (!folio_test_anon(folio) ||
>>> -					  folio_test_swapcache(folio)));
>>> +	page_ref_unfreeze(page_tail,
>>> +		1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
>>> +			     folio_nr_pages(new_folio) : 0));
>>>
>>>  	if (folio_test_young(folio))
>>>  		folio_set_young(new_folio);
>>> @@ -2882,7 +2887,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
>>>  }
>>>
>>>  static void __split_huge_page(struct page *page, struct list_head *list,
>>> -		pgoff_t end)
>>> +		pgoff_t end, unsigned int new_order)
>>>  {
>>>  	struct folio *folio = page_folio(page);
>>>  	struct page *head = &folio->page;
>>> @@ -2890,11 +2895,12 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>>>  	struct address_space *swap_cache = NULL;
>>>  	unsigned long offset = 0;
>>>  	int i, nr_dropped = 0;
>>> +	unsigned int new_nr = 1 << new_order;
>>>  	int order = folio_order(folio);
>>>  	unsigned int nr = 1 << order;
>>>
>>>  	/* complete memcg works before add pages to LRU */
>>> -	split_page_memcg(head, order, 0);
>>> +	split_page_memcg(head, order, new_order);
>>>
>>>  	if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
>>>  		offset = swp_offset(folio->swap);
>>> @@ -2907,8 +2913,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>>>
>>>  	ClearPageHasHWPoisoned(head);
>>>
>>> -	for (i = nr - 1; i >= 1; i--) {
>>> -		__split_huge_page_tail(folio, i, lruvec, list);
>>> +	for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
>>> +		__split_huge_page_tail(folio, i, lruvec, list, new_order);
>>>  		/* Some pages can be beyond EOF: drop them from page cache */
>>>  		if (head[i].index >= end) {
>>>  			struct folio *tail = page_folio(head + i);
>>> @@ -2929,24 +2935,30 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>>>  		}
>>>  	}
>>>
>>> -	ClearPageCompound(head);
>>> +	if (!new_order)
>>> +		ClearPageCompound(head);
>>> +	else {
>>> +		struct folio *new_folio = (struct folio *)head;
>>> +
>>> +		folio_set_order(new_folio, new_order);
>>> +	}
>>>  	unlock_page_lruvec(lruvec);
>>>  	/* Caller disabled irqs, so they are still disabled here */
>>>
>>> -	split_page_owner(head, order, 0);
>>> +	split_page_owner(head, order, new_order);
>>>
>>>  	/* See comment in __split_huge_page_tail() */
>>>  	if (PageAnon(head)) {
>>>  		/* Additional pin to swap cache */
>>>  		if (PageSwapCache(head)) {
>>> -			page_ref_add(head, 2);
>>> +			page_ref_add(head, 1 + new_nr);
>>>  			xa_unlock(&swap_cache->i_pages);
>>>  		} else {
>>>  			page_ref_inc(head);
>>>  		}
>>>  	} else {
>>>  		/* Additional pin to page cache */
>>> -		page_ref_add(head, 2);
>>> +		page_ref_add(head, 1 + new_nr);
>>>  		xa_unlock(&head->mapping->i_pages);
>>>  	}
>>>  	local_irq_enable();
>>> @@ -2958,7 +2970,15 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>>>  	if (folio_test_swapcache(folio))
>>>  		split_swap_cluster(folio->swap);
>>>
>>> -	for (i = 0; i < nr; i++) {
>>> +	/*
>>> +	 * set page to its compound_head when split to non order-0 pages, so
>>> +	 * we can skip unlocking it below, since PG_locked is transferred to
>>> +	 * the compound_head of the page and the caller will unlock it.
>>> +	 */
>>> +	if (new_order)
>>> +		page = compound_head(page);
>>> +
>>> +	for (i = 0; i < nr; i += new_nr) {
>>>  		struct page *subpage = head + i;
>>>  		if (subpage == page)
>>>  			continue;
>>> @@ -2992,29 +3012,36 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
>>>  }
>>>
>>>  /*
>>> - * This function splits huge page into normal pages. @page can point to any
>>> - * subpage of huge page to split. Split doesn't change the position of @page.
>>> + * This function splits huge page into pages in @new_order. @page can point to
>>> + * any subpage of huge page to split. Split doesn't change the position of
>>> + * @page.
>>> + *
>>> + * NOTE: order-1 anonymous folio is not supported because _deferred_list,
>>> + * which is used by partially mapped folios, is stored in subpage 2 and an
>>> + * order-1 folio only has subpage 0 and 1. File-backed order-1 folios are OK,
>>> + * since they do not use _deferred_list.
>>>   *
>>>   * Only caller must hold pin on the @page, otherwise split fails with -EBUSY.
>>>   * The huge page must be locked.
>>>   *
>>>   * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
>>>   *
>>> - * Both head page and tail pages will inherit mapping, flags, and so on from
>>> - * the hugepage.
>>> + * Pages in new_order will inherit mapping, flags, and so on from the hugepage.
>>>   *
>>> - * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if
>>> - * they are not mapped.
>>> + * GUP pin and PG_locked transferred to @page or the compound page @page belongs
>>> + * to. Rest subpages can be freed if they are not mapped.
>>>   *
>>>   * Returns 0 if the hugepage is split successfully.
>>>   * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
>>>   * us.
>>>   */
>>> -int split_huge_page_to_list(struct page *page, struct list_head *list)
>>> +int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>>> +				     unsigned int new_order)
>>>  {
>>>  	struct folio *folio = page_folio(page);
>>>  	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
>>> -	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
>>> +	/* reset xarray order to new order after split */
>>> +	XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
>>>  	struct anon_vma *anon_vma = NULL;
>>>  	struct address_space *mapping = NULL;
>>>  	int extra_pins, ret;
>>> @@ -3024,6 +3051,34 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
>>>  	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
>>>  	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
>>>
>>> +	/* Cannot split anonymous THP to order-1 */
>>> +	if (new_order == 1 && folio_test_anon(folio)) {
>>> +		VM_WARN_ONCE(1, "Cannot split to order-1 folio");
>>> +		return -EINVAL;
>>> +	}
>>> +
>>> +	if (new_order) {
>>> +		/* Only swapping a whole PMD-mapped folio is supported */
>>> +		if (folio_test_swapcache(folio)) {
>>> +			VM_WARN_ONCE(1,
>>> +				"Cannot split swap-cached folio to non-0 order");
>>
>> My understanding may be wrong here, but can't the folio be moved to the
>> swapcache asynchronously? How does the caller guarantee that the folio is
>> not in the swapcache and will not be moved there between the call to
>> split_huge_page_to_list_to_order() and this test? If the caller can't prevent
>> it, then isn't it wrong to raise a warning here? Perhaps you just have to fail
>> the split?
> 
> Right. That is why I only used VM_WARN_ONCE here. You mean it is better to
> get rid of the warning? I have no strong preference either way.

Yes; I don't think we should be issuing warnings when the caller has done
nothing wrong?
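
To be concrete, something like the below is roughly what I was thinking of -
an untested sketch on top of this patch, which just drops the VM_WARN_ONCE
calls and keeps the -EINVAL returns:

	if (new_order) {
		/* Only swapping a whole PMD-mapped folio is supported */
		if (folio_test_swapcache(folio))
			return -EINVAL;
		/* ...same treatment for the shmem and large-folio checks... */
	}

That way a caller that races with the folio being added to the swapcache just
sees the split fail, rather than triggering a warning for something it
couldn't have prevented.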

> 
>>
>> I'm guessing this restriction is because swap only supports order-0 and
>> pmd-order folios currently? (And you only have split_swap_cluster() to downgrade
>> from pmd-order to order-0). Perhaps you need my series that allows swapping out
>> any order THP? Current version at [1] but I'm working on a new version.
>>
>> [1] https://lore.kernel.org/linux-mm/20231025144546.577640-1-ryan.roberts@arm.com/
> 
> Right. Once your patchset is in, the above check can be removed.
> 
>>> +			return -EINVAL;
>>> +		}
>>> +		/* Split shmem folio to non-zero order not supported */
>>> +		if (shmem_mapping(folio->mapping)) {
>>> +			VM_WARN_ONCE(1,
>>> +				"Cannot split shmem folio to non-0 order");
>>> +			return -EINVAL;
>>> +		}
>>> +		/* No split if the file system does not support large folio */
>>> +		if (!mapping_large_folio_support(folio->mapping)) {
>>> +			VM_WARN_ONCE(1,
>>> +				"Cannot split file folio to non-0 order");
>>> +			return -EINVAL;
>>> +		}
>>> +	}
>>> +
>>> +
>>>  	is_hzp = is_huge_zero_page(&folio->page);
>>>  	if (is_hzp) {
>>>  		pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
>>> @@ -3120,14 +3175,21 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
>>>  		if (folio_order(folio) > 1 &&
>>>  		    !list_empty(&folio->_deferred_list)) {
>>>  			ds_queue->split_queue_len--;
>>> -			list_del(&folio->_deferred_list);
>>> +			/*
>>> +			 * Reinitialize page_deferred_list after removing the
>>> +			 * page from the split_queue, otherwise a subsequent
>>> +			 * split will see list corruption when checking the
>>> +			 * page_deferred_list.
>>> +			 */
>>> +			list_del_init(&folio->_deferred_list);
>>>  		}
>>>  		spin_unlock(&ds_queue->split_queue_lock);
>>>  		if (mapping) {
>>>  			int nr = folio_nr_pages(folio);
>>>
>>>  			xas_split(&xas, folio, folio_order(folio));
>>> -			if (folio_test_pmd_mappable(folio)) {
>>> +			if (folio_test_pmd_mappable(folio) &&
>>> +			    new_order < HPAGE_PMD_ORDER) {
>>>  				if (folio_test_swapbacked(folio)) {
>>>  					__lruvec_stat_mod_folio(folio,
>>>  							NR_SHMEM_THPS, -nr);
>>> @@ -3139,7 +3201,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
>>>  			}
>>>  		}
>>>
>>> -		__split_huge_page(page, list, end);
>>> +		__split_huge_page(page, list, end, new_order);
>>>  		ret = 0;
>>>  	} else {
>>>  		spin_unlock(&ds_queue->split_queue_lock);
> 
> 
> --
> Best Regards,
> Yan, Zi


