* [PATCH] mm/lru: Add folio LRU functions
From: Matthew Wilcox (Oracle) @ 2021-05-13 12:27 UTC
  To: yuzhao, akpm, linux-mm; +Cc: Matthew Wilcox (Oracle)

Handle arbitrary-order folios being added to the LRU.  By definition,
all pages being added to the LRU were already head or base pages,
so define page wrappers around folio functions where the original
page functions involved calling compound_head() to manipulate flags,
but define folio wrappers around page functions where there's no need to
call compound_head().  The one thing that does change for those functions
is calling compound_nr() instead of thp_nr_pages(), in order to handle
arbitrary-sized folios.
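
For context on that last change: thp_nr_pages() assumes every compound
page is a PMD-sized THP, whereas compound_nr() derives the count from
the order stored in the page itself.  Simplified sketches of the two
helpers, paraphrased from the kernel sources of this era (not part of
this diff):

static inline int thp_nr_pages(struct page *page)
{
	/* Any head page is assumed to be a PMD-sized THP. */
	if (PageHead(page))
		return HPAGE_PMD_NR;
	return 1;
}

static inline unsigned long compound_nr(struct page *page)
{
	/* Handles any order; compound_order() is 0 for a base page. */
	return 1UL << compound_order(page);
}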

Saves almost 800 bytes of kernel text; no functions grow.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/mm_inline.h | 85 +++++++++++++++++++++++++++------------
 1 file changed, 59 insertions(+), 26 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 355ea1ee32bd..c9e05631e565 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -6,22 +6,27 @@
 #include <linux/swap.h>
 
 /**
- * page_is_file_lru - should the page be on a file LRU or anon LRU?
- * @page: the page to test
+ * folio_is_file_lru - should the folio be on a file LRU or anon LRU?
+ * @folio: the folio to test
  *
- * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
- * freed anonymous page (e.g. via MADV_FREE).  Returns 0 if @page is a normal
- * anonymous page, a tmpfs page or otherwise ram or swap backed page.  Used by
- * functions that manipulate the LRU lists, to sort a page onto the right LRU
- * list.
+ * Returns 1 if @folio is a regular filesystem backed page cache folio
+ * or a lazily freed anonymous folio (e.g. via MADV_FREE).  Returns 0 if
+ * @folio is a normal anonymous folio, a tmpfs folio or otherwise ram or
+ * swap backed folio.  Used by functions that manipulate the LRU lists,
+ * to sort a folio onto the right LRU list.
  *
  * We would like to get this info without a page flag, but the state
- * needs to survive until the page is last deleted from the LRU, which
+ * needs to survive until the folio is last deleted from the LRU, which
  * could be as far down as __page_cache_release.
  */
+static inline int folio_is_file_lru(struct folio *folio)
+{
+	return !folio_swapbacked(folio);
+}
+
 static inline int page_is_file_lru(struct page *page)
 {
-	return !PageSwapBacked(page);
+	return folio_is_file_lru(page_folio(page));
 }
 
 static __always_inline void update_lru_size(struct lruvec *lruvec,
@@ -42,66 +47,94 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
  * __clear_page_lru_flags - clear page lru flags before releasing a page
  * @page: the page that was on lru and now has a zero reference
  */
-static __always_inline void __clear_page_lru_flags(struct page *page)
+static __always_inline void __folio_clear_lru_flags(struct folio *folio)
 {
-	VM_BUG_ON_PAGE(!PageLRU(page), page);
+	VM_BUG_ON_FOLIO(!folio_lru(folio), folio);
 
-	__ClearPageLRU(page);
+	__folio_clear_lru_flag(folio);
 
 	/* this shouldn't happen, so leave the flags to bad_page() */
-	if (PageActive(page) && PageUnevictable(page))
+	if (folio_active(folio) && folio_unevictable(folio))
 		return;
 
-	__ClearPageActive(page);
-	__ClearPageUnevictable(page);
+	__folio_clear_active_flag(folio);
+	__folio_clear_unevictable_flag(folio);
+}
+
+static __always_inline void __clear_page_lru_flags(struct page *page)
+{
+	__folio_clear_lru_flags(page_folio(page));
 }
 
 /**
- * page_lru - which LRU list should a page be on?
- * @page: the page to test
+ * folio_lru_list - which LRU list should a folio be on?
+ * @folio: the folio to test
  *
- * Returns the LRU list a page should be on, as an index
+ * Returns the LRU list a folio should be on, as an index
  * into the array of LRU lists.
  */
-static __always_inline enum lru_list page_lru(struct page *page)
+static __always_inline enum lru_list folio_lru_list(struct folio *folio)
 {
 	enum lru_list lru;
 
-	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
+	VM_BUG_ON_FOLIO(folio_active(folio) && folio_unevictable(folio), folio);
 
-	if (PageUnevictable(page))
+	if (folio_unevictable(folio))
 		return LRU_UNEVICTABLE;
 
-	lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
-	if (PageActive(page))
+	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
+	if (folio_active(folio))
 		lru += LRU_ACTIVE;
 
 	return lru;
 }
 
+static __always_inline enum lru_list page_lru(struct page *page)
+{
+	return folio_lru_list(page_folio(page));
+}
+
 static __always_inline void add_page_to_lru_list(struct page *page,
 				struct lruvec *lruvec)
 {
 	enum lru_list lru = page_lru(page);
 
-	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
+	update_lru_size(lruvec, lru, page_zonenum(page), compound_nr(page));
 	list_add(&page->lru, &lruvec->lists[lru]);
 }
 
+static __always_inline void folio_add_to_lru_list(struct folio *folio,
+				struct lruvec *lruvec)
+{
+	add_page_to_lru_list(&folio->page, lruvec);
+}
+
 static __always_inline void add_page_to_lru_list_tail(struct page *page,
 				struct lruvec *lruvec)
 {
 	enum lru_list lru = page_lru(page);
 
-	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
+	update_lru_size(lruvec, lru, page_zonenum(page), compound_nr(page));
 	list_add_tail(&page->lru, &lruvec->lists[lru]);
 }
 
+static __always_inline void folio_add_to_lru_list_tail(struct folio *folio,
+				struct lruvec *lruvec)
+{
+	add_page_to_lru_list_tail(&folio->page, lruvec);
+}
+
 static __always_inline void del_page_from_lru_list(struct page *page,
 				struct lruvec *lruvec)
 {
 	list_del(&page->lru);
 	update_lru_size(lruvec, page_lru(page), page_zonenum(page),
-			-thp_nr_pages(page));
+			-compound_nr(page));
+}
+
+static __always_inline void folio_del_from_lru_list(struct folio *folio,
+				struct lruvec *lruvec)
+{
+	del_page_from_lru_list(&folio->page, lruvec);
 }
 #endif
-- 
2.30.2
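
As a quick illustration of how the new helpers compose, a hypothetical
caller (not part of the patch; folio_lru() is the flag test added
earlier in the folio series):

/*
 * Hypothetical example: move a folio to the tail of its LRU list.
 * Assumes the caller holds the appropriate lruvec lock.
 */
static void folio_move_to_lru_tail(struct folio *folio, struct lruvec *lruvec)
{
	if (!folio_lru(folio))	/* not on an LRU list; nothing to do */
		return;

	folio_del_from_lru_list(folio, lruvec);
	folio_add_to_lru_list_tail(folio, lruvec);
}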




* Re: [PATCH] mm/lru: Add folio LRU functions
From: Yu Zhao @ 2021-05-13 17:52 UTC
  To: Matthew Wilcox (Oracle); +Cc: Andrew Morton, Linux-MM

On Thu, May 13, 2021 at 6:28 AM Matthew Wilcox (Oracle)
<willy@infradead.org> wrote:
>
> Handle arbitrary-order folios being added to the LRU.  By definition,
> all pages being added to the LRU were already head or base pages,
> so define page wrappers around folio functions where the original
> page functions involved calling compound_head() to manipulate flags,
> but define folio wrappers around page functions where there's no need to
> call compound_head().  The one thing that does change for those functions
> is calling compound_nr() instead of thp_nr_pages(), in order to handle
> arbitrary-sized folios.
>
> Saves almost 800 bytes of kernel text; no functions grow.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Yu Zhao <yuzhao@google.com>


* Re: [PATCH] mm/lru: Add folio LRU functions
From: Christoph Hellwig @ 2021-05-27  8:36 UTC
  To: Matthew Wilcox (Oracle); +Cc: yuzhao, akpm, linux-mm

On Thu, May 13, 2021 at 01:27:02PM +0100, Matthew Wilcox (Oracle) wrote:
> +static inline int folio_is_file_lru(struct folio *folio)
> +{
> +	return !folio_swapbacked(folio);
> +}

This should probably be changed to return a bool.
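
That is, roughly (a sketch of the suggested change; the body stays
the same):

static inline bool folio_is_file_lru(struct folio *folio)
{
	return !folio_swapbacked(folio);
}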

Otherwise looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>


