From: Vlastimil Babka <vbabka@suse.cz>
To: Yu Zhao <yuzhao@google.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Hugh Dickins <hughd@google.com>,
	Alex Shi <alex.shi@linux.alibaba.com>
Cc: Michal Hocko <mhocko@kernel.org>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Vladimir Davydov <vdavydov.dev@gmail.com>,
	Roman Gushchin <guro@fb.com>,
	Matthew Wilcox <willy@infradead.org>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v2 03/10] mm: don't pass "enum lru_list" to lru list addition functions
Date: Tue, 26 Jan 2021 20:13:11 +0100
Message-ID: <85b3e8f2-5982-3329-c20d-cf062b8da71e@suse.cz>
In-Reply-To: <20210122220600.906146-4-yuzhao@google.com>

On 1/22/21 11:05 PM, Yu Zhao wrote:
> The "enum lru_list" parameter to add_page_to_lru_list() and
> add_page_to_lru_list_tail() is redundant in the sense that it can
> be extracted from the "struct page" parameter by page_lru().

Okay, but it also means repeatedly extracting a value that we already knew.
The compiled result is rather sad. This is bloat-o-meter on mm/built-in.a
(without CONFIG_DEBUG_VM, btw) between patch 2 and patch 5:

add/remove: 0/0 grow/shrink: 10/5 up/down: 1837/-60 (1777)
Function                                     old     new   delta
lru_deactivate_file_fn                       932    1368    +436
lru_lazyfree_fn.part                         629     953    +324
check_move_unevictable_pages                1171    1424    +253
__activate_page.part                         735     984    +249
lru_deactivate_fn.part                       593     822    +229
perf_trace_mm_lru_insertion                  458     560    +102
trace_event_raw_event_mm_lru_insertion       412     500     +88
__page_cache_release                         479     558     +79
release_pages                               1430    1499     +69
pagevec_move_tail_fn.part                    761     769      +8
isolate_lru_page                             471     470      -1
__bpf_trace_mm_lru_insertion                   7       5      -2
__traceiter_mm_lru_insertion                  55      47      -8
isolate_migratepages_block                  3200    3185     -15
__pagevec_lru_add_fn                        1092    1058     -34

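For reference, this is roughly what page_lru() boils down to (a simplified
sketch paraphrasing mm_inline.h, not a verbatim quote), i.e. the flag tests
that now get inlined into every add_page_to_lru_list*() caller instead of
the caller passing an lru value it often already had as a constant:

static __always_inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page))
		return LRU_UNEVICTABLE;

	/* base type: inactive file or inactive anon */
	lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (PageActive(page))
		lru += LRU_ACTIVE;

	return lru;
}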

> A caveat is that we need to make sure PageActive() or
> PageUnevictable() is correctly set or cleared before calling
> these two functions. And they are indeed.
> 
> Link: https://lore.kernel.org/linux-mm/20201207220949.830352-4-yuzhao@google.com/
> Signed-off-by: Yu Zhao <yuzhao@google.com>
> ---
>  include/linux/mm_inline.h |  8 ++++++--
>  mm/swap.c                 | 15 +++++++--------
>  mm/vmscan.c               |  6 ++----
>  3 files changed, 15 insertions(+), 14 deletions(-)
> 
> diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
> index 2889741f450a..130ba3201d3f 100644
> --- a/include/linux/mm_inline.h
> +++ b/include/linux/mm_inline.h
> @@ -106,15 +106,19 @@ static __always_inline enum lru_list page_lru(struct page *page)
>  }
>  
>  static __always_inline void add_page_to_lru_list(struct page *page,
> -				struct lruvec *lruvec, enum lru_list lru)
> +				struct lruvec *lruvec)
>  {
> +	enum lru_list lru = page_lru(page);
> +
>  	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
>  	list_add(&page->lru, &lruvec->lists[lru]);
>  }
>  
>  static __always_inline void add_page_to_lru_list_tail(struct page *page,
> -				struct lruvec *lruvec, enum lru_list lru)
> +				struct lruvec *lruvec)
>  {
> +	enum lru_list lru = page_lru(page);
> +
>  	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
>  	list_add_tail(&page->lru, &lruvec->lists[lru]);
>  }
> diff --git a/mm/swap.c b/mm/swap.c
> index 490553f3f9ef..4b058ef37add 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -231,7 +231,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
>  	if (!PageUnevictable(page)) {
>  		del_page_from_lru_list(page, lruvec, page_lru(page));
>  		ClearPageActive(page);
> -		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
> +		add_page_to_lru_list_tail(page, lruvec);
>  		__count_vm_events(PGROTATED, thp_nr_pages(page));
>  	}
>  }
> @@ -313,8 +313,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec)
>  
>  		del_page_from_lru_list(page, lruvec, lru);
>  		SetPageActive(page);
> -		lru += LRU_ACTIVE;
> -		add_page_to_lru_list(page, lruvec, lru);
> +		add_page_to_lru_list(page, lruvec);
>  		trace_mm_lru_activate(page);
>  
>  		__count_vm_events(PGACTIVATE, nr_pages);
> @@ -543,14 +542,14 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
>  		 * It can make readahead confusing.  But race window
>  		 * is _really_ small and  it's non-critical problem.
>  		 */
> -		add_page_to_lru_list(page, lruvec, lru);
> +		add_page_to_lru_list(page, lruvec);
>  		SetPageReclaim(page);
>  	} else {
>  		/*
>  		 * The page's writeback ends up during pagevec
>  		 * We moves tha page into tail of inactive.
>  		 */
> -		add_page_to_lru_list_tail(page, lruvec, lru);
> +		add_page_to_lru_list_tail(page, lruvec);
>  		__count_vm_events(PGROTATED, nr_pages);
>  	}
>  
> @@ -570,7 +569,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
>  		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
>  		ClearPageActive(page);
>  		ClearPageReferenced(page);
> -		add_page_to_lru_list(page, lruvec, lru);
> +		add_page_to_lru_list(page, lruvec);
>  
>  		__count_vm_events(PGDEACTIVATE, nr_pages);
>  		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
> @@ -595,7 +594,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
>  		 * anonymous pages
>  		 */
>  		ClearPageSwapBacked(page);
> -		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
> +		add_page_to_lru_list(page, lruvec);
>  
>  		__count_vm_events(PGLAZYFREE, nr_pages);
>  		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
> @@ -1005,7 +1004,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
>  			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
>  	}
>  
> -	add_page_to_lru_list(page, lruvec, lru);
> +	add_page_to_lru_list(page, lruvec);
>  	trace_mm_lru_insertion(page, lru);
>  }
>  
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 19875660e8f8..09e4f97488c9 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1867,7 +1867,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
>  		 * inhibits memcg migration).
>  		 */
>  		VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
> -		add_page_to_lru_list(page, lruvec, page_lru(page));
> +		add_page_to_lru_list(page, lruvec);
>  		nr_pages = thp_nr_pages(page);
>  		nr_moved += nr_pages;
>  		if (PageActive(page))
> @@ -4282,12 +4282,10 @@ void check_move_unevictable_pages(struct pagevec *pvec)
>  
>  		lruvec = relock_page_lruvec_irq(page, lruvec);
>  		if (page_evictable(page) && PageUnevictable(page)) {
> -			enum lru_list lru = page_lru_base_type(page);
> -
>  			VM_BUG_ON_PAGE(PageActive(page), page);
>  			ClearPageUnevictable(page);
>  			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
> -			add_page_to_lru_list(page, lruvec, lru);
> +			add_page_to_lru_list(page, lruvec);
>  			pgrescued += nr_pages;
>  		}
>  		SetPageLRU(page);
> 
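P.S. to make the source of the growth more concrete (this is just my reading,
using the lru_lazyfree_fn() hunk above as the example): the old call site
passed a compile-time constant, while the new one has the inlined page_lru()
re-derive the same value from the page flags at runtime:

	/* before: lru is the literal LRU_INACTIVE_FILE, so the
	 * lru-dependent parts can be folded by the compiler */
	add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

	/* after: PageUnevictable()/PageActive()/page_is_file_lru() are
	 * re-tested to recompute what the caller already knew */
	add_page_to_lru_list(page, lruvec);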


Thread overview: 66+ messages
2021-01-22 22:05 [PATCH v2 00/10] mm: lru related cleanups Yu Zhao
2021-01-22 22:05 ` [PATCH v2 01/10] mm: use add_page_to_lru_list() Yu Zhao
2021-01-26 18:57   ` Vlastimil Babka
2021-01-27  2:12   ` Miaohe Lin
2021-01-22 22:05 ` [PATCH v2 02/10] mm: shuffle lru list addition and deletion functions Yu Zhao
2021-01-26 18:58   ` Vlastimil Babka
2021-01-27  2:14   ` Miaohe Lin
2021-01-22 22:05 ` [PATCH v2 03/10] mm: don't pass "enum lru_list" to lru list addition functions Yu Zhao
2021-01-26 19:13   ` Vlastimil Babka [this message]
2021-01-26 21:34     ` Yu Zhao
2021-01-27 10:51       ` Vlastimil Babka
2021-01-26 22:01   ` Matthew Wilcox
2021-01-26 22:14     ` Yu Zhao
2021-02-23 22:50       ` Andrew Morton
2021-02-24  5:29         ` Yu Zhao
2021-02-24  8:06           ` Alex Shi
2021-02-24  8:37             ` Yu Zhao
2021-02-24  9:01               ` Alex Shi
2021-01-22 22:05 ` [PATCH v2 04/10] mm: don't pass "enum lru_list" to trace_mm_lru_insertion() Yu Zhao
2021-01-22 22:05 ` [PATCH v2 05/10] mm: don't pass "enum lru_list" to del_page_from_lru_list() Yu Zhao
2021-01-22 22:05 ` [PATCH v2 06/10] mm: add __clear_page_lru_flags() to replace page_off_lru() Yu Zhao
2021-01-22 22:05 ` [PATCH v2 07/10] mm: VM_BUG_ON lru page flags Yu Zhao
2021-01-22 22:05 ` [PATCH v2 08/10] mm: fold page_lru_base_type() into its sole caller Yu Zhao
2021-01-22 22:05 ` [PATCH v2 09/10] mm: fold __update_lru_size() " Yu Zhao
2021-01-22 22:06 ` [PATCH v2 10/10] mm: make lruvec_lru_size() static Yu Zhao
2021-02-24  8:48   ` [PATCH] mm: test page->flags directly in page_lru() Yu Zhao
2021-02-24 13:15     ` Andrew Morton
2021-02-24 19:57       ` Yu Zhao
2021-02-24 21:56       ` Matthew Wilcox
2021-02-24 22:34         ` Yu Zhao
2021-02-24 22:48           ` Matthew Wilcox
2021-02-24 23:50             ` Yu Zhao
2021-02-25  3:55               ` Matthew Wilcox
2021-02-25  5:22                 ` Yu Zhao
2021-02-25 12:12                   ` Matthew Wilcox
2021-02-26  9:17     ` [PATCH v2 0/3] trim the uses of compound_head() Yu Zhao
2021-02-26  9:17       ` [PATCH v2 1/3] mm: bypass compound_head() for PF_NO_TAIL when enforce=1 Yu Zhao
2021-02-26  9:17       ` [PATCH v2 2/3] mm: use PF_NO_TAIL for PG_lru Yu Zhao
2021-02-26 20:22         ` Yu Zhao
2021-02-26  9:17       ` [PATCH v2 3/3] mm: use PF_ONLY_HEAD for PG_active and PG_unevictable Yu Zhao
2021-02-26 12:13         ` Matthew Wilcox
2021-02-26 19:49           ` Yu Zhao
2021-02-26 20:27             ` Matthew Wilcox
2021-03-01 11:50           ` Kirill A. Shutemov
2021-03-01 19:58             ` Yu Zhao
2021-03-01 20:16               ` Hugh Dickins
2021-03-01 20:26                 ` Matthew Wilcox
2021-02-26 10:52       ` [PATCH v2 0/3] trim the uses of compound_head() Vlastimil Babka
2021-02-26 19:04         ` Yu Zhao
