linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v2 1/2] mm: use add_page_to_lru_list()/page_lru()/page_off_lru()
@ 2020-08-31 17:50 Yu Zhao
  2020-08-31 17:50 ` [PATCH v2 2/2] mm: use self-explanatory macros rather than "2" Yu Zhao
  2020-09-03  8:28 ` [PATCH v2 1/2] mm: use add_page_to_lru_list()/page_lru()/page_off_lru() Michal Hocko
  0 siblings, 2 replies; 6+ messages in thread
From: Yu Zhao @ 2020-08-31 17:50 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Alex Shi, linux-mm, linux-kernel, Yu Zhao

This is a trivial but worthwhile clean-up patch. There should be
no side effects except page->lru is temporarily poisoned after it's
deleted but before it's added to the new list in move_pages_to_lru()
(which is not a problem).

[ I was under false impression that page_off_lru() clears PG_lru;
  Alex Shi <alex.shi@linux.alibaba.com> pointed that out. So in v2,
  we keep __ClearPageLRU(). ]

Signed-off-by: Yu Zhao <yuzhao@google.com>
---
 mm/swap.c   |  4 +---
 mm/vmscan.c | 13 ++++---------
 2 files changed, 5 insertions(+), 12 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 40bf20a75278..2735ecf0f566 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -597,11 +597,9 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 {
 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
-		bool active = PageActive(page);
 		int nr_pages = thp_nr_pages(page);
 
-		del_page_from_lru_list(page, lruvec,
-				       LRU_INACTIVE_ANON + active);
+		del_page_from_lru_list(page, lruvec, page_lru(page));
 		ClearPageActive(page);
 		ClearPageReferenced(page);
 		/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 99e1796eb833..1b871c3987e7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1845,13 +1845,12 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 	int nr_pages, nr_moved = 0;
 	LIST_HEAD(pages_to_free);
 	struct page *page;
-	enum lru_list lru;
 
 	while (!list_empty(list)) {
 		page = lru_to_page(list);
 		VM_BUG_ON_PAGE(PageLRU(page), page);
+		list_del(&page->lru);
 		if (unlikely(!page_evictable(page))) {
-			list_del(&page->lru);
 			spin_unlock_irq(&pgdat->lru_lock);
 			putback_lru_page(page);
 			spin_lock_irq(&pgdat->lru_lock);
@@ -1860,16 +1859,11 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
 		SetPageLRU(page);
-		lru = page_lru(page);
-
-		nr_pages = thp_nr_pages(page);
-		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
-		list_move(&page->lru, &lruvec->lists[lru]);
+		add_page_to_lru_list(page, lruvec, page_lru(page));
 
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
-			__ClearPageActive(page);
-			del_page_from_lru_list(page, lruvec, lru);
+			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&pgdat->lru_lock);
@@ -1878,6 +1872,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 			} else
 				list_add(&page->lru, &pages_to_free);
 		} else {
+			nr_pages = thp_nr_pages(page);
 			nr_moved += nr_pages;
 			if (PageActive(page))
 				workingset_age_nonresident(lruvec, nr_pages);
-- 
2.28.0.402.g5ffc5be6b7-goog


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v2 2/2] mm: use self-explanatory macros rather than "2"
  2020-08-31 17:50 [PATCH v2 1/2] mm: use add_page_to_lru_list()/page_lru()/page_off_lru() Yu Zhao
@ 2020-08-31 17:50 ` Yu Zhao
  2020-09-03  8:28 ` [PATCH v2 1/2] mm: use add_page_to_lru_list()/page_lru()/page_off_lru() Michal Hocko
  1 sibling, 0 replies; 6+ messages in thread
From: Yu Zhao @ 2020-08-31 17:50 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Alex Shi, linux-mm, linux-kernel, Yu Zhao

This is a trivial clean-up patch. Take it or leave it.

Signed-off-by: Yu Zhao <yuzhao@google.com>
---
 include/linux/mmzone.h | 12 ++++++++----
 include/linux/vmstat.h |  2 +-
 mm/vmscan.c            |  2 +-
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8379432f4f2f..be1cdbf8317f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -266,6 +266,8 @@ static inline bool is_active_lru(enum lru_list lru)
 	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
 }
 
+#define ANON_AND_FILE 2
+
 enum lruvec_flags {
 	LRUVEC_CONGESTED,		/* lruvec has many dirty pages
 					 * backed by a congested BDI
@@ -283,8 +285,8 @@ struct lruvec {
 	unsigned long			file_cost;
 	/* Non-resident age, driven by LRU movement */
 	atomic_long_t			nonresident_age;
-	/* Refaults at the time of last reclaim cycle, anon=0, file=1 */
-	unsigned long			refaults[2];
+	/* Refaults at the time of last reclaim cycle */
+	unsigned long			refaults[ANON_AND_FILE];
 	/* Various lruvec state flags (enum lruvec_flags) */
 	unsigned long			flags;
 #ifdef CONFIG_MEMCG
@@ -406,6 +408,8 @@ enum zone_type {
 
 #ifndef __GENERATING_BOUNDS_H
 
+#define ASYNC_AND_SYNC 2
+
 struct zone {
 	/* Read-mostly fields */
 
@@ -525,8 +529,8 @@ struct zone {
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
 	/* pfn where compaction free scanner should start */
 	unsigned long		compact_cached_free_pfn;
-	/* pfn where async and sync compaction migration scanner should start */
-	unsigned long		compact_cached_migrate_pfn[2];
+	/* pfn where compaction migration scanner should start */
+	unsigned long		compact_cached_migrate_pfn[ASYNC_AND_SYNC];
 	unsigned long		compact_init_migrate_pfn;
 	unsigned long		compact_init_free_pfn;
 #endif
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 91220ace31da..d5431c1bf6e5 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -28,7 +28,7 @@ struct reclaim_stat {
 	unsigned nr_writeback;
 	unsigned nr_immediate;
 	unsigned nr_pageout;
-	unsigned nr_activate[2];
+	unsigned nr_activate[ANON_AND_FILE];
 	unsigned nr_ref_keep;
 	unsigned nr_unmap_fail;
 	unsigned nr_lazyfree_fail;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1b871c3987e7..b454cc17e5a3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2232,7 +2232,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 	unsigned long anon_cost, file_cost, total_cost;
 	int swappiness = mem_cgroup_swappiness(memcg);
-	u64 fraction[2];
+	u64 fraction[ANON_AND_FILE];
 	u64 denominator = 0;	/* gcc */
 	enum scan_balance scan_balance;
 	unsigned long ap, fp;
-- 
2.28.0.402.g5ffc5be6b7-goog


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 1/2] mm: use add_page_to_lru_list()/page_lru()/page_off_lru()
  2020-08-31 17:50 [PATCH v2 1/2] mm: use add_page_to_lru_list()/page_lru()/page_off_lru() Yu Zhao
  2020-08-31 17:50 ` [PATCH v2 2/2] mm: use self-explanatory macros rather than "2" Yu Zhao
@ 2020-09-03  8:28 ` Michal Hocko
  2020-09-04  3:24   ` Yu Zhao
  1 sibling, 1 reply; 6+ messages in thread
From: Michal Hocko @ 2020-09-03  8:28 UTC (permalink / raw)
  To: Yu Zhao; +Cc: Andrew Morton, Alex Shi, linux-mm, linux-kernel

On Mon 31-08-20 11:50:41, Yu Zhao wrote:
[...]
> @@ -1860,16 +1859,11 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
>  		lruvec = mem_cgroup_page_lruvec(page, pgdat);
>  
>  		SetPageLRU(page);
> -		lru = page_lru(page);
> -
> -		nr_pages = thp_nr_pages(page);
> -		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
> -		list_move(&page->lru, &lruvec->lists[lru]);
> +		add_page_to_lru_list(page, lruvec, page_lru(page));
>  
>  		if (put_page_testzero(page)) {
>  			__ClearPageLRU(page);
> -			__ClearPageActive(page);

This should go into its own patch. The rest is mechanical and clear.
This one needs some head scratching. E.g. is it correct for all compound
pages? I believe it should be, but a few words on why would be better.

> -			del_page_from_lru_list(page, lruvec, lru);
> +			del_page_from_lru_list(page, lruvec, page_off_lru(page));
>  
>  			if (unlikely(PageCompound(page))) {
>  				spin_unlock_irq(&pgdat->lru_lock);
> @@ -1878,6 +1872,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
>  			} else
>  				list_add(&page->lru, &pages_to_free);
>  		} else {
> +			nr_pages = thp_nr_pages(page);
>  			nr_moved += nr_pages;
>  			if (PageActive(page))
>  				workingset_age_nonresident(lruvec, nr_pages);
> -- 
> 2.28.0.402.g5ffc5be6b7-goog
> 

-- 
Michal Hocko
SUSE Labs

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 1/2] mm: use add_page_to_lru_list()/page_lru()/page_off_lru()
  2020-09-03  8:28 ` [PATCH v2 1/2] mm: use add_page_to_lru_list()/page_lru()/page_off_lru() Michal Hocko
@ 2020-09-04  3:24   ` Yu Zhao
  2020-09-04 10:50     ` Michal Hocko
  0 siblings, 1 reply; 6+ messages in thread
From: Yu Zhao @ 2020-09-04  3:24 UTC (permalink / raw)
  To: Michal Hocko; +Cc: Andrew Morton, Alex Shi, linux-mm, linux-kernel

On Thu, Sep 03, 2020 at 10:28:32AM +0200, Michal Hocko wrote:
> On Mon 31-08-20 11:50:41, Yu Zhao wrote:
> [...]
> > @@ -1860,16 +1859,11 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
> >  		lruvec = mem_cgroup_page_lruvec(page, pgdat);
> >  
> >  		SetPageLRU(page);
> > -		lru = page_lru(page);
> > -
> > -		nr_pages = thp_nr_pages(page);
> > -		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
> > -		list_move(&page->lru, &lruvec->lists[lru]);
> > +		add_page_to_lru_list(page, lruvec, page_lru(page));
> >  
> >  		if (put_page_testzero(page)) {
> >  			__ClearPageLRU(page);
> > -			__ClearPageActive(page);
> 
> This should go into its own patch. The rest is a mechanical and clear.

Thanks for reviewing.

I assume you are worrying about PG_unevictable being set on the page
because page_off_lru() checks it first. Well, if this can happen,
wouldn't it have been triggering bad_page()? PG_unevictable is in
PAGE_FLAGS_CHECK_AT_FREE.

> This one needs some head scratching. E.g. Is it correct for all compound
> pages. I believe it should be but few words on why would be better.

It seems there is nothing special here compared with other places that
follow the same pattern. So I'm not sure what the concern is.

> > -			del_page_from_lru_list(page, lruvec, lru);
> > +			del_page_from_lru_list(page, lruvec, page_off_lru(page));
> >  
> >  			if (unlikely(PageCompound(page))) {
> >  				spin_unlock_irq(&pgdat->lru_lock);
> > @@ -1878,6 +1872,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
> >  			} else
> >  				list_add(&page->lru, &pages_to_free);
> >  		} else {
> > +			nr_pages = thp_nr_pages(page);
> >  			nr_moved += nr_pages;
> >  			if (PageActive(page))
> >  				workingset_age_nonresident(lruvec, nr_pages);
> > -- 
> > 2.28.0.402.g5ffc5be6b7-goog
> > 
> 
> -- 
> Michal Hocko
> SUSE Labs

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 1/2] mm: use add_page_to_lru_list()/page_lru()/page_off_lru()
  2020-09-04  3:24   ` Yu Zhao
@ 2020-09-04 10:50     ` Michal Hocko
  2020-09-04 16:54       ` Yu Zhao
  0 siblings, 1 reply; 6+ messages in thread
From: Michal Hocko @ 2020-09-04 10:50 UTC (permalink / raw)
  To: Yu Zhao; +Cc: Andrew Morton, Alex Shi, linux-mm, linux-kernel

On Thu 03-09-20 21:24:00, Yu Zhao wrote:
> On Thu, Sep 03, 2020 at 10:28:32AM +0200, Michal Hocko wrote:
> > On Mon 31-08-20 11:50:41, Yu Zhao wrote:
> > [...]
> > > @@ -1860,16 +1859,11 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
> > >  		lruvec = mem_cgroup_page_lruvec(page, pgdat);
> > >  
> > >  		SetPageLRU(page);
> > > -		lru = page_lru(page);
> > > -
> > > -		nr_pages = thp_nr_pages(page);
> > > -		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
> > > -		list_move(&page->lru, &lruvec->lists[lru]);
> > > +		add_page_to_lru_list(page, lruvec, page_lru(page));
> > >  
> > >  		if (put_page_testzero(page)) {
> > >  			__ClearPageLRU(page);
> > > -			__ClearPageActive(page);
> > 
> > This should go into its own patch. The rest is a mechanical and clear.
> 
> Thanks for reviewing.
> 
> I assume you are worrying about PG_unevictable being set on the page
> because page_off_lru() checks it first.

No, I was referring to __ClearPageActive. You are right that this is
cleared in page_off_lru but that is hidden in a release path and e.g.
compound pages are released via their destructor which for some might
not involve releasing the page - e.g. hugetlb pages. This should be fine
because hugetlb pages are not on LRU so as I've said this is fine but it
belongs to its own patch because it is not a pure mechanical change like
the rest of the patch.
-- 
Michal Hocko
SUSE Labs

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 1/2] mm: use add_page_to_lru_list()/page_lru()/page_off_lru()
  2020-09-04 10:50     ` Michal Hocko
@ 2020-09-04 16:54       ` Yu Zhao
  0 siblings, 0 replies; 6+ messages in thread
From: Yu Zhao @ 2020-09-04 16:54 UTC (permalink / raw)
  To: Michal Hocko; +Cc: Andrew Morton, Alex Shi, linux-mm, linux-kernel

On Fri, Sep 04, 2020 at 12:50:01PM +0200, Michal Hocko wrote:
> On Thu 03-09-20 21:24:00, Yu Zhao wrote:
> > On Thu, Sep 03, 2020 at 10:28:32AM +0200, Michal Hocko wrote:
> > > On Mon 31-08-20 11:50:41, Yu Zhao wrote:
> > > [...]
> > > > @@ -1860,16 +1859,11 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
> > > >  		lruvec = mem_cgroup_page_lruvec(page, pgdat);
> > > >  
> > > >  		SetPageLRU(page);
> > > > -		lru = page_lru(page);
> > > > -
> > > > -		nr_pages = thp_nr_pages(page);
> > > > -		update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
> > > > -		list_move(&page->lru, &lruvec->lists[lru]);
> > > > +		add_page_to_lru_list(page, lruvec, page_lru(page));
> > > >  
> > > >  		if (put_page_testzero(page)) {
> > > >  			__ClearPageLRU(page);
> > > > -			__ClearPageActive(page);
> > > 
> > > This should go into its own patch. The rest is a mechanical and clear.
> > 
> > Thanks for reviewing.
> > 
> > I assume you are worrying about PG_unevictable being set on the page
> > because page_off_lru() checks it first.
> 
> No, I was referring to __ClearPageActive. You are right that this is
> cleared in page_off_lru but that is hidden in a release path and e.g.
> compound pages are released via their destructor which for some might
> not involve releasing the page - e.g. hugetlb pages. This should be fine
> because hugetlb pages are not on LRU so as I've said this is fine but it
> belongs to its own patch because it is not a pure mechanical change like
> the rest of the patch.

Please bear with me. This is the change in question:

 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
-			__ClearPageActive(page);
-			del_page_from_lru_list(page, lruvec, lru);
+			del_page_from_lru_list(page, lruvec, page_off_lru(page));

 			if (unlikely(PageCompound(page))) {

If the PageUnevictable() check in page_off_lru() is not a concern,
I'm trying to understand what else is different between them:

	Before this patch:		After this patch:

					page_off_lru()
	__ClearPageActive()			__ClearPageActive()
	del_page_from_lru_list()	del_page_from_lru_list()

And why is page_off_lru() hidden in a release path?

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2020-09-04 16:55 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-08-31 17:50 [PATCH v2 1/2] mm: use add_page_to_lru_list()/page_lru()/page_off_lru() Yu Zhao
2020-08-31 17:50 ` [PATCH v2 2/2] mm: use self-explanatory macros rather than "2" Yu Zhao
2020-09-03  8:28 ` [PATCH v2 1/2] mm: use add_page_to_lru_list()/page_lru()/page_off_lru() Michal Hocko
2020-09-04  3:24   ` Yu Zhao
2020-09-04 10:50     ` Michal Hocko
2020-09-04 16:54       ` Yu Zhao

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).