From: Michal Hocko <mhocko@kernel.org>
To: <linux-mm@kvack.org>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Mel Gorman <mgorman@suse.de>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Vlastimil Babka <vbabka@suse.cz>, Rik van Riel <riel@redhat.com>,
	LKML <linux-kernel@vger.kernel.org>,
	Michal Hocko <mhocko@suse.com>
Subject: [PATCH 5/7] mm, vmscan: extract shrink_page_list reclaim counters into a struct
Date: Wed, 28 Dec 2016 16:30:30 +0100	[thread overview]
Message-ID: <20161228153032.10821-6-mhocko@kernel.org> (raw)
In-Reply-To: <20161228153032.10821-1-mhocko@kernel.org>

From: Michal Hocko <mhocko@suse.com>

shrink_page_list currently returns quite a few counters to its caller through
separate output arguments. Extract the existing five into a new struct
reclaim_stat; this makes the code easier to follow and also allows further
counters to be returned in the future.
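
To illustrate the resulting calling convention (this is just the code from
the hunks below, condensed): callers interested in the detailed counters
pass a struct reclaim_stat on their stack, callers that do not care simply
pass NULL:

	struct reclaim_stat stat = {};

	/* shrink_inactive_list(): collect the counters */
	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP,
				&stat, false);
	if (stat.nr_writeback && stat.nr_writeback == nr_taken)
		set_bit(PGDAT_WRITEBACK, &pgdat->flags);

	/* reclaim_clean_pages_from_list(): counters are not needed */
	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
			TTU_UNMAP|TTU_IGNORE_ACCESS, NULL, true);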

While we are at it, make all of them unsigned rather than unsigned long,
as we do not really need the full 64 bits for them (we never scan more
than SWAP_CLUSTER_MAX pages at once). This should also save some stack
space.
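
As a rough illustration of the saving, assuming a typical 64-bit
configuration where sizeof(unsigned long) is 8 and sizeof(unsigned) is 4:
the six local counters in shrink_page_list go from 6*8 = 48 bytes down to
6*4 = 24 bytes, and shrink_inactive_list keeps a single ~20 byte struct
reclaim_stat instead of five separate 8 byte locals (40 bytes).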

This patch shouldn't introduce any functional change.

Signed-off-by: Michal Hocko <mhocko@suse.com>
---
 mm/vmscan.c | 61 ++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 30 insertions(+), 31 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3f0774f30a42..f6f2d828968c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -902,6 +902,14 @@ static void page_check_dirty_writeback(struct page *page,
 		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
 }
 
+struct reclaim_stat {
+	unsigned nr_dirty;
+	unsigned nr_unqueued_dirty;
+	unsigned nr_congested;
+	unsigned nr_writeback;
+	unsigned nr_immediate;
+};
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -909,22 +917,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct pglist_data *pgdat,
 				      struct scan_control *sc,
 				      enum ttu_flags ttu_flags,
-				      unsigned long *ret_nr_dirty,
-				      unsigned long *ret_nr_unqueued_dirty,
-				      unsigned long *ret_nr_congested,
-				      unsigned long *ret_nr_writeback,
-				      unsigned long *ret_nr_immediate,
+				      struct reclaim_stat *stat,
 				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
 	int pgactivate = 0;
-	unsigned long nr_unqueued_dirty = 0;
-	unsigned long nr_dirty = 0;
-	unsigned long nr_congested = 0;
-	unsigned long nr_reclaimed = 0;
-	unsigned long nr_writeback = 0;
-	unsigned long nr_immediate = 0;
+	unsigned nr_unqueued_dirty = 0;
+	unsigned nr_dirty = 0;
+	unsigned nr_congested = 0;
+	unsigned nr_reclaimed = 0;
+	unsigned nr_writeback = 0;
+	unsigned nr_immediate = 0;
 
 	cond_resched();
 
@@ -1266,11 +1270,13 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
 
-	*ret_nr_dirty += nr_dirty;
-	*ret_nr_congested += nr_congested;
-	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
-	*ret_nr_writeback += nr_writeback;
-	*ret_nr_immediate += nr_immediate;
+	if (stat) {
+		stat->nr_dirty = nr_dirty;
+		stat->nr_congested = nr_congested;
+		stat->nr_unqueued_dirty = nr_unqueued_dirty;
+		stat->nr_writeback = nr_writeback;
+		stat->nr_immediate = nr_immediate;
+	}
 	return nr_reclaimed;
 }
 
@@ -1282,7 +1288,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
 	};
-	unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
+	unsigned long ret;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
 
@@ -1295,8 +1301,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	}
 
 	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
-			TTU_UNMAP|TTU_IGNORE_ACCESS,
-			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+			TTU_UNMAP|TTU_IGNORE_ACCESS, NULL, true);
 	list_splice(&clean_pages, page_list);
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
 	return ret;
@@ -1696,11 +1701,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
-	unsigned long nr_dirty = 0;
-	unsigned long nr_congested = 0;
-	unsigned long nr_unqueued_dirty = 0;
-	unsigned long nr_writeback = 0;
-	unsigned long nr_immediate = 0;
+	struct reclaim_stat stat = {};
 	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@@ -1745,9 +1746,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		return 0;
 
 	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP,
-				&nr_dirty, &nr_unqueued_dirty, &nr_congested,
-				&nr_writeback, &nr_immediate,
-				false);
+				&stat, false);
 
 	spin_lock_irq(&pgdat->lru_lock);
 
@@ -1781,7 +1780,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * of pages under pages flagged for immediate reclaim and stall if any
 	 * are encountered in the nr_immediate check below.
 	 */
-	if (nr_writeback && nr_writeback == nr_taken)
+	if (stat.nr_writeback && stat.nr_writeback == nr_taken)
 		set_bit(PGDAT_WRITEBACK, &pgdat->flags);
 
 	/*
@@ -1793,7 +1792,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * Tag a zone as congested if all the dirty pages scanned were
 		 * backed by a congested BDI and wait_iff_congested will stall.
 		 */
-		if (nr_dirty && nr_dirty == nr_congested)
+		if (stat.nr_dirty && stat.nr_dirty == stat.nr_congested)
 			set_bit(PGDAT_CONGESTED, &pgdat->flags);
 
 		/*
@@ -1802,7 +1801,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
 		 * reclaim context.
 		 */
-		if (nr_unqueued_dirty == nr_taken)
+		if (stat.nr_unqueued_dirty == nr_taken)
 			set_bit(PGDAT_DIRTY, &pgdat->flags);
 
 		/*
@@ -1811,7 +1810,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * that pages are cycling through the LRU faster than
 		 * they are written so also forcibly stall.
 		 */
-		if (nr_immediate && current_may_throttle())
+		if (stat.nr_immediate && current_may_throttle())
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
 
-- 
2.10.2
