From: Minchan Kim <minchan@kernel.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	Rik van Riel <riel@redhat.com>, Minchan Kim <minchan@kernel.org>
Subject: [PATCH v1 1/3] mm: vmscan: refactoring force_reclaim
Date: Mon, 13 Jun 2016 16:50:56 +0900	[thread overview]
Message-ID: <1465804259-29345-2-git-send-email-minchan@kernel.org> (raw)
In-Reply-To: <1465804259-29345-1-git-send-email-minchan@kernel.org>

The local variable "references" defaults to PAGEREF_RECLAIM_CLEAN so
that, when force_reclaim is true, dirty page writeout is skipped to
keep reclaim latency low (introduced by [1]).

However, this is ironic: the caller asked for *force reclaim*, yet we
prohibit dirty page writeout.

Let's make the logic clearer by moving force_reclaim into scan_control
and expressing the "no dirty writeout" decision explicitly through
may_writepage. This patch is a refactoring, so it should not change
any behavior.

[1] <02c6de8d757c, mm: cma: discard clean pages during contiguous
allocation instead of migration>
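
For illustration, a minimal sketch (not part of the patch, reusing the
existing vmscan names) of the old flow inside shrink_page_list() and
why it quietly refuses dirty writeout under force_reclaim:

	enum page_references references = PAGEREF_RECLAIM_CLEAN; /* old default */

	if (!force_reclaim)
		references = page_check_references(page, sc);
	/*
	 * With force_reclaim == true the default survives, so the later
	 * PageDirty() handling sees PAGEREF_RECLAIM_CLEAN and never calls
	 * pageout(): "forced" reclaim silently becomes clean-pages-only.
	 * After this patch the same decision is spelled out through
	 * sc->force_reclaim and sc->may_writepage = 0.
	 */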

Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 mm/vmscan.c | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 21d417ccff69..05119983c92e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -95,6 +95,9 @@ struct scan_control {
 	/* Can cgroups be reclaimed below their normal consumption range? */
 	unsigned int may_thrash:1;
 
+	/* reclaim pages unconditionally */
+	unsigned int force_reclaim:1;
+
 	unsigned int hibernation_mode:1;
 
 	/* One of the zones is ready for compaction */
@@ -783,6 +786,7 @@ void putback_lru_page(struct page *page)
 }
 
 enum page_references {
+	PAGEREF_NONE,
 	PAGEREF_RECLAIM,
 	PAGEREF_RECLAIM_CLEAN,
 	PAGEREF_KEEP,
@@ -884,8 +888,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				      unsigned long *ret_nr_unqueued_dirty,
 				      unsigned long *ret_nr_congested,
 				      unsigned long *ret_nr_writeback,
-				      unsigned long *ret_nr_immediate,
-				      bool force_reclaim)
+				      unsigned long *ret_nr_immediate)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
@@ -903,7 +906,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
-		enum page_references references = PAGEREF_RECLAIM_CLEAN;
+		enum page_references references = PAGEREF_NONE;
 		bool dirty, writeback;
 		bool lazyfree = false;
 		int ret = SWAP_SUCCESS;
@@ -927,13 +930,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (!sc->may_unmap && page_mapped(page))
 			goto keep_locked;
 
-		/* Double the slab pressure for mapped and swapcache pages */
-		if (page_mapped(page) || PageSwapCache(page))
-			sc->nr_scanned++;
-
 		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
 			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
+		if (sc->force_reclaim)
+			goto force_reclaim;
+
+		/* Double the slab pressure for mapped and swapcache pages */
+		if (page_mapped(page) || PageSwapCache(page))
+			sc->nr_scanned++;
 		/*
 		 * The number of dirty pages determines if a zone is marked
 		 * reclaim_congested which affects wait_iff_congested. kswapd
@@ -1028,19 +1033,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			}
 		}
 
-		if (!force_reclaim)
-			references = page_check_references(page, sc);
+		references = page_check_references(page, sc);
 
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
 		case PAGEREF_KEEP:
 			goto keep_locked;
-		case PAGEREF_RECLAIM:
-		case PAGEREF_RECLAIM_CLEAN:
-			; /* try to reclaim the page below */
+		default:
+			break; /* try to reclaim the page below */
 		}
 
+force_reclaim:
 		/*
 		 * Anonymous process memory has backing store?
 		 * Try to allocate it some swap space here.
@@ -1253,6 +1257,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 		.gfp_mask = GFP_KERNEL,
 		.priority = DEF_PRIORITY,
 		.may_unmap = 1,
+		.may_writepage = 0,
+		.force_reclaim = 1,
 	};
 	unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
 	struct page *page, *next;
@@ -1268,7 +1274,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 
 	ret = shrink_page_list(&clean_pages, zone, &sc,
 			TTU_UNMAP|TTU_IGNORE_ACCESS,
-			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5);
 	list_splice(&clean_pages, page_list);
 	mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
 	return ret;
@@ -1623,8 +1629,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 
 	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
 				&nr_dirty, &nr_unqueued_dirty, &nr_congested,
-				&nr_writeback, &nr_immediate,
-				false);
+				&nr_writeback, &nr_immediate);
 
 	spin_lock_irq(&zone->lru_lock);
 
-- 
1.9.1

Thread overview: 37+ messages
2016-06-13  7:50 [PATCH v1 0/3] per-process reclaim Minchan Kim
2016-06-13  7:50 ` [PATCH v1 1/3] mm: vmscan: refactoring force_reclaim Minchan Kim [this message]
2016-06-13  7:50 ` [PATCH v1 2/3] mm: vmscan: shrink_page_list with multiple zones Minchan Kim
2016-06-13  7:50 ` [PATCH v1 3/3] mm: per-process reclaim Minchan Kim
2016-06-13 15:06   ` Johannes Weiner
2016-06-15  0:40     ` Minchan Kim
2016-06-16 11:07       ` Michal Hocko
2016-06-16 14:41       ` Johannes Weiner
2016-06-17  6:43         ` Minchan Kim
2016-06-17  7:24     ` Balbir Singh
2016-06-17  7:57       ` Vinayak Menon
2016-06-13 17:06   ` Rik van Riel
2016-06-15  1:01     ` Minchan Kim
2016-06-13 11:50 ` [PATCH v1 0/3] per-process reclaim Chen Feng
2016-06-13 12:22   ` ZhaoJunmin Zhao(Junmin)
2016-06-15  0:43   ` Minchan Kim
2016-06-13 13:29 ` Vinayak Menon
2016-06-15  0:57   ` Minchan Kim
2016-06-16  4:21     ` Vinayak Menon
