* [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
@ 2014-06-09 13:27 ` Chen Yucong
  0 siblings, 0 replies; 28+ messages in thread
From: Chen Yucong @ 2014-06-09 13:27 UTC (permalink / raw)
  To: mgorman; +Cc: mhocko, hannes, akpm, linux-mm, linux-kernel, Chen Yucong

As https://lkml.org/lkml/2013/4/10/334 shows, recording the original scan
targets introduces an extra 40 bytes on the stack. This patch avoids that
extra stack usage and the call to memcpy(), while keeping the underlying
design idea unchanged.

ratio = original_nr_file / original_nr_anon;

If (nr_file > nr_anon), then ratio = (nr_file - x) / nr_anon.
 x = nr_file - ratio * nr_anon;

if (nr_file <= nr_anon), then ratio = nr_file / (nr_anon - x).
 x = nr_anon - nr_file / ratio;
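
For illustration, a hypothetical example (the numbers are made up): suppose
get_scan_count() returns 200 file pages and 100 anon pages, so ratio = 2.
If enough has been reclaimed while nr_file = 150 and nr_anon = 40 are still
left (50 file and 60 anon pages scanned so far), anon scanning is stopped
and x = 150 - 2 * 40 = 70 more file pages are scanned, so that the totals
become 120 file : 60 anon and the original 2 : 1 ratio is preserved.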

Signed-off-by: Chen Yucong <slaoub@gmail.com>
---
 mm/vmscan.c |   28 +++++++++-------------------
 1 file changed, 9 insertions(+), 19 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index a8ffe4e..daaf89c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2057,8 +2057,7 @@ out:
 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
 	unsigned long nr[NR_LRU_LISTS];
-	unsigned long targets[NR_LRU_LISTS];
-	unsigned long nr_to_scan;
+	unsigned long nr_to_scan, ratio;
 	enum lru_list lru;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
@@ -2067,8 +2066,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 
 	get_scan_count(lruvec, sc, nr);
 
-	/* Record the original scan target for proportional adjustments later */
-	memcpy(targets, nr, sizeof(nr));
+	ratio = (nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE] + 1) /
+			(nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON] + 1);
 
 	/*
 	 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
@@ -2088,7 +2087,6 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 					nr[LRU_INACTIVE_FILE]) {
 		unsigned long nr_anon, nr_file, percentage;
-		unsigned long nr_scanned;
 
 		for_each_evictable_lru(lru) {
 			if (nr[lru]) {
@@ -2123,15 +2121,13 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 			break;
 
 		if (nr_file > nr_anon) {
-			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
-						targets[LRU_ACTIVE_ANON] + 1;
+			nr_to_scan = nr_file - ratio * nr_anon;
+			percentage = nr[LRU_FILE] * 100 / nr_file;
 			lru = LRU_BASE;
-			percentage = nr_anon * 100 / scan_target;
 		} else {
-			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
-						targets[LRU_ACTIVE_FILE] + 1;
+			nr_to_scan = nr_anon - nr_file / ratio;
+			percentage = nr[LRU_BASE] * 100 / nr_anon;
 			lru = LRU_FILE;
-			percentage = nr_file * 100 / scan_target;
 		}
 
 		/* Stop scanning the smaller of the LRU */
@@ -2143,14 +2139,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 		 * scan target and the percentage scanning already complete
 		 */
 		lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
-		nr_scanned = targets[lru] - nr[lru];
-		nr[lru] = targets[lru] * (100 - percentage) / 100;
-		nr[lru] -= min(nr[lru], nr_scanned);
-
-		lru += LRU_ACTIVE;
-		nr_scanned = targets[lru] - nr[lru];
-		nr[lru] = targets[lru] * (100 - percentage) / 100;
-		nr[lru] -= min(nr[lru], nr_scanned);
+		nr[lru] = nr_to_scan * percentage / 100;
+		nr[lru + LRU_ACTIVE] = nr_to_scan - nr[lru];
 
 		scan_adjusted = true;
 	}
-- 
1.7.10.4


* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-09 13:27 ` Chen Yucong
@ 2014-06-09 23:24   ` Minchan Kim
  -1 siblings, 0 replies; 28+ messages in thread
From: Minchan Kim @ 2014-06-09 23:24 UTC (permalink / raw)
  To: Chen Yucong; +Cc: mgorman, mhocko, hannes, akpm, linux-mm, linux-kernel

Hello,

On Mon, Jun 09, 2014 at 09:27:16PM +0800, Chen Yucong wrote:
> Via https://lkml.org/lkml/2013/4/10/334 , we can find that recording the
> original scan targets introduces extra 40 bytes on the stack. This patch
> is able to avoid this situation and the call to memcpy(). At the same time,
> it does not change the relative design idea.
> 
> ratio = original_nr_file / original_nr_anon;
> 
> If (nr_file > nr_anon), then ratio = (nr_file - x) / nr_anon.
>  x = nr_file - ratio * nr_anon;
> 
> if (nr_file <= nr_anon), then ratio = nr_file / (nr_anon - x).
>  x = nr_anon - nr_file / ratio;

Nice cleanup!

Below one nitpick.

> 
> Signed-off-by: Chen Yucong <slaoub@gmail.com>
> ---
>  mm/vmscan.c |   28 +++++++++-------------------
>  1 file changed, 9 insertions(+), 19 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index a8ffe4e..daaf89c 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2057,8 +2057,7 @@ out:
>  static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>  {
>  	unsigned long nr[NR_LRU_LISTS];
> -	unsigned long targets[NR_LRU_LISTS];
> -	unsigned long nr_to_scan;
> +	unsigned long nr_to_scan, ratio;
>  	enum lru_list lru;
>  	unsigned long nr_reclaimed = 0;
>  	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
> @@ -2067,8 +2066,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>  
>  	get_scan_count(lruvec, sc, nr);
>  
> -	/* Record the original scan target for proportional adjustments later */
> -	memcpy(targets, nr, sizeof(nr));
> +	ratio = (nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE] + 1) /
> +			(nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON] + 1);
>  
>  	/*
>  	 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
> @@ -2088,7 +2087,6 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>  	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
>  					nr[LRU_INACTIVE_FILE]) {
>  		unsigned long nr_anon, nr_file, percentage;
> -		unsigned long nr_scanned;
>  
>  		for_each_evictable_lru(lru) {
>  			if (nr[lru]) {
> @@ -2123,15 +2121,13 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>  			break;
>  
>  		if (nr_file > nr_anon) {
> -			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
> -						targets[LRU_ACTIVE_ANON] + 1;
> +			nr_to_scan = nr_file - ratio * nr_anon;
> +			percentage = nr[LRU_FILE] * 100 / nr_file;
>  			lru = LRU_BASE;
> -			percentage = nr_anon * 100 / scan_target;
>  		} else {
> -			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
> -						targets[LRU_ACTIVE_FILE] + 1;
> +			nr_to_scan = nr_anon - nr_file / ratio;
> +			percentage = nr[LRU_BASE] * 100 / nr_anon;

nr_anon can be zero here if huge pages are reclaimed, and the code can
still get past the check below, ending up with a divide-by-zero:

        if (nr_reclaimed < nr_to_reclaim || scan_adjusted)


>  			lru = LRU_FILE;
> -			percentage = nr_file * 100 / scan_target;
>  		}
>  
>  		/* Stop scanning the smaller of the LRU */
> @@ -2143,14 +2139,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>  		 * scan target and the percentage scanning already complete
>  		 */
>  		lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
> -		nr_scanned = targets[lru] - nr[lru];
> -		nr[lru] = targets[lru] * (100 - percentage) / 100;
> -		nr[lru] -= min(nr[lru], nr_scanned);
> -
> -		lru += LRU_ACTIVE;
> -		nr_scanned = targets[lru] - nr[lru];
> -		nr[lru] = targets[lru] * (100 - percentage) / 100;
> -		nr[lru] -= min(nr[lru], nr_scanned);
> +		nr[lru] = nr_to_scan * percentage / 100;
> +		nr[lru + LRU_ACTIVE] = nr_to_scan - nr[lru];
>  
>  		scan_adjusted = true;
>  	}
> -- 
> 1.7.10.4
> 

-- 
Kind regards,
Minchan Kim

* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-09 23:24   ` Minchan Kim
@ 2014-06-10  0:10     ` Chen Yucong
  -1 siblings, 0 replies; 28+ messages in thread
From: Chen Yucong @ 2014-06-10  0:10 UTC (permalink / raw)
  To: Minchan Kim; +Cc: mgorman, mhocko, hannes, akpm, linux-mm, linux-kernel

On Tue, 2014-06-10 at 08:24 +0900, Minchan Kim wrote:
> Hello,
> 
> On Mon, Jun 09, 2014 at 09:27:16PM +0800, Chen Yucong wrote:
> > Via https://lkml.org/lkml/2013/4/10/334 , we can find that recording the
> > original scan targets introduces extra 40 bytes on the stack. This patch
> > is able to avoid this situation and the call to memcpy(). At the same time,
> > it does not change the relative design idea.
> > 
> > ratio = original_nr_file / original_nr_anon;
> > 
> > If (nr_file > nr_anon), then ratio = (nr_file - x) / nr_anon.
> >  x = nr_file - ratio * nr_anon;
> > 
> > if (nr_file <= nr_anon), then ratio = nr_file / (nr_anon - x).
> >  x = nr_anon - nr_file / ratio;
> 
> Nice cleanup!
> 
> Below one nitpick.
> 

> 
> If both nr_file and nr_anon are zero, then the nr_anon could be zero
> if HugePage are reclaimed so that it could pass the below check
> 
>         if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
> 
> 
Mel Gorman's patch has already handled the situation you're
describing. It's called:

mm: vmscan: use proportional scanning during direct reclaim and full
scan at DEF_PRIORITY

thx!
cyc


* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-10  0:10     ` Chen Yucong
@ 2014-06-10  0:24       ` Minchan Kim
  -1 siblings, 0 replies; 28+ messages in thread
From: Minchan Kim @ 2014-06-10  0:24 UTC (permalink / raw)
  To: Chen Yucong; +Cc: mgorman, mhocko, hannes, akpm, linux-mm, linux-kernel

On Tue, Jun 10, 2014 at 08:10:51AM +0800, Chen Yucong wrote:
> On Tue, 2014-06-10 at 08:24 +0900, Minchan Kim wrote:
> > Hello,
> > 
> > On Mon, Jun 09, 2014 at 09:27:16PM +0800, Chen Yucong wrote:
> > > Via https://lkml.org/lkml/2013/4/10/334 , we can find that recording the
> > > original scan targets introduces extra 40 bytes on the stack. This patch
> > > is able to avoid this situation and the call to memcpy(). At the same time,
> > > it does not change the relative design idea.
> > > 
> > > ratio = original_nr_file / original_nr_anon;
> > > 
> > > If (nr_file > nr_anon), then ratio = (nr_file - x) / nr_anon.
> > >  x = nr_file - ratio * nr_anon;
> > > 
> > > if (nr_file <= nr_anon), then ratio = nr_file / (nr_anon - x).
> > >  x = nr_anon - nr_file / ratio;
> > 
> > Nice cleanup!
> > 
> > Below one nitpick.
> > 
> 
> > 
> > If both nr_file and nr_anon are zero, then the nr_anon could be zero
> > if HugePage are reclaimed so that it could pass the below check
> > 
> >         if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
> > 
> > 
> The Mel Gorman's patch has already handled this situation you're
> describing. It's called:
>  
> mm: vmscan: use proportional scanning during direct reclaim and full
> scan at DEF_PRIORITY

It seems I have been away from vmscan.c for a while.
Thanks for pointing that out. So,

Acked-by: Minchan Kim <minchan@kernel.org>

-- 
Kind regards,
Minchan Kim

* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-09 13:27 ` Chen Yucong
@ 2014-06-10 23:33   ` Andrew Morton
  -1 siblings, 0 replies; 28+ messages in thread
From: Andrew Morton @ 2014-06-10 23:33 UTC (permalink / raw)
  To: Chen Yucong; +Cc: mgorman, mhocko, hannes, linux-mm, linux-kernel

On Mon,  9 Jun 2014 21:27:16 +0800 Chen Yucong <slaoub@gmail.com> wrote:

> Via https://lkml.org/lkml/2013/4/10/334 , we can find that recording the
> original scan targets introduces extra 40 bytes on the stack. This patch
> is able to avoid this situation and the call to memcpy(). At the same time,
> it does not change the relative design idea.
> 
> ratio = original_nr_file / original_nr_anon;
> 
> If (nr_file > nr_anon), then ratio = (nr_file - x) / nr_anon.
>  x = nr_file - ratio * nr_anon;
> 
> if (nr_file <= nr_anon), then ratio = nr_file / (nr_anon - x).
>  x = nr_anon - nr_file / ratio;
> 
> ...
>

Are you sure this is an equivalent-to-before change?  If so, then I
can't immediately see why :(

> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2057,8 +2057,7 @@ out:
>  static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>  {
>  	unsigned long nr[NR_LRU_LISTS];
> -	unsigned long targets[NR_LRU_LISTS];
> -	unsigned long nr_to_scan;
> +	unsigned long nr_to_scan, ratio;
>  	enum lru_list lru;
>  	unsigned long nr_reclaimed = 0;
>  	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
> @@ -2067,8 +2066,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>  
>  	get_scan_count(lruvec, sc, nr);
>  
> -	/* Record the original scan target for proportional adjustments later */
> -	memcpy(targets, nr, sizeof(nr));
> +	ratio = (nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE] + 1) /
> +			(nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON] + 1);
>  
>  	/*
>  	 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
> @@ -2088,7 +2087,6 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>  	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
>  					nr[LRU_INACTIVE_FILE]) {
>  		unsigned long nr_anon, nr_file, percentage;
> -		unsigned long nr_scanned;
>  
>  		for_each_evictable_lru(lru) {
>  			if (nr[lru]) {
> @@ -2123,15 +2121,13 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>  			break;
>  
>  		if (nr_file > nr_anon) {
> -			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
> -						targets[LRU_ACTIVE_ANON] + 1;
> +			nr_to_scan = nr_file - ratio * nr_anon;
> +			percentage = nr[LRU_FILE] * 100 / nr_file;

here, nr_file and nr_anon are derived from the contents of nr[].  But
nr[] was modified in the for_each_evictable_lru() loop, so its contents
now may differ from what was in targets[]?

>  			lru = LRU_BASE;
> -			percentage = nr_anon * 100 / scan_target;
>  		} else {
> -			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
> -						targets[LRU_ACTIVE_FILE] + 1;
> +			nr_to_scan = nr_anon - nr_file / ratio;
> +			percentage = nr[LRU_BASE] * 100 / nr_anon;
>  			lru = LRU_FILE;
> -			percentage = nr_file * 100 / scan_target;
>  		}
>  
>  		/* Stop scanning the smaller of the LRU */
> ...


* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-10 23:33   ` Andrew Morton
@ 2014-06-11  2:08     ` Chen Yucong
  -1 siblings, 0 replies; 28+ messages in thread
From: Chen Yucong @ 2014-06-11  2:08 UTC (permalink / raw)
  To: Andrew Morton; +Cc: mgorman, mhocko, hannes, linux-mm, linux-kernel

On Tue, 2014-06-10 at 16:33 -0700, Andrew Morton wrote:
> On Mon,  9 Jun 2014 21:27:16 +0800 Chen Yucong <slaoub@gmail.com> wrote:
> 
> > Via https://lkml.org/lkml/2013/4/10/334 , we can find that recording the
> > original scan targets introduces extra 40 bytes on the stack. This patch
> > is able to avoid this situation and the call to memcpy(). At the same time,
> > it does not change the relative design idea.
> > 
> > ratio = original_nr_file / original_nr_anon;
> > 
> > If (nr_file > nr_anon), then ratio = (nr_file - x) / nr_anon.
> >  x = nr_file - ratio * nr_anon;
> > 
> > if (nr_file <= nr_anon), then ratio = nr_file / (nr_anon - x).
> >  x = nr_anon - nr_file / ratio;
> > 
> > ...
> >
> 
> Are you sure this is an equivalent-to-before change?  If so, then I
> can't immediately see why :(
> 
The design idea is to keep

   ratio
        == scan_target[anon] : scan_target[file]
        == really_scanned_num[anon] : really_scanned_num[file]

The original implementation is

   ratio
        == (scan_target[anon] * percentage_anon) /
           (scan_target[file] * percentage_file)

To keep the original ratio, percentage_anon should equal
percentage_file. In other words, we need to calculate the difference
between percentage_anon and percentage_file, and for that we also have
to record the original scan targets.

Instead, we can calculate the *ratio* at the beginning of
shrink_lruvec(). As a result, this can avoid introducing the extra 40
bytes.

In short, we have the same goal: keep the same *ratio* from beginning to
end.
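
As a rough illustration, the invariant that both versions aim for could be
written as the following kernel-style helper (purely hypothetical, not code
from the patch):

    /*
     * Does scanned_anon : scanned_file match target_anon : target_file?
     * Compare cross-products so that no integer division (and hence no
     * rounding error) is involved.
     */
    static bool ratio_kept(unsigned long scanned_anon, unsigned long scanned_file,
                           unsigned long target_anon, unsigned long target_file)
    {
            return scanned_anon * target_file == scanned_file * target_anon;
    }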

thx!
cyc
 
       


* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-10 23:33   ` Andrew Morton
@ 2014-06-11  3:21     ` Chen Yucong
  -1 siblings, 0 replies; 28+ messages in thread
From: Chen Yucong @ 2014-06-11  3:21 UTC (permalink / raw)
  To: Andrew Morton; +Cc: mgorman, mhocko, hannes, linux-mm, linux-kernel

On Tue, 2014-06-10 at 16:33 -0700, Andrew Morton wrote:
> >                       break;
> >  
> >               if (nr_file > nr_anon) {
> > -                     unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
> > -                                             targets[LRU_ACTIVE_ANON] + 1;
> > +                     nr_to_scan = nr_file - ratio * nr_anon;
> > +                     percentage = nr[LRU_FILE] * 100 / nr_file;
> 
> here, nr_file and nr_anon are derived from the contents of nr[].  But
> nr[] was modified in the for_each_evictable_lru() loop, so its
> contents
> now may differ from what was in targets[]? 

nr_to_scan is used for recording the number of pages that should be
scanned to keep original *ratio*.

If we assume that (nr_file > nr_anon) is true, then nr_to_scan should be
distributed between nr[LRU_ACTIVE_FILE] and nr[LRU_INACTIVE_FILE] in
proportion.

    nr_file = nr[LRU_ACTIVE_FILE] + nr[LRU_INACTIVE_FILE];
    percentage = nr[LRU_FILE] / nr_file;

Note that, in comparison with the *old* percentage, the "new" percentage
has a different meaning. It is only used to divide the nr_to_scan pages
appropriately.
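
For example (hypothetical numbers): if nr[LRU_INACTIVE_FILE] = 30 and
nr[LRU_ACTIVE_FILE] = 10 pages remain, then percentage = 30 * 100 / 40 = 75,
so 75% of nr_to_scan goes to the inactive file list and the remaining 25%
goes to the active file list.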

thx!
cyc     


* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-11  3:21     ` Chen Yucong
@ 2014-06-16  0:47       ` Hugh Dickins
  -1 siblings, 0 replies; 28+ messages in thread
From: Hugh Dickins @ 2014-06-16  0:47 UTC (permalink / raw)
  To: Chen Yucong
  Cc: Andrew Morton, mgorman, mhocko, hannes, linux-mm, linux-kernel

On Wed, 11 Jun 2014, Chen Yucong wrote:
> On Tue, 2014-06-10 at 16:33 -0700, Andrew Morton wrote:
> > >                       break;
> > >  
> > >               if (nr_file > nr_anon) {
> > > -                     unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
> > > -                                             targets[LRU_ACTIVE_ANON] + 1;
> > > +                     nr_to_scan = nr_file - ratio * nr_anon;
> > > +                     percentage = nr[LRU_FILE] * 100 / nr_file;
> > 
> > here, nr_file and nr_anon are derived from the contents of nr[].  But
> > nr[] was modified in the for_each_evictable_lru() loop, so its
> > contents
> > now may differ from what was in targets[]? 
> 
> nr_to_scan is used for recording the number of pages that should be
> scanned to keep original *ratio*.
> 
> We can assume that the value of (nr_file > nr_anon) is true, nr_to_scan
> should be distribute to nr[LRU_ACTIVE_FILE] and nr[LRU_INACTIVE_FILE] in
> proportion.
> 
>     nr_file = nr[LRU_ACTIVE_FILE] + nr[LRU_INACTIVE_FILE];
>     percentage = nr[LRU_FILE] / nr_file;
> 
> Note that in comparison with *old* percentage, the "new" percentage has
> the different meaning. It is just used to divide nr_so_scan pages
> appropriately.

[PATCH] mm-vmscanc-avoid-recording-the-original-scan-targets-in-shrink_lruvec-fix.patch

I have not reviewed your logic at all, but soon hit a divide-by-zero
crash on mmotm: it needs some such fix as below.

Signed-off-by: Hugh Dickins <hughd@google.com>
---

 mm/vmscan.c |    5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

--- mmotm/mm/vmscan.c	2014-06-12 17:46:36.632008452 -0700
+++ linux/mm/vmscan.c	2014-06-12 18:55:18.832425713 -0700
@@ -2122,11 +2122,12 @@ static void shrink_lruvec(struct lruvec
 			nr_to_scan = nr_file - ratio * nr_anon;
 			percentage = nr[LRU_FILE] * 100 / nr_file;
 			lru = LRU_BASE;
-		} else {
+		} else if (ratio) {
 			nr_to_scan = nr_anon - nr_file / ratio;
 			percentage = nr[LRU_BASE] * 100 / nr_anon;
 			lru = LRU_FILE;
-		}
+		} else
+			break;
 
 		/* Stop scanning the smaller of the LRU */
 		nr[lru] = 0;

* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-16  0:47       ` Hugh Dickins
@ 2014-06-16  6:21         ` Chen Yucong
  -1 siblings, 0 replies; 28+ messages in thread
From: Chen Yucong @ 2014-06-16  6:21 UTC (permalink / raw)
  To: Hugh Dickins
  Cc: Andrew Morton, mgorman, mhocko, hannes, linux-mm, linux-kernel

On Sun, 2014-06-15 at 17:47 -0700, Hugh Dickins wrote:
> On Wed, 11 Jun 2014, Chen Yucong wrote:
> > On Tue, 2014-06-10 at 16:33 -0700, Andrew Morton wrote:
> > > >                       break;
> > > >  
> > > >               if (nr_file > nr_anon) {
> > > > -                     unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
> > > > -                                             targets[LRU_ACTIVE_ANON] + 1;
> > > > +                     nr_to_scan = nr_file - ratio * nr_anon;
> > > > +                     percentage = nr[LRU_FILE] * 100 / nr_file;
> > > 
> > > here, nr_file and nr_anon are derived from the contents of nr[].  But
> > > nr[] was modified in the for_each_evictable_lru() loop, so its
> > > contents
> > > now may differ from what was in targets[]? 
> > 
> > nr_to_scan is used for recording the number of pages that should be
> > scanned to keep original *ratio*.
> > 
> > We can assume that the value of (nr_file > nr_anon) is true, nr_to_scan
> > should be distribute to nr[LRU_ACTIVE_FILE] and nr[LRU_INACTIVE_FILE] in
> > proportion.
> > 
> >     nr_file = nr[LRU_ACTIVE_FILE] + nr[LRU_INACTIVE_FILE];
> >     percentage = nr[LRU_FILE] / nr_file;
> > 
> > Note that in comparison with *old* percentage, the "new" percentage has
> > the different meaning. It is just used to divide nr_so_scan pages
> > appropriately.
> 
> [PATCH] mm-vmscanc-avoid-recording-the-original-scan-targets-in-shrink_lruvec-fix.patch
> 
> I have not reviewed your logic at all, but soon hit a divide-by-zero
> crash on mmotm: it needs some such fix as below.
> 
> Signed-off-by: Hugh Dickins <hughd@google.com>
> ---
> 
>  mm/vmscan.c |    5 +++--
>  1 file changed, 3 insertions(+), 2 deletions(-)
> 
> --- mmotm/mm/vmscan.c	2014-06-12 17:46:36.632008452 -0700
> +++ linux/mm/vmscan.c	2014-06-12 18:55:18.832425713 -0700
> @@ -2122,11 +2122,12 @@ static void shrink_lruvec(struct lruvec
>  			nr_to_scan = nr_file - ratio * nr_anon;
>  			percentage = nr[LRU_FILE] * 100 / nr_file;
>  			lru = LRU_BASE;
> -		} else {
> +		} else if (ratio) {
>  			nr_to_scan = nr_anon - nr_file / ratio;
>  			percentage = nr[LRU_BASE] * 100 / nr_anon;
>  			lru = LRU_FILE;
> -		}
> +		} else
> +			break;
>  
>  		/* Stop scanning the smaller of the LRU */
>  		nr[lru] = 0;

I think I made a terrible mistake. If
     (nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]) <
     (nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON])
is true, the ratio will always be zero with the original patch, which is
unfair to the anon lists. Although the above fix avoids the
divide-by-zero crash, it cannot solve the fairness problem.
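
For example (hypothetical numbers): with 40 file pages and 100 anon pages
coming from get_scan_count(), ratio = (40 + 1) / (100 + 1) = 0 in integer
arithmetic, so nr_file / ratio is a division by zero; with the fix above the
else branch is simply skipped and the loop breaks out, so the anon side never
gets a proper proportional adjustment.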

The following fix solves the divide-by-zero and fairness problems
simultaneously, but it needs to introduce a second variable to hold the
anon-to-file ratio, along with the related operations.

thx!
cyc


Signed-off-by: Chen Yucong <slaoub@gmail.com>
---
 mm/vmscan.c |   30 +++++++++++-------------------
 1 file changed, 11 insertions(+), 19 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index a8ffe4e..cf8d0a3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2057,8 +2057,7 @@ out:
 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
 	unsigned long nr[NR_LRU_LISTS];
-	unsigned long targets[NR_LRU_LISTS];
-	unsigned long nr_to_scan;
+	unsigned long nr_to_scan, ratio_file_to_anon, ratio_anon_to_file;
 	enum lru_list lru;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
@@ -2067,8 +2066,10 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 
 	get_scan_count(lruvec, sc, nr);
 
-	/* Record the original scan target for proportional adjustments later */
-	memcpy(targets, nr, sizeof(nr));
+	ratio_file_to_anon = (nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE] + 1) /
+			     (nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON] + 1);
+	ratio_anon_to_file = (nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON] + 1) /
+			     (nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE] + 1);
 
 	/*
 	 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
@@ -2088,7 +2089,6 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 					nr[LRU_INACTIVE_FILE]) {
 		unsigned long nr_anon, nr_file, percentage;
-		unsigned long nr_scanned;
 
 		for_each_evictable_lru(lru) {
 			if (nr[lru]) {
@@ -2123,15 +2123,13 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 			break;
 
 		if (nr_file > nr_anon) {
-			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
-						targets[LRU_ACTIVE_ANON] + 1;
+			nr_to_scan = nr_file - ratio_file_to_anon * nr_anon;
+			percentage = nr[LRU_FILE] * 100 / nr_file;
 			lru = LRU_BASE;
-			percentage = nr_anon * 100 / scan_target;
 		} else {
-			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
-						targets[LRU_ACTIVE_FILE] + 1;
+			nr_to_scan = nr_anon - ratio_anon_to_file * nr_file;
+			percentage = nr[LRU_BASE] * 100 / nr_anon;
 			lru = LRU_FILE;
-			percentage = nr_file * 100 / scan_target;
 		}
 
 		/* Stop scanning the smaller of the LRU */
@@ -2143,14 +2141,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 		 * scan target and the percentage scanning already complete
 		 */
 		lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
-		nr_scanned = targets[lru] - nr[lru];
-		nr[lru] = targets[lru] * (100 - percentage) / 100;
-		nr[lru] -= min(nr[lru], nr_scanned);
-
-		lru += LRU_ACTIVE;
-		nr_scanned = targets[lru] - nr[lru];
-		nr[lru] = targets[lru] * (100 - percentage) / 100;
-		nr[lru] -= min(nr[lru], nr_scanned);
+		nr[lru] = nr_to_scan * percentage / 100;
+		nr[lru + LRU_ACTIVE] = nr_to_scan - nr[lru];
 
 		scan_adjusted = true;
 	}
-- 
1.7.10.4




* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-09 13:27 ` Chen Yucong
@ 2014-06-16 12:57   ` Chen Yucong
  -1 siblings, 0 replies; 28+ messages in thread
From: Chen Yucong @ 2014-06-16 12:57 UTC (permalink / raw)
  To: Andrew Morton; +Cc: mhocko, hannes, akpm, linux-mm, linux-kernel

On Mon, 2014-06-09 at 21:27 +0800, Chen Yucong wrote:
> Via https://lkml.org/lkml/2013/4/10/334 , we can find that recording the
> original scan targets introduces extra 40 bytes on the stack. This patch
> is able to avoid this situation and the call to memcpy(). At the same time,
> it does not change the relative design idea.
> 
> ratio = original_nr_file / original_nr_anon;
> 
> If (nr_file > nr_anon), then ratio = (nr_file - x) / nr_anon.
>  x = nr_file - ratio * nr_anon;
> 
> if (nr_file <= nr_anon), then ratio = nr_file / (nr_anon - x).
>  x = nr_anon - nr_file / ratio;
> 
Hi Andrew Morton,

I think the patch
 
[PATCH]
mm-vmscanc-avoid-recording-the-original-scan-targets-in-shrink_lruvec-fix.patch

which I posted should be discarded, because it has some critical
defects.
    1) If we want to solve both the divide-by-zero and the fairness
problems, it needs two variables for recording the ratios.
 
    2) For "x = nr_file - ratio * nr_anon", "x" can easily go negative.
Assume:

      nr[LRU_ACTIVE_FILE] = 30
      nr[LRU_INACTIVE_FILE] = 30
      nr[LRU_ACTIVE_ANON] = 0
      nr[LRU_INACTIVE_ANON] = 40

      ratio = 60/40 = 3/2

When (nr_reclaimed < nr_to_reclaim) becomes false, we are left with:
      nr[LRU_ACTIVE_FILE] = 15
      nr[LRU_INACTIVE_FILE] = 15
      nr[LRU_ACTIVE_ANON] = 0
      nr[LRU_INACTIVE_ANON] = 25
 
      nr_file = 30
      nr_anon = 25

      x = 30 - 25 * (3/2) = 30 - 37.5 = -7.5.

Since x is an unsigned long, the negative result wraps around to a huge
scan target, which is disastrous (see the sketch after point 3 below).
   
   3) This method is less accurate than the original, especially when the
difference between the FILE and ANON scan targets is small.
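
A minimal userspace sketch of the point-2 failure (plain C, not kernel code;
the values are the hypothetical ones above, with the ratio applied as the
exact 3/2 from the changelog):

#include <stdio.h>

int main(void)
{
	unsigned long nr_file = 30;	/* file pages still to scan */
	unsigned long nr_anon = 25;	/* anon pages still to scan */

	/* x = nr_file - ratio * nr_anon with ratio = 3/2 */
	unsigned long x = nr_file - (3 * nr_anon) / 2;	/* 30 - 37 */

	/* prints 18446744073709551609 on a 64-bit machine */
	printf("x = %lu\n", x);
	return 0;
}

Because the subtraction is done on unsigned longs, the "-7" becomes an
enormous scan target instead of an error.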

thx!
cyc  
   


^ permalink raw reply	[flat|nested] 28+ messages in thread


* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-16 12:57   ` Chen Yucong
@ 2014-06-16 23:42     ` Andrew Morton
  -1 siblings, 0 replies; 28+ messages in thread
From: Andrew Morton @ 2014-06-16 23:42 UTC (permalink / raw)
  To: Chen Yucong; +Cc: mhocko, hannes, linux-mm, linux-kernel

On Mon, 16 Jun 2014 20:57:54 +0800 Chen Yucong <slaoub@gmail.com> wrote:

> On Mon, 2014-06-09 at 21:27 +0800, Chen Yucong wrote:
> > Via https://lkml.org/lkml/2013/4/10/334 , we can find that recording the
> > original scan targets introduces extra 40 bytes on the stack. This patch
> > is able to avoid this situation and the call to memcpy(). At the same time,
> > it does not change the relative design idea.
> > 
> > ratio = original_nr_file / original_nr_anon;
> > 
> > If (nr_file > nr_anon), then ratio = (nr_file - x) / nr_anon.
> >  x = nr_file - ratio * nr_anon;
> > 
> > if (nr_file <= nr_anon), then ratio = nr_file / (nr_anon - x).
> >  x = nr_anon - nr_file / ratio;
> > 
> Hi Andrew Morton,
> 
> I think the patch
>  
> [PATCH]
> mm-vmscanc-avoid-recording-the-original-scan-targets-in-shrink_lruvec-fix.patch
> 
> which I committed should be discarded.

OK, thanks.

I assume you're referring to
mm-vmscanc-avoid-recording-the-original-scan-targets-in-shrink_lruvec.patch
- I don't think a -fix.patch existed?


^ permalink raw reply	[flat|nested] 28+ messages in thread


* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-16 12:57   ` Chen Yucong
@ 2014-06-16 23:46     ` Minchan Kim
  -1 siblings, 0 replies; 28+ messages in thread
From: Minchan Kim @ 2014-06-16 23:46 UTC (permalink / raw)
  To: Chen Yucong; +Cc: Andrew Morton, mhocko, hannes, linux-mm, linux-kernel

On Mon, Jun 16, 2014 at 08:57:54PM +0800, Chen Yucong wrote:
> On Mon, 2014-06-09 at 21:27 +0800, Chen Yucong wrote:
> > Via https://lkml.org/lkml/2013/4/10/334 , we can find that recording the
> > original scan targets introduces extra 40 bytes on the stack. This patch
> > is able to avoid this situation and the call to memcpy(). At the same time,
> > it does not change the relative design idea.
> > 
> > ratio = original_nr_file / original_nr_anon;
> > 
> > If (nr_file > nr_anon), then ratio = (nr_file - x) / nr_anon.
> >  x = nr_file - ratio * nr_anon;
> > 
> > if (nr_file <= nr_anon), then ratio = nr_file / (nr_anon - x).
> >  x = nr_anon - nr_file / ratio;
> > 
> Hi Andrew Morton,
> 
> I think the patch
>  
> [PATCH]
> mm-vmscanc-avoid-recording-the-original-scan-targets-in-shrink_lruvec-fix.patch
> 
> which I committed should be discarded. Because It have some critical
> defects.
>     1) If we want to solve the divide-by-zero and unfair problems, it
> needs to two variables for recording the ratios.
>  
>     2) For "x = nr_file - ratio * nr_anon", the "x" is likely to be a
> negative number. we can assume:
> 
>       nr[LRU_ACTIVE_FILE] = 30
>       nr[LRU_INACTIVE_FILE] = 30
>       nr[LRU_ACTIVE_ANON] = 0
>       nr[LRU_INACTIVE_ANON] = 40
> 
>       ratio = 60/40 = 3/2
> 
> When the value of (nr_reclaimed < nr_to_reclaim) become false, there are
> the following results:
>       nr[LRU_ACTIVE_FILE] = 15
>       nr[LRU_INACTIVE_FILE] = 15
>       nr[LRU_ACTIVE_ANON] = 0
>       nr[LRU_INACTIVE_ANON] = 25
>  
>       nr_file = 30
>       nr_anon = 25
> 
>       x = 30 - 25 * (3/2) = 30 - 37.5 = -7.5.
> 
> The result is too terrible. 
>    
>    3) This method is less accurate than the original, especially for the
> qualitative difference between FILE and ANON that is very small.

Yes, as point 3 says, it changed the old behavior. I'm ashamed, but I wanted
to clean it up. Is it worth cleaning up?

From aedf8288e28a07bdd6c459a403f21cc2615ecc4e Mon Sep 17 00:00:00 2001
From: Minchan Kim <minchan@kernel.org>
Date: Tue, 17 Jun 2014 08:36:56 +0900
Subject: [PATCH] mm: proportional scanning cleanup

This aims to be a cleanup with no behavior change, so if anyone doesn't
think it is more readable, it should simply be dropped.

Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 mm/vmscan.c | 69 ++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 36 insertions(+), 33 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0f16ffe8eb67..acc29315bad0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2054,19 +2054,18 @@ out:
  */
 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
-	unsigned long nr[NR_LRU_LISTS];
 	unsigned long targets[NR_LRU_LISTS];
-	unsigned long nr_to_scan;
+	unsigned long remains[NR_LRU_LISTS];
 	enum lru_list lru;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
 	struct blk_plug plug;
 	bool scan_adjusted;
 
-	get_scan_count(lruvec, sc, nr);
+	get_scan_count(lruvec, sc, targets);
 
-	/* Record the original scan target for proportional adjustments later */
-	memcpy(targets, nr, sizeof(nr));
+	/* Keep the original scan target for proportional adjustments later */
+	memcpy(remains, targets, sizeof(targets));
 
 	/*
 	 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
@@ -2083,19 +2082,21 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 			 sc->priority == DEF_PRIORITY);
 
 	blk_start_plug(&plug);
-	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
-					nr[LRU_INACTIVE_FILE]) {
-		unsigned long nr_anon, nr_file, percentage;
-		unsigned long nr_scanned;
+	while (remains[LRU_INACTIVE_ANON] || remains[LRU_ACTIVE_FILE] ||
+					remains[LRU_INACTIVE_FILE]) {
+		unsigned long target, remain_anon, remain_file;
+		unsigned long percentage;
+		unsigned long nr_scanned, nr_to_scan;
 
 		for_each_evictable_lru(lru) {
-			if (nr[lru]) {
-				nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
-				nr[lru] -= nr_to_scan;
+			if (!remains[lru])
+				continue;
 
-				nr_reclaimed += shrink_list(lru, nr_to_scan,
-							    lruvec, sc);
-			}
+			nr_to_scan = min(remains[lru], SWAP_CLUSTER_MAX);
+			remains[lru] -= nr_to_scan;
+
+			nr_reclaimed += shrink_list(lru, nr_to_scan,
+						    lruvec, sc);
 		}
 
 		if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
@@ -2108,8 +2109,10 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 		 * stop reclaiming one LRU and reduce the amount scanning
 		 * proportional to the original scan target.
 		 */
-		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
-		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
+		remain_file = remains[LRU_INACTIVE_FILE] +
+				remains[LRU_ACTIVE_FILE];
+		remain_anon = remains[LRU_INACTIVE_ANON] +
+				remains[LRU_ACTIVE_ANON];
 
 		/*
 		 * It's just vindictive to attack the larger once the smaller
@@ -2117,38 +2120,38 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 		 * smaller below, this makes sure that we only make one nudge
 		 * towards proportionality once we've got nr_to_reclaim.
 		 */
-		if (!nr_file || !nr_anon)
+		if (!remain_file || !remain_anon)
 			break;
 
-		if (nr_file > nr_anon) {
-			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
-						targets[LRU_ACTIVE_ANON] + 1;
+		if (remain_file > remain_anon) {
+			target = targets[LRU_ACTIVE_ANON] +
+						targets[LRU_INACTIVE_ANON] + 1;
+			percentage = 100 * (target - remain_anon) / target;
 			lru = LRU_BASE;
-			percentage = nr_anon * 100 / scan_target;
 		} else {
-			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
-						targets[LRU_ACTIVE_FILE] + 1;
+			target = targets[LRU_ACTIVE_FILE] +
+						targets[LRU_INACTIVE_FILE] + 1;
+			percentage = 100 * (target - remain_file) / target;
 			lru = LRU_FILE;
-			percentage = nr_file * 100 / scan_target;
 		}
 
 		/* Stop scanning the smaller of the LRU */
-		nr[lru] = 0;
-		nr[lru + LRU_ACTIVE] = 0;
+		remains[lru] = 0;
+		remains[lru + LRU_ACTIVE] = 0;
 
 		/*
 		 * Recalculate the other LRU scan count based on its original
 		 * scan target and the percentage scanning already complete
 		 */
 		lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
-		nr_scanned = targets[lru] - nr[lru];
-		nr[lru] = targets[lru] * (100 - percentage) / 100;
-		nr[lru] -= min(nr[lru], nr_scanned);
+		nr_scanned = targets[lru] - remains[lru];
+		remains[lru] = targets[lru] * percentage / 100;
+		remains[lru] -= min(remains[lru], nr_scanned);
 
 		lru += LRU_ACTIVE;
-		nr_scanned = targets[lru] - nr[lru];
-		nr[lru] = targets[lru] * (100 - percentage) / 100;
-		nr[lru] -= min(nr[lru], nr_scanned);
+		nr_scanned = targets[lru] - remains[lru];
+		remains[lru] = targets[lru] * percentage / 100;
+		remains[lru] -= min(remains[lru], nr_scanned);
 
 		scan_adjusted = true;
 	}
-- 
2.0.0


-- 
Kind regards,
Minchan Kim

^ permalink raw reply related	[flat|nested] 28+ messages in thread


* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-16 23:42     ` Andrew Morton
@ 2014-06-16 23:50       ` Chen Yucong
  -1 siblings, 0 replies; 28+ messages in thread
From: Chen Yucong @ 2014-06-16 23:50 UTC (permalink / raw)
  To: Andrew Morton; +Cc: mhocko, hannes, linux-mm, linux-kernel

On Mon, 2014-06-16 at 16:42 -0700, Andrew Morton wrote:
> On Mon, 16 Jun 2014 20:57:54 +0800 Chen Yucong <slaoub@gmail.com> wrote:
> 
> > On Mon, 2014-06-09 at 21:27 +0800, Chen Yucong wrote:
> > > Via https://lkml.org/lkml/2013/4/10/334 , we can find that recording the
> > > original scan targets introduces extra 40 bytes on the stack. This patch
> > > is able to avoid this situation and the call to memcpy(). At the same time,
> > > it does not change the relative design idea.
> > > 
> > > ratio = original_nr_file / original_nr_anon;
> > > 
> > > If (nr_file > nr_anon), then ratio = (nr_file - x) / nr_anon.
> > >  x = nr_file - ratio * nr_anon;
> > > 
> > > if (nr_file <= nr_anon), then ratio = nr_file / (nr_anon - x).
> > >  x = nr_anon - nr_file / ratio;
> > > 
> > Hi Andrew Morton,
> > 
> > I think the patch
> >  
> > [PATCH]
> > mm-vmscanc-avoid-recording-the-original-scan-targets-in-shrink_lruvec-fix.patch
> > 
> > which I committed should be discarded.
> 
> OK, thanks.
> 
> I assume you're referring to
> mm-vmscanc-avoid-recording-the-original-scan-targets-in-shrink_lruvec.patch
> - I don't think a -fix.patch existed?

Yes, the patch that should be discarded is
mm-vmscanc-avoid-recording-the-original-scan-targets-in-shrink_lruvec.patch

thx!
cyc



^ permalink raw reply	[flat|nested] 28+ messages in thread


* Re: [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec()
  2014-06-16 23:42     ` Andrew Morton
@ 2014-06-16 23:51       ` Minchan Kim
  -1 siblings, 0 replies; 28+ messages in thread
From: Minchan Kim @ 2014-06-16 23:51 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Chen Yucong, mhocko, hannes, linux-mm, linux-kernel

On Mon, Jun 16, 2014 at 04:42:37PM -0700, Andrew Morton wrote:
> On Mon, 16 Jun 2014 20:57:54 +0800 Chen Yucong <slaoub@gmail.com> wrote:
> 
> > On Mon, 2014-06-09 at 21:27 +0800, Chen Yucong wrote:
> > > Via https://lkml.org/lkml/2013/4/10/334 , we can find that recording the
> > > original scan targets introduces extra 40 bytes on the stack. This patch
> > > is able to avoid this situation and the call to memcpy(). At the same time,
> > > it does not change the relative design idea.
> > > 
> > > ratio = original_nr_file / original_nr_anon;
> > > 
> > > If (nr_file > nr_anon), then ratio = (nr_file - x) / nr_anon.
> > >  x = nr_file - ratio * nr_anon;
> > > 
> > > if (nr_file <= nr_anon), then ratio = nr_file / (nr_anon - x).
> > >  x = nr_anon - nr_file / ratio;
> > > 
> > Hi Andrew Morton,
> > 
> > I think the patch
> >  
> > [PATCH]
> > mm-vmscanc-avoid-recording-the-original-scan-targets-in-shrink_lruvec-fix.patch
> > 
> > which I committed should be discarded.
> 
> OK, thanks.
> 
> I assume you're referring to
> mm-vmscanc-avoid-recording-the-original-scan-targets-in-shrink_lruvec.patch

True.

-- 
Kind regards,
Minchan Kim

^ permalink raw reply	[flat|nested] 28+ messages in thread


end of thread, other threads:[~2014-06-16 23:52 UTC | newest]

Thread overview: 28+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-06-09 13:27 [PATCH] mm/vmscan.c: avoid recording the original scan targets in shrink_lruvec() Chen Yucong
2014-06-09 13:27 ` Chen Yucong
2014-06-09 23:24 ` Minchan Kim
2014-06-09 23:24   ` Minchan Kim
2014-06-10  0:10   ` Chen Yucong
2014-06-10  0:10     ` Chen Yucong
2014-06-10  0:24     ` Minchan Kim
2014-06-10  0:24       ` Minchan Kim
2014-06-10 23:33 ` Andrew Morton
2014-06-10 23:33   ` Andrew Morton
2014-06-11  2:08   ` Chen Yucong
2014-06-11  2:08     ` Chen Yucong
2014-06-11  3:21   ` Chen Yucong
2014-06-11  3:21     ` Chen Yucong
2014-06-16  0:47     ` Hugh Dickins
2014-06-16  0:47       ` Hugh Dickins
2014-06-16  6:21       ` Chen Yucong
2014-06-16  6:21         ` Chen Yucong
2014-06-16 12:57 ` Chen Yucong
2014-06-16 12:57   ` Chen Yucong
2014-06-16 23:42   ` Andrew Morton
2014-06-16 23:42     ` Andrew Morton
2014-06-16 23:50     ` Chen Yucong
2014-06-16 23:50       ` Chen Yucong
2014-06-16 23:51     ` Minchan Kim
2014-06-16 23:51       ` Minchan Kim
2014-06-16 23:46   ` Minchan Kim
2014-06-16 23:46     ` Minchan Kim
