linux-kernel.vger.kernel.org archive mirror
* [PATCH] vmscan: ZVC updates in shrink_active_list() can be done once
@ 2009-05-04 23:44 Wu Fengguang
  2009-05-05  2:46 ` Minchan Kim
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Wu Fengguang @ 2009-05-04 23:44 UTC (permalink / raw)
  To: akpm; +Cc: a.p.zijlstra, cl, kosaki.motohiro, npiggin, riel, linux-mm, LKML

This effectively lifts the unit of nr_inactive_* and pgdeactivate updates
from PAGEVEC_SIZE=14 to SWAP_CLUSTER_MAX=32.

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
---
 mm/vmscan.c |   11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

--- linux.orig/mm/vmscan.c
+++ linux/mm/vmscan.c
@@ -1228,7 +1228,6 @@ static void shrink_active_list(unsigned 
 			struct scan_control *sc, int priority, int file)
 {
 	unsigned long pgmoved;
-	int pgdeactivate = 0;
 	unsigned long pgscanned;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_inactive);
@@ -1257,7 +1256,7 @@ static void shrink_active_list(unsigned 
 		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
 
-	pgmoved = 0;
+	pgmoved = 0;  /* count referenced (mapping) mapped pages */
 	while (!list_empty(&l_hold)) {
 		cond_resched();
 		page = lru_to_page(&l_hold);
@@ -1291,7 +1290,7 @@ static void shrink_active_list(unsigned 
 	 */
 	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
-	pgmoved = 0;
+	pgmoved = 0;  /* count pages moved to inactive list */
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1304,10 +1303,7 @@ static void shrink_active_list(unsigned 
 		mem_cgroup_add_lru_list(page, lru);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
-			__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
 			spin_unlock_irq(&zone->lru_lock);
-			pgdeactivate += pgmoved;
-			pgmoved = 0;
 			if (buffer_heads_over_limit)
 				pagevec_strip(&pvec);
 			__pagevec_release(&pvec);
@@ -1315,9 +1311,8 @@ static void shrink_active_list(unsigned 
 		}
 	}
 	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-	pgdeactivate += pgmoved;
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
-	__count_vm_events(PGDEACTIVATE, pgdeactivate);
+	__count_vm_events(PGDEACTIVATE, pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
 	if (buffer_heads_over_limit)
 		pagevec_strip(&pvec);
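
The effect, in outline: instead of flushing the ZVC delta every time the
pagevec fills, the delta is applied once for the whole batch.  A minimal
sketch of the two patterns; have_pages(), move_page_to_inactive() and
pagevec_is_full() are hypothetical stand-ins for the real loop body:

	/* before: flush counters each time the pagevec fills (14 pages) */
	while (have_pages()) {		/* hypothetical helper */
		move_page_to_inactive();
		pgmoved++;
		if (pagevec_is_full()) {
			__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
			pgdeactivate += pgmoved;
			pgmoved = 0;
		}
	}
	__count_vm_events(PGDEACTIVATE, pgdeactivate);

	/* after: one update covers the whole batch (<= SWAP_CLUSTER_MAX pages) */
	while (have_pages()) {
		move_page_to_inactive();
		pgmoved++;
	}
	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
	__count_vm_events(PGDEACTIVATE, pgmoved);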


* Re: [PATCH] vmscan: ZVC updates in shrink_active_list() can be done once
  2009-05-04 23:44 [PATCH] vmscan: ZVC updates in shrink_active_list() can be done once Wu Fengguang
@ 2009-05-05  2:46 ` Minchan Kim
  2009-05-05 12:15 ` Rik van Riel
  2009-05-05 12:33 ` Johannes Weiner
  2 siblings, 0 replies; 5+ messages in thread
From: Minchan Kim @ 2009-05-05  2:46 UTC (permalink / raw)
  To: Wu Fengguang
  Cc: akpm, a.p.zijlstra, cl, kosaki.motohiro, npiggin, riel, linux-mm, LKML

Reviewed-by: Minchan Kim <minchan.kim@gmail.com>

The fine-grained ZVC updates in shrink_active_list() were introduced for the
dirty ratio determination problem
(commit c878538598d1e7ab41ecc0de8894e34e2fdef630).
Reclaiming 32 pages in a normal reclaim situation takes too little time to
change current VM behavior, so this makes sense to me.
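
For context, consumers read these counters through the vmstat accessors and
only see deltas that have already been folded in.  A rough sketch of the
reader side - zone_page_state() is the real accessor, the surrounding
arithmetic is only illustrative:

	/* what a reader such as the dirty ratio code observes */
	unsigned long inactive;

	inactive = zone_page_state(zone, NR_INACTIVE_ANON) +
		   zone_page_state(zone, NR_INACTIVE_FILE);

	/* deferring the update by up to SWAP_CLUSTER_MAX = 32 pages skews
	 * this reading by at most 32 * 4KB = 128KB per zone */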

On Tue, May 5, 2009 at 8:44 AM, Wu Fengguang <fengguang.wu@intel.com> wrote:
> This effectively lifts the unit of nr_inactive_* and pgdeactivate updates
> from PAGEVEC_SIZE=14 to SWAP_CLUSTER_MAX=32.
>
> Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>

-- 
Thanks,
Minchan Kim


* Re: [PATCH] vmscan: ZVC updates in shrink_active_list() can be done once
  2009-05-04 23:44 [PATCH] vmscan: ZVC updates in shrink_active_list() can be done once Wu Fengguang
  2009-05-05  2:46 ` Minchan Kim
@ 2009-05-05 12:15 ` Rik van Riel
  2009-05-05 12:33 ` Johannes Weiner
  2 siblings, 0 replies; 5+ messages in thread
From: Rik van Riel @ 2009-05-05 12:15 UTC (permalink / raw)
  To: Wu Fengguang
  Cc: akpm, a.p.zijlstra, cl, kosaki.motohiro, npiggin, linux-mm, LKML

Wu Fengguang wrote:
> This effectively lifts the unit of nr_inactive_* and pgdeactivate updates
> from PAGEVEC_SIZE=14 to SWAP_CLUSTER_MAX=32.
> 
> Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>

Acked-by: Rik van Riel <riel@redhat.com>

-- 
All rights reversed.


* Re: [PATCH] vmscan: ZVC updates in shrink_active_list() can be done once
  2009-05-04 23:44 [PATCH] vmscan: ZVC updates in shrink_active_list() can be done once Wu Fengguang
  2009-05-05  2:46 ` Minchan Kim
  2009-05-05 12:15 ` Rik van Riel
@ 2009-05-05 12:33 ` Johannes Weiner
  2009-05-05 13:09   ` Wu Fengguang
  2 siblings, 1 reply; 5+ messages in thread
From: Johannes Weiner @ 2009-05-05 12:33 UTC (permalink / raw)
  To: Wu Fengguang
  Cc: akpm, a.p.zijlstra, cl, kosaki.motohiro, npiggin, riel, linux-mm, LKML

On Tue, May 05, 2009 at 07:44:55AM +0800, Wu Fengguang wrote:
> This effectively lifts the unit of nr_inactive_* and pgdeactivate updates
> from PAGEVEC_SIZE=14 to SWAP_CLUSTER_MAX=32.

For __zone_reclaim() it will be >= SWAP_CLUSTER_MAX, depending on the
allocation order.
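
Roughly, from the 2.6.30-era __zone_reclaim() (quoted from memory, not
verbatim):

	int nr_pages = 1 << order;
	struct scan_control sc = {
		/* the batch size follows the allocation order */
		.swap_cluster_max = max_t(unsigned long, nr_pages,
					  SWAP_CLUSTER_MAX),
	};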

	Hannes


* Re: [PATCH] vmscan: ZVC updates in shrink_active_list() can be done once
  2009-05-05 12:33 ` Johannes Weiner
@ 2009-05-05 13:09   ` Wu Fengguang
  0 siblings, 0 replies; 5+ messages in thread
From: Wu Fengguang @ 2009-05-05 13:09 UTC (permalink / raw)
  To: Johannes Weiner
  Cc: akpm, a.p.zijlstra, cl, kosaki.motohiro, npiggin, riel, linux-mm, LKML

On Tue, May 05, 2009 at 08:33:50PM +0800, Johannes Weiner wrote:
> On Tue, May 05, 2009 at 07:44:55AM +0800, Wu Fengguang wrote:
> > This effectively lifts the unit of nr_inactive_* and pgdeactivate updates
> > from PAGEVEC_SIZE=14 to SWAP_CLUSTER_MAX=32.
> 
> For __zone_reclaim() it will be >= SWAP_CLUSTER_MAX, depending on the
> allocation order.

Thanks for pointing that out. Changelog updated accordingly.

I'd expect the impact to be negligible, because 1024 pages is not
extraordinarily large. But sure, that's much larger than 125 pages - the
maximum zone stats update threshold (zone_pcp->stat_threshold).
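
For reference, deltas below that threshold stay in the per-cpu diff and are
not folded into the zone counter at all.  Roughly what __mod_zone_page_state()
did at the time (from memory, not verbatim):

	void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				   int delta)
	{
		struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
		s8 *p = pcp->vm_stat_diff + item;
		long x = delta + *p;

		/* fold into the global counter only past the threshold */
		if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
			zone_page_state_add(x, zone, item);
			x = 0;
		}
		*p = x;
	}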

Thanks,
Fengguang
---
vmscan: ZVC updates in shrink_active_list() can be done once

This effectively lifts the unit of updates to nr_inactive_* and pgdeactivate
from PAGEVEC_SIZE=14 to SWAP_CLUSTER_MAX=32, or MAX_ORDER_NR_PAGES=1024 for
__zone_reclaim().

CC: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
---
 mm/vmscan.c |   11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

--- linux.orig/mm/vmscan.c
+++ linux/mm/vmscan.c
@@ -1228,7 +1228,6 @@ static void shrink_active_list(unsigned 
 			struct scan_control *sc, int priority, int file)
 {
 	unsigned long pgmoved;
-	int pgdeactivate = 0;
 	unsigned long pgscanned;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_inactive);
@@ -1257,7 +1256,7 @@ static void shrink_active_list(unsigned 
 		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
 
-	pgmoved = 0;
+	pgmoved = 0;  /* count referenced (mapping) mapped pages */
 	while (!list_empty(&l_hold)) {
 		cond_resched();
 		page = lru_to_page(&l_hold);
@@ -1291,7 +1290,7 @@ static void shrink_active_list(unsigned 
 	 */
 	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
-	pgmoved = 0;
+	pgmoved = 0;  /* count pages moved to inactive list */
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1304,10 +1303,7 @@ static void shrink_active_list(unsigned 
 		mem_cgroup_add_lru_list(page, lru);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
-			__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
 			spin_unlock_irq(&zone->lru_lock);
-			pgdeactivate += pgmoved;
-			pgmoved = 0;
 			if (buffer_heads_over_limit)
 				pagevec_strip(&pvec);
 			__pagevec_release(&pvec);
@@ -1315,9 +1311,8 @@ static void shrink_active_list(unsigned 
 		}
 	}
 	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-	pgdeactivate += pgmoved;
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
-	__count_vm_events(PGDEACTIVATE, pgdeactivate);
+	__count_vm_events(PGDEACTIVATE, pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
 	if (buffer_heads_over_limit)
 		pagevec_strip(&pvec);

