linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Minchan Kim <minchan@kernel.org>
To: Mel Gorman <mgorman@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Minchan Kim <minchan.kim@gmail.com>,
	Dave Jones <davej@redhat.com>, Jan Kara <jack@suse.cz>,
	Andy Isaacson <adi@hexapodia.org>,
	Johannes Weiner <jweiner@redhat.com>,
	Rik van Riel <riel@redhat.com>, Nai Xia <nai.xia@gmail.com>,
	Linux-MM <linux-mm@kvack.org>,
	LKML <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH 11/11] mm: Isolate pages for immediate reclaim on their own LRU
Date: Sun, 18 Dec 2011 01:08:22 +0900	[thread overview]
Message-ID: <20111217160822.GA10064@barrios-laptop.redhat.com> (raw)
In-Reply-To: <1323877293-15401-12-git-send-email-mgorman@suse.de>

On Wed, Dec 14, 2011 at 03:41:33PM +0000, Mel Gorman wrote:
> It was observed that scan rates from direct reclaim during tests
> writing to both fast and slow storage were extraordinarily high. The
> problem was that while pages were being marked for immediate reclaim
> when writeback completed, the same pages were being encountered over
> and over again during LRU scanning.
> 
> This patch isolates file-backed pages that are to be reclaimed when
> clean on their own LRU list.

Please include your test results about reducing CPU usage.
They would show how valuable this separate LRU list is.

> 
> Signed-off-by: Mel Gorman <mgorman@suse.de>
> ---
>  include/linux/mmzone.h        |    2 +
>  include/linux/vm_event_item.h |    1 +
>  mm/page_alloc.c               |    5 ++-
>  mm/swap.c                     |   74 ++++++++++++++++++++++++++++++++++++++---
>  mm/vmscan.c                   |   11 ++++++
>  mm/vmstat.c                   |    2 +
>  6 files changed, 89 insertions(+), 6 deletions(-)
> 
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index ac5b522..80834eb 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -84,6 +84,7 @@ enum zone_stat_item {
>  	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
>  	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
>  	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
> +	NR_IMMEDIATE,		/*  "     "     "   "       "         */
>  	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
>  	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
>  	NR_ANON_PAGES,	/* Mapped anonymous pages */
> @@ -136,6 +137,7 @@ enum lru_list {
>  	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
>  	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
>  	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
> +	LRU_IMMEDIATE,
>  	LRU_UNEVICTABLE,
>  	NR_LRU_LISTS
>  };
> diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
> index 03b90cdc..9696fda 100644
> --- a/include/linux/vm_event_item.h
> +++ b/include/linux/vm_event_item.h
> @@ -36,6 +36,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
>  		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
>  		KSWAPD_SKIP_CONGESTION_WAIT,
>  		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
> +		PGRESCUED,
>  #ifdef CONFIG_COMPACTION
>  		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
>  		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index ecaba97..5cf9077 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -2590,7 +2590,7 @@ void show_free_areas(unsigned int filter)
>  
>  	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
>  		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
> -		" unevictable:%lu"
> +		" immediate:%lu unevictable:%lu"
>  		" dirty:%lu writeback:%lu unstable:%lu\n"
>  		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
>  		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
> @@ -2600,6 +2600,7 @@ void show_free_areas(unsigned int filter)
>  		global_page_state(NR_ACTIVE_FILE),
>  		global_page_state(NR_INACTIVE_FILE),
>  		global_page_state(NR_ISOLATED_FILE),
> +		global_page_state(NR_IMMEDIATE),
>  		global_page_state(NR_UNEVICTABLE),
>  		global_page_state(NR_FILE_DIRTY),
>  		global_page_state(NR_WRITEBACK),
> @@ -2627,6 +2628,7 @@ void show_free_areas(unsigned int filter)
>  			" inactive_anon:%lukB"
>  			" active_file:%lukB"
>  			" inactive_file:%lukB"
> +			" immediate:%lukB"
>  			" unevictable:%lukB"
>  			" isolated(anon):%lukB"
>  			" isolated(file):%lukB"
> @@ -2655,6 +2657,7 @@ void show_free_areas(unsigned int filter)
>  			K(zone_page_state(zone, NR_INACTIVE_ANON)),
>  			K(zone_page_state(zone, NR_ACTIVE_FILE)),
>  			K(zone_page_state(zone, NR_INACTIVE_FILE)),
> +			K(zone_page_state(zone, NR_IMMEDIATE)),
>  			K(zone_page_state(zone, NR_UNEVICTABLE)),
>  			K(zone_page_state(zone, NR_ISOLATED_ANON)),
>  			K(zone_page_state(zone, NR_ISOLATED_FILE)),
> diff --git a/mm/swap.c b/mm/swap.c
> index a91caf7..9973975 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -39,6 +39,7 @@ int page_cluster;
>  
>  static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
>  static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
> +static DEFINE_PER_CPU(struct pagevec, lru_putback_immediate_pvecs);
>  static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
>  
>  /*
> @@ -255,24 +256,80 @@ static void pagevec_move_tail(struct pagevec *pvec)
>  }
>  
>  /*
> + * Similar pair of functions to pagevec_move_tail except it is called when
> + * moving a page from the LRU_IMMEDIATE to one of the [in]active_[file|anon]
> + * lists
> + */
> +static void pagevec_putback_immediate_fn(struct page *page, void *arg)
> +{
> +	struct zone *zone = page_zone(page);
> +
> +	if (PageLRU(page)) {
> +		enum lru_list lru = page_lru(page);
> +		list_move(&page->lru, &zone->lru[lru].list);
> +	}
> +}
> +
> +static void pagevec_putback_immediate(struct pagevec *pvec)
> +{
> +	pagevec_lru_move_fn(pvec, pagevec_putback_immediate_fn, NULL);
> +}
> +
> +/*
>   * Writeback is about to end against a page which has been marked for immediate
>   * reclaim.  If it still appears to be reclaimable, move it to the tail of the
>   * inactive list.
>   */
>  void rotate_reclaimable_page(struct page *page)
>  {
> +	struct zone *zone = page_zone(page);
> +	struct list_head *page_list;
> +	struct pagevec *pvec;
> +	unsigned long flags;
> +
> +	page_cache_get(page);
> +	local_irq_save(flags);
> +	__mod_zone_page_state(zone, NR_IMMEDIATE, -1);
> +

I am not sure underflow can never happen here.
We do SetPageReclaim in several places but don't increase NR_IMMEDIATE.

>  	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
>  	    !PageUnevictable(page) && PageLRU(page)) {
> -		struct pagevec *pvec;
> -		unsigned long flags;
>  
> -		page_cache_get(page);
> -		local_irq_save(flags);
>  		pvec = &__get_cpu_var(lru_rotate_pvecs);
>  		if (!pagevec_add(pvec, page))
>  			pagevec_move_tail(pvec);
> -		local_irq_restore(flags);
> +	} else {
> +		pvec = &__get_cpu_var(lru_putback_immediate_pvecs);
> +		if (!pagevec_add(pvec, page))
> +			pagevec_putback_immediate(pvec);

Nitpick about naming.
The name doesn't say whether "immediate" is the source or the
destination, so I got confused about which list is the source.
I know the function's comment already says it, but good naming
can reduce the need for such a comment.

> +	}
> +
> +	/*
> +	 * There is a potential race that if a page is set PageReclaim
> +	 * and moved to the LRU_IMMEDIATE list after writeback completed,
> +	 * it can be left on the LRU_IMMEDATE list with no way for
> +	 * reclaim to find it.
> +	 *
> +	 * This race should be very rare but count how often it happens.
> +	 * If it is a continual race, then it's very unsatisfactory as there
> +	 * is no guarantee that rotate_reclaimable_page() will be called
> +	 * to rescue these pages but finding them in page reclaim is also
> +	 * problematic due to the problem of deciding when the right time
> +	 * to scan this list is.
> +	 */
> +	page_list = &zone->lru[LRU_IMMEDIATE].list;
> +	if (!zone_page_state(zone, NR_IMMEDIATE) && !list_empty(page_list)) {

How about this

if (zone_page_state(zone, NR_IMMEDIATE)) {
	page_list = &zone->lru[LRU_IMMEDIATE].list;
	if (!list_empty(page_list))
...
...
}

It can avoid an unnecessary reference.

> +		struct page *page;
> +
> +		spin_lock(&zone->lru_lock);
> +		while (!list_empty(page_list)) {
> +			page = list_entry(page_list->prev, struct page, lru);
> +			list_move(&page->lru, &zone->lru[page_lru(page)].list);
> +			__count_vm_event(PGRESCUED);
> +		}
> +		spin_unlock(&zone->lru_lock);
>  	}
> +
> +	local_irq_restore(flags);
>  }
>  
>  static void update_page_reclaim_stat(struct zone *zone, struct page *page,
> @@ -475,6 +532,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
>  		 * is _really_ small and  it's non-critical problem.
>  		 */
>  		SetPageReclaim(page);
> +
> +		/*
> +		 * Move to the LRU_IMMEDIATE list to avoid being scanned
> +		 * by page reclaim uselessly.
> +		 */
> +		list_move_tail(&page->lru, &zone->lru[LRU_IMMEDIATE].list);
> +		__mod_zone_page_state(zone, NR_IMMEDIATE, 1);

It makes the PGDEACTIVATE count below wrong in lru_deactivate_fn.
Before this patch, every move was from active to inactive, so it was right.
But with this patch, a page can move from active to immediate.

>  	} else {
>  		/*
>  		 * The page's writeback ends up during pagevec
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 298ceb8..cb28a07 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1404,6 +1404,17 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
>  		}
>  		SetPageLRU(page);
>  		lru = page_lru(page);
> +
> +		/*
> +		 * If reclaim has tagged a file page reclaim, move it to
> +		 * a separate LRU lists to avoid it being scanned by other
> +		 * users. It is expected that as writeback completes that
> +		 * they are taken back off and moved to the normal LRU
> +		 */
> +		if (lru == LRU_INACTIVE_FILE &&
> +				PageReclaim(page) && PageWriteback(page))
> +			lru = LRU_IMMEDIATE;
> +
>  		add_page_to_lru_list(zone, page, lru);
>  		if (is_active_lru(lru)) {
>  			int file = is_file_lru(lru);
> diff --git a/mm/vmstat.c b/mm/vmstat.c
> index 8fd603b..dbfec4c 100644
> --- a/mm/vmstat.c
> +++ b/mm/vmstat.c
> @@ -688,6 +688,7 @@ const char * const vmstat_text[] = {
>  	"nr_active_anon",
>  	"nr_inactive_file",
>  	"nr_active_file",
> +	"nr_immediate",
>  	"nr_unevictable",
>  	"nr_mlock",
>  	"nr_anon_pages",
> @@ -756,6 +757,7 @@ const char * const vmstat_text[] = {
>  	"allocstall",
>  
>  	"pgrotated",
> +	"pgrescued",
>  
>  #ifdef CONFIG_COMPACTION
>  	"compact_blocks_moved",
> -- 
> 1.7.3.4
> 

-- 
Kind regards,
Minchan Kim

  parent reply	other threads:[~2011-12-17 16:08 UTC|newest]

Thread overview: 50+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2011-12-14 15:41 [PATCH 0/11] Reduce compaction-related stalls and improve asynchronous migration of dirty pages v6 Mel Gorman
2011-12-14 15:41 ` [PATCH 01/11] mm: compaction: Allow compaction to isolate dirty pages Mel Gorman
2011-12-14 15:41 ` [PATCH 02/11] mm: compaction: Use synchronous compaction for /proc/sys/vm/compact_memory Mel Gorman
2011-12-14 15:41 ` [PATCH 03/11] mm: vmscan: Check if we isolated a compound page during lumpy scan Mel Gorman
2011-12-15 23:21   ` Rik van Riel
2011-12-14 15:41 ` [PATCH 04/11] mm: vmscan: Do not OOM if aborting reclaim to start compaction Mel Gorman
2011-12-15 23:36   ` Rik van Riel
2011-12-14 15:41 ` [PATCH 05/11] mm: compaction: Determine if dirty pages can be migrated without blocking within ->migratepage Mel Gorman
2011-12-16  3:32   ` Rik van Riel
2011-12-16 23:20   ` Andrew Morton
2011-12-17  3:03     ` Nai Xia
2011-12-17  3:26       ` Andrew Morton
2011-12-19 11:05     ` Mel Gorman
2011-12-19 13:12       ` nai.xia
2011-12-14 15:41 ` [PATCH 06/11] mm: compaction: make isolate_lru_page() filter-aware again Mel Gorman
2011-12-16  3:34   ` Rik van Riel
2011-12-18  1:53   ` Minchan Kim
2011-12-14 15:41 ` [PATCH 07/11] mm: page allocator: Do not call direct reclaim for THP allocations while compaction is deferred Mel Gorman
2011-12-16  4:10   ` Rik van Riel
2011-12-14 15:41 ` [PATCH 08/11] mm: compaction: Introduce sync-light migration for use by compaction Mel Gorman
2011-12-16  4:31   ` Rik van Riel
2011-12-18  2:05   ` Minchan Kim
2011-12-19 11:45     ` Mel Gorman
2011-12-20  7:18       ` Minchan Kim
2012-01-13 21:25   ` Andrew Morton
2012-01-16 11:33     ` Mel Gorman
2011-12-14 15:41 ` [PATCH 09/11] mm: vmscan: When reclaiming for compaction, ensure there are sufficient free pages available Mel Gorman
2011-12-16  4:35   ` Rik van Riel
2011-12-14 15:41 ` [PATCH 10/11] mm: vmscan: Check if reclaim should really abort even if compaction_ready() is true for one zone Mel Gorman
2011-12-16  4:38   ` Rik van Riel
2011-12-16 11:29     ` Mel Gorman
2011-12-14 15:41 ` [PATCH 11/11] mm: Isolate pages for immediate reclaim on their own LRU Mel Gorman
2011-12-16  4:47   ` Rik van Riel
2011-12-16 12:26     ` Mel Gorman
2011-12-16 15:17   ` Johannes Weiner
2011-12-16 16:07     ` Mel Gorman
2011-12-19 16:14       ` Johannes Weiner
2011-12-17 16:08   ` Minchan Kim [this message]
2011-12-19 13:26     ` Mel Gorman
2011-12-20  7:10       ` Minchan Kim
2011-12-20  9:55         ` Mel Gorman
2011-12-23 19:08           ` Hugh Dickins
2011-12-29 16:59             ` Mel Gorman
2011-12-29 19:31               ` Rik van Riel
2011-12-30 11:27                 ` Mel Gorman
2011-12-16 22:56 ` [PATCH 0/11] Reduce compaction-related stalls and improve asynchronous migration of dirty pages v6 Andrew Morton
2011-12-19 14:40   ` Mel Gorman
2011-12-16 23:37 ` Andrew Morton
2011-12-19 14:20   ` Mel Gorman
  -- strict thread matches above, loose matches on Subject: below --
2011-12-01 17:36 [PATCH 0/11] Reduce compaction-related stalls and improve asynchronous migration of dirty pages v5 Mel Gorman
2011-12-01 17:36 ` [PATCH 11/11] mm: Isolate pages for immediate reclaim on their own LRU Mel Gorman

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20111217160822.GA10064@barrios-laptop.redhat.com \
    --to=minchan@kernel.org \
    --cc=aarcange@redhat.com \
    --cc=adi@hexapodia.org \
    --cc=akpm@linux-foundation.org \
    --cc=davej@redhat.com \
    --cc=jack@suse.cz \
    --cc=jweiner@redhat.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mgorman@suse.de \
    --cc=minchan.kim@gmail.com \
    --cc=nai.xia@gmail.com \
    --cc=riel@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).