From: Michal Hocko <mhocko@kernel.org>
To: Johannes Weiner <hannes@cmpxchg.org>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	Rik van Riel <riel@redhat.com>, Mel Gorman <mgorman@suse.de>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Andi Kleen <andi@firstfloor.org>,
	Tim Chen <tim.c.chen@linux.intel.com>,
	kernel-team@fb.com
Subject: Re: [PATCH 03/10] mm: fold and remove lru_cache_add_anon() and lru_cache_add_file()
Date: Tue, 7 Jun 2016 11:12:42 +0200	[thread overview]
Message-ID: <20160607091241.GE12305@dhcp22.suse.cz> (raw)
In-Reply-To: <20160606194836.3624-4-hannes@cmpxchg.org>

On Mon 06-06-16 15:48:29, Johannes Weiner wrote:
> They're the same function, and for the purpose of all callers they are
> equivalent to lru_cache_add().
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Acked-by: Michal Hocko <mhocko@suse.com>

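For readers less familiar with mm/swap.c, here is a minimal, self-contained userspace
sketch of the batching pattern the consolidated lru_cache_add() follows: pages are
staged in a small vector and spliced onto the LRU in bulk once the vector fills. All
names in the sketch (fake_page, batch, batch_add, batch_flush, BATCH_SIZE) are
hypothetical stand-ins for illustration, not the kernel API.

/*
 * Userspace-style sketch of the batching idea behind lru_cache_add().
 * Illustrative names and types only, not the kernel API.
 */
#include <stdio.h>

#define BATCH_SIZE 15	/* small illustrative batch; the kernel uses PAGEVEC_SIZE */

struct fake_page {
	int id;
	int refcount;
};

struct batch {
	int nr;
	struct fake_page *pages[BATCH_SIZE];
};

/* Stand-in for __pagevec_lru_add(): move the whole batch onto the LRU. */
static void batch_flush(struct batch *b)
{
	for (int i = 0; i < b->nr; i++)
		printf("moving page %d to the LRU\n", b->pages[i]->id);
	b->nr = 0;
}

/*
 * Stand-in for lru_cache_add(): take a reference, flush if the batch is
 * full, then stage the page. There is no anon/file distinction here --
 * removing that distinction is exactly what the patch does.
 */
static void batch_add(struct batch *b, struct fake_page *page)
{
	page->refcount++;		/* get_page() */
	if (b->nr == BATCH_SIZE)	/* !pagevec_space() */
		batch_flush(b);		/* __pagevec_lru_add() */
	b->pages[b->nr++] = page;	/* pagevec_add() */
}

int main(void)
{
	struct batch b = { .nr = 0 };
	struct fake_page pages[20] = { 0 };

	for (int i = 0; i < 20; i++) {
		pages[i].id = i;
		batch_add(&b, &pages[i]);
	}
	batch_flush(&b);		/* drain whatever is still staged */
	return 0;
}

In the kernel the vector is per-CPU (note the get_cpu_var()/put_cpu_var() pair in
the mm/swap.c hunk below), which keeps the common path free of shared locks; the
sketch leaves that out.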
> ---
>  fs/cifs/file.c       | 10 +++++-----
>  fs/fuse/dev.c        |  2 +-
>  include/linux/swap.h |  2 --
>  mm/shmem.c           |  4 ++--
>  mm/swap.c            | 40 +++++++++-------------------------------
>  mm/swap_state.c      |  2 +-
>  6 files changed, 18 insertions(+), 42 deletions(-)
> 
> diff --git a/fs/cifs/file.c b/fs/cifs/file.c
> index 9793ae0bcaa2..232390879640 100644
> --- a/fs/cifs/file.c
> +++ b/fs/cifs/file.c
> @@ -3261,7 +3261,7 @@ cifs_readv_complete(struct work_struct *work)
>  	for (i = 0; i < rdata->nr_pages; i++) {
>  		struct page *page = rdata->pages[i];
>  
> -		lru_cache_add_file(page);
> +		lru_cache_add(page);
>  
>  		if (rdata->result == 0 ||
>  		    (rdata->result == -EAGAIN && got_bytes)) {
> @@ -3321,7 +3321,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
>  			 * fill them until the writes are flushed.
>  			 */
>  			zero_user(page, 0, PAGE_SIZE);
> -			lru_cache_add_file(page);
> +			lru_cache_add(page);
>  			flush_dcache_page(page);
>  			SetPageUptodate(page);
>  			unlock_page(page);
> @@ -3331,7 +3331,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
>  			continue;
>  		} else {
>  			/* no need to hold page hostage */
> -			lru_cache_add_file(page);
> +			lru_cache_add(page);
>  			unlock_page(page);
>  			put_page(page);
>  			rdata->pages[i] = NULL;
> @@ -3488,7 +3488,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
>  			/* best to give up if we're out of mem */
>  			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
>  				list_del(&page->lru);
> -				lru_cache_add_file(page);
> +				lru_cache_add(page);
>  				unlock_page(page);
>  				put_page(page);
>  			}
> @@ -3518,7 +3518,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
>  			add_credits_and_wake_if(server, rdata->credits, 0);
>  			for (i = 0; i < rdata->nr_pages; i++) {
>  				page = rdata->pages[i];
> -				lru_cache_add_file(page);
> +				lru_cache_add(page);
>  				unlock_page(page);
>  				put_page(page);
>  			}
> diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
> index cbece1221417..c7264d4a7f3f 100644
> --- a/fs/fuse/dev.c
> +++ b/fs/fuse/dev.c
> @@ -900,7 +900,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
>  	get_page(newpage);
>  
>  	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
> -		lru_cache_add_file(newpage);
> +		lru_cache_add(newpage);
>  
>  	err = 0;
>  	spin_lock(&cs->req->waitq.lock);
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 0af2bb2028fd..38fe1e91ba55 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -296,8 +296,6 @@ extern unsigned long nr_free_pagecache_pages(void);
>  
>  /* linux/mm/swap.c */
>  extern void lru_cache_add(struct page *);
> -extern void lru_cache_add_anon(struct page *page);
> -extern void lru_cache_add_file(struct page *page);
>  extern void lru_add_page_tail(struct page *page, struct page *page_tail,
>  			 struct lruvec *lruvec, struct list_head *head);
>  extern void activate_page(struct page *);
> diff --git a/mm/shmem.c b/mm/shmem.c
> index e418a995427d..ff210317022d 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1098,7 +1098,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
>  		oldpage = newpage;
>  	} else {
>  		mem_cgroup_migrate(oldpage, newpage);
> -		lru_cache_add_anon(newpage);
> +		lru_cache_add(newpage);
>  		*pagep = newpage;
>  	}
>  
> @@ -1289,7 +1289,7 @@ repeat:
>  			goto decused;
>  		}
>  		mem_cgroup_commit_charge(page, memcg, false, false);
> -		lru_cache_add_anon(page);
> +		lru_cache_add(page);
>  
>  		spin_lock(&info->lock);
>  		info->alloced++;
> diff --git a/mm/swap.c b/mm/swap.c
> index d810c3d95c97..d2786a6308dd 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -386,36 +386,6 @@ void mark_page_accessed(struct page *page)
>  }
>  EXPORT_SYMBOL(mark_page_accessed);
>  
> -static void __lru_cache_add(struct page *page)
> -{
> -	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
> -
> -	get_page(page);
> -	if (!pagevec_space(pvec))
> -		__pagevec_lru_add(pvec);
> -	pagevec_add(pvec, page);
> -	put_cpu_var(lru_add_pvec);
> -}
> -
> -/**
> - * lru_cache_add: add a page to the page lists
> - * @page: the page to add
> - */
> -void lru_cache_add_anon(struct page *page)
> -{
> -	if (PageActive(page))
> -		ClearPageActive(page);
> -	__lru_cache_add(page);
> -}
> -
> -void lru_cache_add_file(struct page *page)
> -{
> -	if (PageActive(page))
> -		ClearPageActive(page);
> -	__lru_cache_add(page);
> -}
> -EXPORT_SYMBOL(lru_cache_add_file);
> -
>  /**
>   * lru_cache_add - add a page to a page list
>   * @page: the page to be added to the LRU.
> @@ -427,10 +397,18 @@ EXPORT_SYMBOL(lru_cache_add_file);
>   */
>  void lru_cache_add(struct page *page)
>  {
> +	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
> +
>  	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
>  	VM_BUG_ON_PAGE(PageLRU(page), page);
> -	__lru_cache_add(page);
> +
> +	get_page(page);
> +	if (!pagevec_space(pvec))
> +		__pagevec_lru_add(pvec);
> +	pagevec_add(pvec, page);
> +	put_cpu_var(lru_add_pvec);
>  }
> +EXPORT_SYMBOL(lru_cache_add);
>  
>  /**
>   * add_page_to_unevictable_list - add a page to the unevictable list
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index 0d457e7db8d6..5400f814ae12 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -365,7 +365,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
>  			/*
>  			 * Initiate read into locked page and return.
>  			 */
> -			lru_cache_add_anon(new_page);
> +			lru_cache_add(new_page);
>  			*new_page_allocated = true;
>  			return new_page;
>  		}
> -- 
> 2.8.3

-- 
Michal Hocko
SUSE Labs
