From: Joonsoo Kim <js1304@gmail.com>
To: Johannes Weiner <hannes@cmpxchg.org>
Cc: Alex Shi <alex.shi@linux.alibaba.com>,
	Shakeel Butt <shakeelb@google.com>,
	Hugh Dickins <hughd@google.com>, Michal Hocko <mhocko@suse.com>,
	"Kirill A. Shutemov" <kirill@shutemov.name>,
	Roman Gushchin <guro@fb.com>,
	linux-mm@kvack.org, cgroups@vger.kernel.org,
	linux-kernel@vger.kernel.org, kernel-team@fb.com
Subject: Re: [PATCH 16/18] mm: memcontrol: charge swapin pages on instantiation
Date: Fri, 24 Apr 2020 09:44:42 +0900
Message-ID: <20200424004441.GF13929@js1304-desktop>
In-Reply-To: <20200420221126.341272-17-hannes@cmpxchg.org>

On Mon, Apr 20, 2020 at 06:11:24PM -0400, Johannes Weiner wrote:
> Right now, users that are otherwise memory controlled can easily
> escape their containment and allocate significant amounts of memory
> that they're not being charged for. That's because swap readahead
> pages are not being charged until somebody actually faults them into
> their page table. This can be exploited with MADV_WILLNEED, which
> triggers arbitrary readahead allocations without charging the pages.
> 
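For illustration, a minimal and entirely hypothetical userspace
sequence that would trigger such uncharged readahead (assuming buf/len
cover anonymous memory that has already been swapped out):

	#include <sys/mman.h>

	madvise(buf, len, MADV_WILLNEED);	/* kicks off swap readahead */
	/*
	 * The readahead pages now sit in the swapcache, on the LRU,
	 * but stay uncharged until somebody actually faults them in.
	 */
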
> There are additional problems with the delayed charging of swap pages:
> 
> 1. To implement refault/workingset detection for anonymous pages, we
>    need to have a target LRU available at swapin time, but the LRU is
>    not determinable until the page has been charged.
> 
> 2. To implement per-cgroup LRU locking, we need page->mem_cgroup to be
>    stable when the page is isolated from the LRU; otherwise, the locks
>    change under us. But swapcache gets charged after it's already on
>    the LRU, and the charge can happen even while we cannot isolate it
>    ourselves (since charging is not exactly optional).
> 
> The previous patch ensured we always maintain cgroup ownership records
> for swap pages. This patch moves the swapcache charging point from the
> fault handler to swapin time to fix all of the above problems.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> ---
>  mm/memory.c     | 15 ++++++---
>  mm/shmem.c      | 14 ++++----
>  mm/swap_state.c | 89 ++++++++++++++++++++++++++-----------------------
>  mm/swapfile.c   |  6 ----
>  4 files changed, 67 insertions(+), 57 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index 3fa379d9b17d..5d266532fc40 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3127,9 +3127,20 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
>  							vmf->address);
>  			if (page) {
> +				int err;
> +
>  				__SetPageLocked(page);
>  				__SetPageSwapBacked(page);
>  				set_page_private(page, entry.val);
> +
> +				/* Tell memcg to use swap ownership records */
> +				SetPageSwapCache(page);
> +				err = mem_cgroup_charge(page, vma->vm_mm,
> +							GFP_KERNEL, false);
> +				ClearPageSwapCache(page);
> +				if (err)
> +					goto out_page;
> +
>  				lru_cache_add_anon(page);
>  				swap_readpage(page, true);
>  			}
> @@ -3191,10 +3202,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  		goto out_page;
>  	}
>  
> -	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, true)) {
> -		ret = VM_FAULT_OOM;
> -		goto out_page;
> -	}
>  	cgroup_throttle_swaprate(page, GFP_KERNEL);
>  
>  	/*
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 363bd11eba85..966f150a4823 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -623,13 +623,15 @@ static int shmem_add_to_page_cache(struct page *page,
>  	page->mapping = mapping;
>  	page->index = index;
>  
> -	error = mem_cgroup_charge(page, charge_mm, gfp, PageSwapCache(page));
> -	if (error) {
> -		if (!PageSwapCache(page) && PageTransHuge(page)) {
> -			count_vm_event(THP_FILE_FALLBACK);
> -			count_vm_event(THP_FILE_FALLBACK_CHARGE);
> +	if (!PageSwapCache(page)) {
> +		error = mem_cgroup_charge(page, charge_mm, gfp, false);
> +		if (error) {
> +			if (PageTransHuge(page)) {
> +				count_vm_event(THP_FILE_FALLBACK);
> +				count_vm_event(THP_FILE_FALLBACK_CHARGE);
> +			}
> +			goto error;
>  		}
> -		goto error;
>  	}
>  	cgroup_throttle_swaprate(page, gfp);
>  
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index ebed37bbf7a3..f3b9073bfff3 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -360,12 +360,13 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
>  			struct vm_area_struct *vma, unsigned long addr,
>  			bool *new_page_allocated)
>  {
> -	struct page *found_page = NULL, *new_page = NULL;
>  	struct swap_info_struct *si;
> -	int err;
> +	struct page *page;
> +
>  	*new_page_allocated = false;
>  
> -	do {
> +	for (;;) {
> +		int err;
>  		/*
>  		 * First check the swap cache.  Since this is normally
>  		 * called after lookup_swap_cache() failed, re-calling
> @@ -373,12 +374,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
>  		 */
>  		si = get_swap_device(entry);
>  		if (!si)
> -			break;
> -		found_page = find_get_page(swap_address_space(entry),
> -					   swp_offset(entry));
> +			return NULL;
> +		page = find_get_page(swap_address_space(entry),
> +				     swp_offset(entry));
>  		put_swap_device(si);
> -		if (found_page)
> -			break;
> +		if (page)
> +			return page;
>  
>  		/*
>  		 * Just skip read ahead for unused swap slot.
> @@ -389,21 +390,15 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
>  		 * else swap_off will be aborted if we return NULL.
>  		 */
>  		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
> -			break;
> -
> -		/*
> -		 * Get a new page to read into from swap.
> -		 */
> -		if (!new_page) {
> -			new_page = alloc_page_vma(gfp_mask, vma, addr);
> -			if (!new_page)
> -				break;		/* Out of memory */
> -		}
> +			return NULL;
>  
>  		/*
>  		 * Swap entry may have been freed since our caller observed it.
>  		 */
>  		err = swapcache_prepare(entry);
> +		if (!err)
> +			break;
> +
>  		if (err == -EEXIST) {
>  			/*
>  			 * We might race against get_swap_page() and stumble
> @@ -412,31 +407,43 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
>  			 */
>  			cond_resched();
>  			continue;
> -		} else if (err)		/* swp entry is obsolete ? */
> -			break;
> -
> -		/* May fail (-ENOMEM) if XArray node allocation failed. */
> -		__SetPageLocked(new_page);
> -		__SetPageSwapBacked(new_page);
> -		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
> -		if (likely(!err)) {
> -			/* Initiate read into locked page */
> -			SetPageWorkingset(new_page);
> -			lru_cache_add_anon(new_page);
> -			*new_page_allocated = true;
> -			return new_page;
>  		}
> -		__ClearPageLocked(new_page);
> -		/*
> -		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
> -		 * clear SWAP_HAS_CACHE flag.
> -		 */
> -		put_swap_page(new_page, entry);
> -	} while (err != -ENOMEM);
> +		if (err)		/* swp entry is obsolete ? */
> +			return NULL;

"if (err)" is not needed since "!err" is already exiting the loop.

> +	}
> +
> +	/*
> +	 * The swap entry is ours to swap in. Prepare a new page.
> +	 */
> +
> +	page = alloc_page_vma(gfp_mask, vma, addr);
> +	if (!page)
> +		goto fail_free;
> +
> +	__SetPageLocked(page);
> +	__SetPageSwapBacked(page);
> +
> +	/* May fail (-ENOMEM) if XArray node allocation failed. */
> +	if (add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL))
> +		goto fail_unlock;
> +
> +	if (mem_cgroup_charge(page, NULL, gfp_mask & GFP_KERNEL, false))
> +		goto fail_delete;
> +

I think the following order of operations would be better than yours:

1. page alloc
2. memcg charge
3. swapcache_prepare
4. add_to_swap_cache

The reason is that page allocation and memcg charging could take a
long time due to reclaim, and other tasks waiting for this swapcache
page could be blocked in the window between swapcache_prepare() and
add_to_swap_cache().
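
Something like the following (a rough, untested sketch reusing the
helpers from this patch; error paths abbreviated):

	page = alloc_page_vma(gfp_mask, vma, addr);	/* 1. page alloc */
	if (!page)
		return NULL;

	/* 2. memcg charge, before anyone can wait on the swapcache page */
	if (mem_cgroup_charge(page, NULL, gfp_mask & GFP_KERNEL, false)) {
		put_page(page);
		return NULL;
	}

	for (;;) {
		/* 3. publish SWAP_HAS_CACHE as late as possible */
		err = swapcache_prepare(entry);
		if (!err)
			break;
		if (err != -EEXIST)
			goto fail;	/* swp entry is obsolete */
		cond_resched();		/* racing get_swap_page() */
	}

	__SetPageLocked(page);
	__SetPageSwapBacked(page);
	/* 4. insert into the swapcache right after claiming the entry */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL))
		goto fail_free;

This keeps the swapcache_prepare() -> add_to_swap_cache() window free
of reclaim, so waiters see the page appear soon after SWAP_HAS_CACHE
is set.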

Thanks.

