From: Michal Hocko <mhocko@kernel.org>
To: js1304@gmail.com
Cc: Andrew Morton <akpm@linux-foundation.org>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	kernel-team@lge.com, Vlastimil Babka <vbabka@suse.cz>,
	Christoph Hellwig <hch@infradead.org>,
	Roman Gushchin <guro@fb.com>,
	Mike Kravetz <mike.kravetz@oracle.com>,
	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>
Subject: Re: [PATCH v2 06/12] mm/hugetlb: make hugetlb migration target allocation APIs CMA aware
Date: Tue, 9 Jun 2020 15:53:25 +0200
Message-ID: <20200609135325.GH22623@dhcp22.suse.cz>
In-Reply-To: <1590561903-13186-7-git-send-email-iamjoonsoo.kim@lge.com>

On Wed 27-05-20 15:44:57, Joonsoo Kim wrote:
> From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> 
> There is a user who does not want to use CMA memory for migration. Until
> now, this has been handled on the caller side, but that is not optimal
> since the caller has only limited information. This patch implements it on
> the callee side to get a better result.

I do not follow this changelog and honestly do not see an improvement.
skip_cma in the alloc_control sounds like a hack to me. I can now see
why your earlier patch started to OR in the given gfp_mask. If anything,
that change should be folded into this one. But even then I do not like
a partial gfp_mask (__GFP_NOWARN on its own really has GFP_NOWAIT-like
semantics).
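
For reference, a compilable sketch of why a bare __GFP_NOWARN ends up so
restrictive. The bit values below are illustrative placeholders, not the
kernel's; only the GFP_NOWAIT/GFP_KERNEL grouping mirrors include/linux/gfp.h:

#include <stdio.h>

/* Illustrative bit values, not the kernel's. */
#define __GFP_DIRECT_RECLAIM	0x01u
#define __GFP_KSWAPD_RECLAIM	0x02u
#define __GFP_IO		0x04u
#define __GFP_FS		0x08u
#define __GFP_NOWARN		0x10u

/* Grouping as in include/linux/gfp.h. */
#define __GFP_RECLAIM	(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)
#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)

int main(void)
{
	unsigned int mask = __GFP_NOWARN;	/* what the patch passes */

	/*
	 * No reclaim, IO or FS bits at all: the allocation cannot enter
	 * direct reclaim and does not even wake kswapd, i.e. it is at
	 * least as restrictive as GFP_NOWAIT.
	 */
	printf("direct reclaim: %u, kswapd wakeup: %u\n",
	       mask & __GFP_DIRECT_RECLAIM, mask & __GFP_KSWAPD_RECLAIM);
	return 0;
}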

> Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> ---
>  include/linux/hugetlb.h |  2 --
>  mm/gup.c                |  9 +++------
>  mm/hugetlb.c            | 21 +++++++++++++++++----
>  mm/internal.h           |  1 +
>  4 files changed, 21 insertions(+), 12 deletions(-)
> 
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index f482563..3d05f7d 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -503,8 +503,6 @@ struct huge_bootmem_page {
>  	struct hstate *hstate;
>  };
>  
> -struct page *alloc_migrate_huge_page(struct hstate *h,
> -				struct alloc_control *ac);
>  struct page *alloc_huge_page_nodemask(struct hstate *h,
>  				struct alloc_control *ac);
>  struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
> diff --git a/mm/gup.c b/mm/gup.c
> index 6b78f11..87eca79 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -1617,14 +1617,11 @@ static struct page *new_non_cma_page(struct page *page, unsigned long private)
>  		struct alloc_control ac = {
>  			.nid = nid,
>  			.nmask = NULL,
> -			.gfp_mask = gfp_mask,
> +			.gfp_mask = __GFP_NOWARN,
> +			.skip_cma = true,
>  		};
>  
> -		/*
> -		 * We don't want to dequeue from the pool because pool pages will
> -		 * mostly be from the CMA region.
> -		 */
> -		return alloc_migrate_huge_page(h, &ac);
> +		return alloc_huge_page_nodemask(h, &ac);
>  	}
>  
>  	if (PageTransHuge(page)) {
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 8132985..e465582 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1033,13 +1033,19 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
>  	h->free_huge_pages_node[nid]++;
>  }
>  
> -static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
> +static struct page *dequeue_huge_page_node_exact(struct hstate *h,
> +						int nid, bool skip_cma)
>  {
>  	struct page *page;
>  
> -	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
> +	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
> +		if (skip_cma && is_migrate_cma_page(page))
> +			continue;
> +
>  		if (!PageHWPoison(page))
>  			break;
> +	}
> +
>  	/*
>  	 * if 'non-isolated free hugepage' not found on the list,
>  	 * the allocation fails.
> @@ -1080,7 +1086,7 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h,
>  			continue;
>  		node = zone_to_nid(zone);
>  
> -		page = dequeue_huge_page_node_exact(h, node);
> +		page = dequeue_huge_page_node_exact(h, node, ac->skip_cma);
>  		if (page)
>  			return page;
>  	}
> @@ -1937,7 +1943,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
>  	return page;
>  }
>  
> -struct page *alloc_migrate_huge_page(struct hstate *h,
> +static struct page *alloc_migrate_huge_page(struct hstate *h,
>  				struct alloc_control *ac)
>  {
>  	struct page *page;
> @@ -1999,6 +2005,13 @@ struct page *alloc_huge_page_nodemask(struct hstate *h,
>  	}
>  	spin_unlock(&hugetlb_lock);
>  
> +	/*
> +	 * clearing the __GFP_MOVABLE flag ensures that the allocated page
> +	 * will not come from the CMA area
> +	 */
> +	if (ac->skip_cma)
> +		ac->gfp_mask &= ~__GFP_MOVABLE;
> +
>  	return alloc_migrate_huge_page(h, ac);
>  }
>  
> diff --git a/mm/internal.h b/mm/internal.h
> index 6e613ce..159cfd6 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -618,6 +618,7 @@ struct alloc_control {
>  	int nid;		/* preferred node id */
>  	nodemask_t *nmask;
>  	gfp_t gfp_mask;
> +	bool skip_cma;
>  };
>  
>  #endif	/* __MM_INTERNAL_H */
> -- 
> 2.7.4
> 
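
To make the two CMA-avoidance points in the quoted hunks easier to follow,
here is a compressed, compilable stand-in. The names echo the patch, but the
types are toy substitutes, not kernel structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define __GFP_MOVABLE	0x01u		/* illustrative bit value */

struct toy_page {
	bool is_cma;			/* stands in for is_migrate_cma_page() */
	bool hwpoison;			/* stands in for PageHWPoison() */
};

struct toy_alloc_control {
	unsigned int gfp_mask;
	bool skip_cma;
};

/* Mirrors the dequeue_huge_page_node_exact() change: the first page that
 * is neither poisoned nor (when asked) CMA-backed wins. */
static struct toy_page *toy_dequeue(struct toy_page *list, size_t n,
				    bool skip_cma)
{
	for (size_t i = 0; i < n; i++) {
		if (skip_cma && list[i].is_cma)
			continue;
		if (!list[i].hwpoison)
			return &list[i];
	}
	return NULL;			/* caller falls back to a fresh page */
}

int main(void)
{
	struct toy_page freelist[] = {
		{ .is_cma = true },	/* would otherwise be handed out first */
		{ .is_cma = false },
	};
	struct toy_alloc_control ac = { .gfp_mask = __GFP_MOVABLE,
					.skip_cma = true };

	struct toy_page *page = toy_dequeue(freelist, 2, ac.skip_cma);
	printf("dequeued a CMA page: %d\n", page && page->is_cma);

	/* Second avoidance point: strip __GFP_MOVABLE before the fresh
	 * allocation so the buddy allocator stays out of CMA pageblocks. */
	if (ac.skip_cma)
		ac.gfp_mask &= ~__GFP_MOVABLE;
	printf("gfp_mask still movable: %u\n", ac.gfp_mask & __GFP_MOVABLE);
	return 0;
}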

-- 
Michal Hocko
SUSE Labs


Thread overview: 32+ messages
2020-05-27  6:44 [PATCH v2 00/12] clean-up the migration target allocation functions js1304
2020-05-27  6:44 ` [PATCH v2 01/12] mm/page_isolation: prefer the node of the source page js1304
2020-05-28 15:34   ` Vlastimil Babka
2020-06-09 12:43   ` Michal Hocko
2020-05-27  6:44 ` [PATCH v2 02/12] mm/migrate: move migration helper from .h to .c js1304
2020-05-28 16:10   ` Vlastimil Babka
2020-06-09 12:44   ` Michal Hocko
2020-05-27  6:44 ` [PATCH v2 03/12] mm/hugetlb: introduce alloc_control structure to simplify migration target allocation APIs js1304
2020-06-09 13:24   ` Michal Hocko
2020-06-10  3:07     ` Joonsoo Kim
2020-05-27  6:44 ` [PATCH v2 04/12] mm/hugetlb: use provided ac->gfp_mask for allocation js1304
2020-06-09 13:26   ` Michal Hocko
2020-06-10  3:08     ` Joonsoo Kim
2020-05-27  6:44 ` [PATCH v2 05/12] mm/hugetlb: unify hugetlb migration callback function js1304
2020-06-09 13:43   ` Michal Hocko
2020-06-10  3:11     ` Joonsoo Kim
2020-05-27  6:44 ` [PATCH v2 06/12] mm/hugetlb: make hugetlb migration target allocation APIs CMA aware js1304
2020-06-09 13:53   ` Michal Hocko [this message]
2020-06-10  3:36     ` Joonsoo Kim
2020-05-27  6:44 ` [PATCH v2 07/12] mm/hugetlb: do not modify user provided gfp_mask js1304
2020-06-09 13:54   ` Michal Hocko
2020-06-10  5:12     ` Joonsoo Kim
2020-05-27  6:44 ` [PATCH v2 08/12] mm/migrate: change the interface of the migration target alloc/free functions js1304
2020-06-09 14:04   ` Michal Hocko
2020-06-10  3:45     ` Joonsoo Kim
2020-05-27  6:45 ` [PATCH v2 09/12] mm/migrate: make standard migration target allocation functions js1304
2020-05-27  6:45 ` [PATCH v2 10/12] mm/gup: use standard migration target allocation function js1304
2020-05-27  6:45 ` [PATCH v2 11/12] mm/mempolicy: " js1304
2020-05-27  6:45 ` [PATCH v2 12/12] mm/page_alloc: use standard migration target allocation function directly js1304
2020-05-28 19:25 ` [PATCH v2 00/12] clean-up the migration target allocation functions Vlastimil Babka
2020-05-29  6:50   ` Joonsoo Kim
2020-06-01  6:40     ` Joonsoo Kim
