From: Michal Hocko <mhocko@kernel.org>
To: js1304@gmail.com
Cc: Andrew Morton <akpm@linux-foundation.org>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	kernel-team@lge.com, Vlastimil Babka <vbabka@suse.cz>,
	Christoph Hellwig <hch@infradead.org>,
	Roman Gushchin <guro@fb.com>,
	Mike Kravetz <mike.kravetz@oracle.com>,
	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>
Subject: Re: [PATCH v4 03/11] mm/hugetlb: unify migration callbacks
Date: Tue, 7 Jul 2020 13:19:13 +0200
Message-ID: <20200707111913.GG5913@dhcp22.suse.cz>
In-Reply-To: <1594107889-32228-4-git-send-email-iamjoonsoo.kim@lge.com>

On Tue 07-07-20 16:44:41, Joonsoo Kim wrote:
> From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> 
> There is no difference between the two migration callback functions,
> alloc_huge_page_node() and alloc_huge_page_nodemask(), except for
> __GFP_THISNODE handling. It's redundant to keep two nearly identical
> functions just to handle this flag, so this patch removes one of them
> by introducing a new argument, gfp_mask, to
> alloc_huge_page_nodemask().
> 
> After introducing the gfp_mask argument, it's the caller's job to provide
> a correct gfp_mask. So, every callsite of alloc_huge_page_nodemask() is
> changed to provide gfp_mask.
> 
> Note that it's safe to remove the node id check in alloc_huge_page_node()
> since no caller passes NUMA_NO_NODE as the node id.
> 
> Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

Thanks for doing it this way. This makes much more sense than the
previous gfp_mask-as-a-modifier approach.

I hope there won't be any weird include dependency problems, but 0day
will soon tell us if there are.

For the patch, feel free to add
Acked-by: Michal Hocko <mhocko@suse.com>
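
To illustrate the new calling convention for anybody skimming the
thread (a sketch based on the mm/mempolicy.c hunk below, not new
code): a caller that previously used alloc_huge_page_node() now
derives the gfp mask itself and ORs in __GFP_THISNODE when it wants a
node-local allocation:

	/* Pick the mask matching the hstate's movability, then pin to a node. */
	struct hstate *h = page_hstate(compound_head(page));
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

	/* NULL nodemask: only the preferred node is eligible. */
	page = alloc_huge_page_nodemask(h, node, NULL, gfp_mask);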

> ---
>  include/linux/hugetlb.h | 26 ++++++++++++++++++--------
>  mm/hugetlb.c            | 35 ++---------------------------------
>  mm/mempolicy.c          | 10 ++++++----
>  mm/migrate.c            | 11 +++++++----
>  4 files changed, 33 insertions(+), 49 deletions(-)
> 
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index 50650d0..bb93e95 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -10,6 +10,7 @@
>  #include <linux/list.h>
>  #include <linux/kref.h>
>  #include <linux/pgtable.h>
> +#include <linux/gfp.h>
>  
>  struct ctl_table;
>  struct user_struct;
> @@ -504,9 +505,8 @@ struct huge_bootmem_page {
>  
>  struct page *alloc_huge_page(struct vm_area_struct *vma,
>  				unsigned long addr, int avoid_reserve);
> -struct page *alloc_huge_page_node(struct hstate *h, int nid);
>  struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
> -				nodemask_t *nmask);
> +				nodemask_t *nmask, gfp_t gfp_mask);
>  struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
>  				unsigned long address);
>  struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
> @@ -692,6 +692,15 @@ static inline bool hugepage_movable_supported(struct hstate *h)
>  	return true;
>  }
>  
> +/* Movability of hugepages depends on migration support. */
> +static inline gfp_t htlb_alloc_mask(struct hstate *h)
> +{
> +	if (hugepage_movable_supported(h))
> +		return GFP_HIGHUSER_MOVABLE;
> +	else
> +		return GFP_HIGHUSER;
> +}
> +
>  static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
>  					   struct mm_struct *mm, pte_t *pte)
>  {
> @@ -759,13 +768,9 @@ static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
>  	return NULL;
>  }
>  
> -static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
> -{
> -	return NULL;
> -}
> -
>  static inline struct page *
> -alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
> +alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
> +			nodemask_t *nmask, gfp_t gfp_mask)
>  {
>  	return NULL;
>  }
> @@ -878,6 +883,11 @@ static inline bool hugepage_movable_supported(struct hstate *h)
>  	return false;
>  }
>  
> +static inline gfp_t htlb_alloc_mask(struct hstate *h)
> +{
> +	return 0;
> +}
> +
>  static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
>  					   struct mm_struct *mm, pte_t *pte)
>  {
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 7e5ba5c0..3245aa0 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1089,15 +1089,6 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
>  	return NULL;
>  }
>  
> -/* Movability of hugepages depends on migration support. */
> -static inline gfp_t htlb_alloc_mask(struct hstate *h)
> -{
> -	if (hugepage_movable_supported(h))
> -		return GFP_HIGHUSER_MOVABLE;
> -	else
> -		return GFP_HIGHUSER;
> -}
> -
>  static struct page *dequeue_huge_page_vma(struct hstate *h,
>  				struct vm_area_struct *vma,
>  				unsigned long address, int avoid_reserve,
> @@ -1979,31 +1970,9 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
>  }
>  
>  /* page migration callback function */
> -struct page *alloc_huge_page_node(struct hstate *h, int nid)
> -{
> -	gfp_t gfp_mask = htlb_alloc_mask(h);
> -	struct page *page = NULL;
> -
> -	if (nid != NUMA_NO_NODE)
> -		gfp_mask |= __GFP_THISNODE;
> -
> -	spin_lock(&hugetlb_lock);
> -	if (h->free_huge_pages - h->resv_huge_pages > 0)
> -		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
> -	spin_unlock(&hugetlb_lock);
> -
> -	if (!page)
> -		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
> -
> -	return page;
> -}
> -
> -/* page migration callback function */
>  struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
> -		nodemask_t *nmask)
> +		nodemask_t *nmask, gfp_t gfp_mask)
>  {
> -	gfp_t gfp_mask = htlb_alloc_mask(h);
> -
>  	spin_lock(&hugetlb_lock);
>  	if (h->free_huge_pages - h->resv_huge_pages > 0) {
>  		struct page *page;
> @@ -2031,7 +2000,7 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
>  
>  	gfp_mask = htlb_alloc_mask(h);
>  	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
> -	page = alloc_huge_page_nodemask(h, node, nodemask);
> +	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
>  	mpol_cond_put(mpol);
>  
>  	return page;
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index dabcee8..9034a53 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -1068,10 +1068,12 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
>  /* page allocation callback for NUMA node migration */
>  struct page *alloc_new_node_page(struct page *page, unsigned long node)
>  {
> -	if (PageHuge(page))
> -		return alloc_huge_page_node(page_hstate(compound_head(page)),
> -					node);
> -	else if (PageTransHuge(page)) {
> +	if (PageHuge(page)) {
> +		struct hstate *h = page_hstate(compound_head(page));
> +		gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
> +
> +		return alloc_huge_page_nodemask(h, node, NULL, gfp_mask);
> +	} else if (PageTransHuge(page)) {
>  		struct page *thp;
>  
>  		thp = alloc_pages_node(node,
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 7370a66..3b3d918 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1538,10 +1538,13 @@ struct page *new_page_nodemask(struct page *page,
>  	unsigned int order = 0;
>  	struct page *new_page = NULL;
>  
> -	if (PageHuge(page))
> -		return alloc_huge_page_nodemask(
> -				page_hstate(compound_head(page)),
> -				preferred_nid, nodemask);
> +	if (PageHuge(page)) {
> +		struct hstate *h = page_hstate(compound_head(page));
> +
> +		gfp_mask = htlb_alloc_mask(h);
> +		return alloc_huge_page_nodemask(h, preferred_nid,
> +						nodemask, gfp_mask);
> +	}
>  
>  	if (PageTransHuge(page)) {
>  		gfp_mask |= GFP_TRANSHUGE;
> -- 
> 2.7.4
> 

-- 
Michal Hocko
SUSE Labs
