From: Mike Kravetz <mike.kravetz@oracle.com>
To: Michal Hocko <mhocko@kernel.org>, linux-mm@kvack.org
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	LKML <linux-kernel@vger.kernel.org>,
	Michal Hocko <mhocko@suse.com>
Subject: Re: [RFC PATCH 1/5] mm, hugetlb: unify core page allocation accounting and initialization
Date: Tue, 12 Dec 2017 16:20:53 -0800	[thread overview]
Message-ID: <0698cdb3-ee17-04ac-5f8b-4fa7b15ac52c@oracle.com> (raw)
In-Reply-To: <20171204140117.7191-2-mhocko@kernel.org>

On 12/04/2017 06:01 AM, Michal Hocko wrote:
> From: Michal Hocko <mhocko@suse.com>
> 
> The hugetlb allocator has two entry points to the page allocator:
> - alloc_fresh_huge_page_node
> - __hugetlb_alloc_buddy_huge_page
> 
> The two differ very subtly in two aspects. The first one doesn't care
> about the HTLB_BUDDY_* stats and it doesn't initialize the huge page.
> prep_new_huge_page is not used because it not only initializes
> hugetlb-specific state but also calls put_page, which releases the page
> into the hugetlb pool; that is not what some contexts require. This
> makes things more complicated than necessary.
> 
> Simplify things by a) removing the duplicate page allocator entry point
> and keeping only __hugetlb_alloc_buddy_huge_page, and b) making
> prep_new_huge_page more reusable by removing the put_page call which
> moves the page into the allocator pool. All current callers are updated
> to call put_page explicitly. Later patches will add new callers which
> won't need it.
> 
> This patch shouldn't introduce any functional change.
> 
> Signed-off-by: Michal Hocko <mhocko@suse.com>

Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
-- 
Mike Kravetz

> ---
>  mm/hugetlb.c | 61 +++++++++++++++++++++++++++++-------------------------------
>  1 file changed, 29 insertions(+), 32 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 2c9033d39bfe..8189c92fac82 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1157,6 +1157,7 @@ static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
>  	if (page) {
>  		prep_compound_gigantic_page(page, huge_page_order(h));
>  		prep_new_huge_page(h, page, nid);
> +		put_page(page); /* free it into the hugepage allocator */
>  	}
>  
>  	return page;
> @@ -1304,7 +1305,6 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
>  	h->nr_huge_pages++;
>  	h->nr_huge_pages_node[nid]++;
>  	spin_unlock(&hugetlb_lock);
> -	put_page(page); /* free it into the hugepage allocator */
>  }
>  
>  static void prep_compound_gigantic_page(struct page *page, unsigned int order)
> @@ -1381,41 +1381,49 @@ pgoff_t __basepage_index(struct page *page)
>  	return (index << compound_order(page_head)) + compound_idx;
>  }
>  
> -static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
> +static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
> +		gfp_t gfp_mask, int nid, nodemask_t *nmask)
>  {
> +	int order = huge_page_order(h);
>  	struct page *page;
>  
> -	page = __alloc_pages_node(nid,
> -		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
> -						__GFP_RETRY_MAYFAIL|__GFP_NOWARN,
> -		huge_page_order(h));
> -	if (page) {
> -		prep_new_huge_page(h, page, nid);
> -	}
> +	gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
> +	if (nid == NUMA_NO_NODE)
> +		nid = numa_mem_id();
> +	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
> +	if (page)
> +		__count_vm_event(HTLB_BUDDY_PGALLOC);
> +	else
> +		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
>  
>  	return page;
>  }
>  
> +/*
> + * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
> + * manner.
> + */
>  static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
>  {
>  	struct page *page;
>  	int nr_nodes, node;
> -	int ret = 0;
> +	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
>  
>  	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
> -		page = alloc_fresh_huge_page_node(h, node);
> -		if (page) {
> -			ret = 1;
> +		page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
> +				node, nodes_allowed);
> +		if (page)
>  			break;
> -		}
> +
>  	}
>  
> -	if (ret)
> -		count_vm_event(HTLB_BUDDY_PGALLOC);
> -	else
> -		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
> +	if (!page)
> +		return 0;
>  
> -	return ret;
> +	prep_new_huge_page(h, page, page_to_nid(page));
> +	put_page(page); /* free it into the hugepage allocator */
> +
> +	return 1;
>  }
>  
>  /*
> @@ -1523,17 +1531,6 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
>  	return rc;
>  }
>  
> -static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
> -		gfp_t gfp_mask, int nid, nodemask_t *nmask)
> -{
> -	int order = huge_page_order(h);
> -
> -	gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
> -	if (nid == NUMA_NO_NODE)
> -		nid = numa_mem_id();
> -	return __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
> -}
> -
>  static struct page *__alloc_buddy_huge_page(struct hstate *h, gfp_t gfp_mask,
>  		int nid, nodemask_t *nmask)
>  {
> @@ -1589,11 +1586,9 @@ static struct page *__alloc_buddy_huge_page(struct hstate *h, gfp_t gfp_mask,
>  		 */
>  		h->nr_huge_pages_node[r_nid]++;
>  		h->surplus_huge_pages_node[r_nid]++;
> -		__count_vm_event(HTLB_BUDDY_PGALLOC);
>  	} else {
>  		h->nr_huge_pages--;
>  		h->surplus_huge_pages--;
> -		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
>  	}
>  	spin_unlock(&hugetlb_lock);
>  
> @@ -2148,6 +2143,8 @@ static void __init gather_bootmem_prealloc(void)
>  		prep_compound_huge_page(page, h->order);
>  		WARN_ON(PageReserved(page));
>  		prep_new_huge_page(h, page, page_to_nid(page));
> +		put_page(page); /* free it into the hugepage allocator */
> +
>  		/*
>  		 * If we had gigantic hugepages allocated at boot time, we need
>  		 * to restore the 'stolen' pages to totalram_pages in order to
> 
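
To make the resulting calling convention concrete: after this patch,
prep_new_huge_page() only does hstate accounting and initialization, and each
caller releases the page into the pool with an explicit put_page(). Below is a
minimal userspace sketch of that pattern. It is a model only, not kernel code;
the pool struct and helper names (alloc_fresh_page, prep_new_page,
release_to_pool) are invented here for illustration.

#include <stdio.h>
#include <stdlib.h>

struct pool {
	int nr_pages;	/* roughly h->nr_huge_pages */
	int free_pages;	/* roughly h->free_huge_pages */
};

/* ~ __hugetlb_alloc_buddy_huge_page(): just obtain a fresh page */
static void *alloc_fresh_page(void)
{
	return malloc(4096);
}

/* ~ prep_new_huge_page() after the patch: accounting/init only, no release */
static void prep_new_page(struct pool *p, void *page)
{
	(void)page;
	p->nr_pages++;
}

/* ~ put_page(): free the prepared page into the pool */
static void release_to_pool(struct pool *p, void *page)
{
	p->free_pages++;
	free(page);	/* in this model the "pool" simply frees the memory */
}

int main(void)
{
	struct pool pool = { 0, 0 };
	void *page = alloc_fresh_page();

	if (!page)
		return 1;

	prep_new_page(&pool, page);	/* initialize + account */
	release_to_pool(&pool, page);	/* caller now does this explicitly */

	printf("nr_pages=%d free_pages=%d\n", pool.nr_pages, pool.free_pages);
	return 0;
}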


Thread overview: 46+ messages
2017-12-04 14:01 [RFC PATCH 0/5] mm, hugetlb: allocation API and migration improvements Michal Hocko
2017-12-04 14:01 ` [RFC PATCH 1/5] mm, hugetlb: unify core page allocation accounting and initialization Michal Hocko
2017-12-13  0:20   ` Mike Kravetz [this message]
2017-12-04 14:01 ` [RFC PATCH 2/5] mm, hugetlb: integrate giga hugetlb more naturally to the allocation path Michal Hocko
2017-12-13  0:24   ` Mike Kravetz
2017-12-04 14:01 ` [RFC PATCH 3/5] mm, hugetlb: do not rely on overcommit limit during migration Michal Hocko
2017-12-13 23:35   ` Mike Kravetz
2017-12-14  7:40     ` Michal Hocko
2017-12-14 20:57       ` Mike Kravetz
2017-12-04 14:01 ` [RFC PATCH 4/5] mm, hugetlb: get rid of surplus page accounting tricks Michal Hocko
2017-12-14  0:45   ` Mike Kravetz
2017-12-14  7:50     ` Michal Hocko
2017-12-14 20:58       ` Mike Kravetz
2017-12-04 14:01 ` [RFC PATCH 5/5] mm, hugetlb: further simplify hugetlb allocation API Michal Hocko
2017-12-14 21:01   ` Mike Kravetz
2017-12-15  9:33 ` [RFC PATCH 0/5] mm, hugetlb: allocation API and migration improvements Michal Hocko
2017-12-20  5:33   ` Naoya Horiguchi
2017-12-20  9:53     ` Michal Hocko
2017-12-20 22:43       ` Mike Kravetz
2017-12-21  7:28         ` Michal Hocko
2017-12-21 23:35           ` Mike Kravetz
2017-12-22  9:48             ` Michal Hocko
2017-12-22  8:58       ` Naoya Horiguchi
