* [PATCH 0/2] Get rid of __alloc_pages wrapper
@ 2021-01-24 12:03 Matthew Wilcox (Oracle)
  2021-01-24 12:03 ` [PATCH 1/2] mm/page-alloc: Rename gfp_mask to gfp Matthew Wilcox (Oracle)
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Matthew Wilcox (Oracle) @ 2021-01-24 12:03 UTC (permalink / raw)
  To: linux-mm; +Cc: Matthew Wilcox (Oracle)

I was poking my way around the __alloc_pages variants trying to understand
why they each exist, and couldn't really find a good justification for
keeping __alloc_pages and __alloc_pages_nodemask as separate functions.
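
For reference, the wrapper being removed is the static inline in
include/linux/gfp.h (quoted here as it stands before the series; the same
lines appear in the patch 2/2 diff below):

	struct page *
	__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
							nodemask_t *nodemask);

	static inline struct page *
	__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
	{
		/* nothing more than __alloc_pages_nodemask() with a NULL nodemask */
		return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
	}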

Matthew Wilcox (Oracle) (2):
  mm/page-alloc: Rename gfp_mask to gfp
  mm: Combine __alloc_pages and __alloc_pages_nodemask

 Documentation/admin-guide/mm/transhuge.rst |  2 +-
 include/linux/gfp.h                        | 13 +++----------
 mm/hugetlb.c                               |  2 +-
 mm/internal.h                              |  4 ++--
 mm/mempolicy.c                             |  6 +++---
 mm/migrate.c                               |  2 +-
 mm/page_alloc.c                            | 22 +++++++++++-----------
 7 files changed, 22 insertions(+), 29 deletions(-)

-- 
2.29.2




* [PATCH 1/2] mm/page-alloc: Rename gfp_mask to gfp
  2021-01-24 12:03 [PATCH 0/2] Get rid of __alloc_pages wrapper Matthew Wilcox (Oracle)
@ 2021-01-24 12:03 ` Matthew Wilcox (Oracle)
  2021-01-26 13:43   ` Vlastimil Babka
  2021-01-24 12:03 ` [PATCH 2/2] mm: Combine __alloc_pages and __alloc_pages_nodemask Matthew Wilcox (Oracle)
  2021-01-24 17:11 ` [PATCH 3/2] mm: Rename alloc_pages_current to alloc_pages Matthew Wilcox
  2 siblings, 1 reply; 9+ messages in thread
From: Matthew Wilcox (Oracle) @ 2021-01-24 12:03 UTC (permalink / raw)
  To: linux-mm; +Cc: Matthew Wilcox (Oracle)

Shorten some overly-long lines by renaming this identifier.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_alloc.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b031a5ae0bd5..d72ef706f6e6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4963,7 +4963,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
  * This is the 'heart' of the zoned buddy allocator.
  */
 struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
+__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
 							nodemask_t *nodemask)
 {
 	struct page *page;
@@ -4976,20 +4976,21 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	 * so bail out early if the request is out of bound.
 	 */
 	if (unlikely(order >= MAX_ORDER)) {
-		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+		WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
 		return NULL;
 	}
 
-	gfp_mask &= gfp_allowed_mask;
-	alloc_mask = gfp_mask;
-	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
+	gfp &= gfp_allowed_mask;
+	alloc_mask = gfp;
+	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
+					&alloc_mask, &alloc_flags))
 		return NULL;
 
 	/*
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
+	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
@@ -5002,7 +5003,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	 * from a particular context which has been marked by
 	 * memalloc_no{fs,io}_{save,restore}.
 	 */
-	alloc_mask = current_gfp_context(gfp_mask);
+	alloc_mask = current_gfp_context(gfp);
 	ac.spread_dirty_pages = false;
 
 	/*
@@ -5014,8 +5015,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
 out:
-	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
-	    unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
+	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
+	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
 		__free_pages(page, order);
 		page = NULL;
 	}
-- 
2.29.2




* [PATCH 2/2] mm: Combine __alloc_pages and __alloc_pages_nodemask
  2021-01-24 12:03 [PATCH 0/2] Get rid of __alloc_pages wrapper Matthew Wilcox (Oracle)
  2021-01-24 12:03 ` [PATCH 1/2] mm/page-alloc: Rename gfp_mask to gfp Matthew Wilcox (Oracle)
@ 2021-01-24 12:03 ` Matthew Wilcox (Oracle)
  2021-01-26 13:47   ` Vlastimil Babka
  2021-01-27  9:34   ` Michal Hocko
  2021-01-24 17:11 ` [PATCH 3/2] mm: Rename alloc_pages_current to alloc_pages Matthew Wilcox
  2 siblings, 2 replies; 9+ messages in thread
From: Matthew Wilcox (Oracle) @ 2021-01-24 12:03 UTC (permalink / raw)
  To: linux-mm; +Cc: Matthew Wilcox (Oracle)

There are only two callers of __alloc_pages(), so prune the thicket of
alloc_pages variants by combining the two functions.  Current callers of
__alloc_pages() simply pass an extra 'NULL' nodemask parameter, and current
callers of __alloc_pages_nodemask() call __alloc_pages() instead.
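
To illustrate, the call-site changes in the hunks below follow two patterns:

	/* callers of the old inline wrapper gain a NULL nodemask argument ... */
	page = __alloc_pages(gfp, order, nid);				/* before */
	page = __alloc_pages(gfp, order, nid, NULL);			/* after  */

	/* ... while callers of __alloc_pages_nodemask() switch to the new name */
	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);	/* before */
	page = __alloc_pages(gfp_mask, order, nid, nmask);		/* after  */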

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 Documentation/admin-guide/mm/transhuge.rst |  2 +-
 include/linux/gfp.h                        | 13 +++----------
 mm/hugetlb.c                               |  2 +-
 mm/internal.h                              |  4 ++--
 mm/mempolicy.c                             |  6 +++---
 mm/migrate.c                               |  2 +-
 mm/page_alloc.c                            |  5 ++---
 7 files changed, 13 insertions(+), 21 deletions(-)

diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 3b8a336511a4..c9c37f16eef8 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -402,7 +402,7 @@ compact_fail
 	but failed.
 
 It is possible to establish how long the stalls were using the function
-tracer to record how long was spent in __alloc_pages_nodemask and
+tracer to record how long was spent in __alloc_pages() and
 using the mm_page_alloc tracepoint to identify which allocations were
 for huge pages.
 
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 53caa9846854..acca2c487da8 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -501,15 +501,8 @@ static inline int arch_make_page_accessible(struct page *page)
 }
 #endif
 
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask);
-
-static inline struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
-{
-	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
-}
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+		nodemask_t *nodemask);
 
 /*
  * Allocate pages, preferring the node given as nid. The node must be valid and
@@ -521,7 +514,7 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
 	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
 
-	return __alloc_pages(gfp_mask, order, nid);
+	return __alloc_pages(gfp_mask, order, nid, NULL);
 }
 
 /*
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a6bad1f686c5..604857289e02 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1644,7 +1644,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 		gfp_mask |= __GFP_RETRY_MAYFAIL;
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
-	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+	page = __alloc_pages(gfp_mask, order, nid, nmask);
 	if (page)
 		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	else
diff --git a/mm/internal.h b/mm/internal.h
index 8e9c660f33ca..19aee773f6a8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -126,10 +126,10 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
  * family of functions.
  *
  * nodemask, migratetype and highest_zoneidx are initialized only once in
- * __alloc_pages_nodemask() and then never change.
+ * __alloc_pages() and then never change.
  *
  * zonelist, preferred_zone and highest_zoneidx are set first in
- * __alloc_pages_nodemask() for the fast path, and might be later changed
+ * __alloc_pages() for the fast path, and might be later changed
  * in __alloc_pages_slowpath(). All other functions pass the whole structure
  * by a const pointer.
  */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 6961238c7ef5..addf0854d693 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2140,7 +2140,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 {
 	struct page *page;
 
-	page = __alloc_pages(gfp, order, nid);
+	page = __alloc_pages(gfp, order, nid, NULL);
 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
 	if (!static_branch_likely(&vm_numa_stat_key))
 		return page;
@@ -2237,7 +2237,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 	nmask = policy_nodemask(gfp, pol);
 	preferred_nid = policy_node(gfp, pol, node);
-	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
+	page = __alloc_pages(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
 	return page;
@@ -2274,7 +2274,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	if (pol->mode == MPOL_INTERLEAVE)
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	else
-		page = __alloc_pages_nodemask(gfp, order,
+		page = __alloc_pages(gfp, order,
 				policy_node(gfp, pol, numa_node_id()),
 				policy_nodemask(gfp, pol));
 
diff --git a/mm/migrate.c b/mm/migrate.c
index a3e1acc72ad7..f1ca50febfbe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1617,7 +1617,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
+	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
 
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d72ef706f6e6..90a1eb06c11b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4962,8 +4962,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *
-__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 							nodemask_t *nodemask)
 {
 	struct page *page;
@@ -5025,7 +5024,7 @@ __alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
 
 	return page;
 }
-EXPORT_SYMBOL(__alloc_pages_nodemask);
+EXPORT_SYMBOL(__alloc_pages);
 
 /*
  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
-- 
2.29.2




* [PATCH 3/2] mm: Rename alloc_pages_current to alloc_pages
  2021-01-24 12:03 [PATCH 0/2] Get rid of __alloc_pages wrapper Matthew Wilcox (Oracle)
  2021-01-24 12:03 ` [PATCH 1/2] mm/page-alloc: Rename gfp_mask to gfp Matthew Wilcox (Oracle)
  2021-01-24 12:03 ` [PATCH 2/2] mm: Combine __alloc_pages and __alloc_pages_nodemask Matthew Wilcox (Oracle)
@ 2021-01-24 17:11 ` Matthew Wilcox
  2021-01-26 16:05   ` Vlastimil Babka
  2021-01-27  9:39   ` Michal Hocko
  2 siblings, 2 replies; 9+ messages in thread
From: Matthew Wilcox @ 2021-01-24 17:11 UTC (permalink / raw)
  To: linux-mm

When CONFIG_NUMA is enabled, alloc_pages() is a wrapper around
alloc_pages_current().  This is pointless; just implement alloc_pages()
directly.
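
In other words, under CONFIG_NUMA this pass-through:

	static inline struct page *
	alloc_pages(gfp_t gfp_mask, unsigned int order)
	{
		return alloc_pages_current(gfp_mask, order);
	}

becomes a plain declaration, with the definition living in mm/mempolicy.c:

	struct page *alloc_pages(gfp_t gfp, unsigned int order);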

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/gfp.h |  8 +-------
 mm/mempolicy.c      | 27 +++++++++++++--------------
 2 files changed, 14 insertions(+), 21 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index acca2c487da8..44978b35ce1a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -532,13 +532,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 }
 
 #ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
-
-static inline struct page *
-alloc_pages(gfp_t gfp_mask, unsigned int order)
-{
-	return alloc_pages_current(gfp_mask, order);
-}
+struct page *alloc_pages(gfp_t gfp, unsigned int order);
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr,
 			int node, bool hugepage);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index addf0854d693..0cf54aa5a2f0 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2245,21 +2245,20 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 EXPORT_SYMBOL(alloc_pages_vma);
 
 /**
- * 	alloc_pages_current - Allocate pages.
+ * alloc_pages - Allocate pages.
+ * @gfp:
+ *	%GFP_USER   user allocation,
+ *	%GFP_KERNEL kernel allocation,
+ *	%GFP_HIGHMEM highmem allocation,
+ *	%GFP_FS     don't call back into a file system.
+ *	%GFP_ATOMIC don't sleep.
+ * @order: Power of two of allocation size in pages. 0 is a single page.
  *
- *	@gfp:
- *		%GFP_USER   user allocation,
- *      	%GFP_KERNEL kernel allocation,
- *      	%GFP_HIGHMEM highmem allocation,
- *      	%GFP_FS     don't call back into a file system.
- *      	%GFP_ATOMIC don't sleep.
- *	@order: Power of two of allocation size in pages. 0 is a single page.
- *
- *	Allocate a page from the kernel page pool.  When not in
- *	interrupt context and apply the current process NUMA policy.
- *	Returns NULL when no page can be allocated.
+ * Allocate a page from the kernel page pool.  When in
+ * process context apply the current process NUMA policy.
+ * Returns NULL when no page can be allocated.
  */
-struct page *alloc_pages_current(gfp_t gfp, unsigned order)
+struct page *alloc_pages(gfp_t gfp, unsigned order)
 {
 	struct mempolicy *pol = &default_policy;
 	struct page *page;
@@ -2280,7 +2279,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 
 	return page;
 }
-EXPORT_SYMBOL(alloc_pages_current);
+EXPORT_SYMBOL(alloc_pages);
 
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 {
-- 
2.29.2




* Re: [PATCH 1/2] mm/page-alloc: Rename gfp_mask to gfp
  2021-01-24 12:03 ` [PATCH 1/2] mm/page-alloc: Rename gfp_mask to gfp Matthew Wilcox (Oracle)
@ 2021-01-26 13:43   ` Vlastimil Babka
  0 siblings, 0 replies; 9+ messages in thread
From: Vlastimil Babka @ 2021-01-26 13:43 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), linux-mm

On 1/24/21 1:03 PM, Matthew Wilcox (Oracle) wrote:
> Shorten some overly-long lines by renaming this identifier.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

> ---
>  mm/page_alloc.c | 19 ++++++++++---------
>  1 file changed, 10 insertions(+), 9 deletions(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index b031a5ae0bd5..d72ef706f6e6 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4963,7 +4963,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
>   * This is the 'heart' of the zoned buddy allocator.
>   */
>  struct page *
> -__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
> +__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
>  							nodemask_t *nodemask)
>  {
>  	struct page *page;
> @@ -4976,20 +4976,21 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>  	 * so bail out early if the request is out of bound.
>  	 */
>  	if (unlikely(order >= MAX_ORDER)) {
> -		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
> +		WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
>  		return NULL;
>  	}
>  
> -	gfp_mask &= gfp_allowed_mask;
> -	alloc_mask = gfp_mask;
> -	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
> +	gfp &= gfp_allowed_mask;
> +	alloc_mask = gfp;
> +	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
> +					&alloc_mask, &alloc_flags))
>  		return NULL;
>  
>  	/*
>  	 * Forbid the first pass from falling back to types that fragment
>  	 * memory until all local zones are considered.
>  	 */
> -	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
> +	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
>  
>  	/* First allocation attempt */
>  	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
> @@ -5002,7 +5003,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>  	 * from a particular context which has been marked by
>  	 * memalloc_no{fs,io}_{save,restore}.
>  	 */
> -	alloc_mask = current_gfp_context(gfp_mask);
> +	alloc_mask = current_gfp_context(gfp);
>  	ac.spread_dirty_pages = false;
>  
>  	/*
> @@ -5014,8 +5015,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>  	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
>  
>  out:
> -	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
> -	    unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
> +	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
> +	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
>  		__free_pages(page, order);
>  		page = NULL;
>  	}
> 




* Re: [PATCH 2/2] mm: Combine __alloc_pages and __alloc_pages_nodemask
  2021-01-24 12:03 ` [PATCH 2/2] mm: Combine __alloc_pages and __alloc_pages_nodemask Matthew Wilcox (Oracle)
@ 2021-01-26 13:47   ` Vlastimil Babka
  2021-01-27  9:34   ` Michal Hocko
  1 sibling, 0 replies; 9+ messages in thread
From: Vlastimil Babka @ 2021-01-26 13:47 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), linux-mm

On 1/24/21 1:03 PM, Matthew Wilcox (Oracle) wrote:
> There are only two callers of __alloc_pages(), so prune the thicket of
> alloc_pages variants by combining the two functions.  Current callers of
> __alloc_pages() simply pass an extra 'NULL' nodemask parameter, and current
> callers of __alloc_pages_nodemask() call __alloc_pages() instead.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>



* Re: [PATCH 3/2] mm: Rename alloc_pages_current to alloc_pages
  2021-01-24 17:11 ` [PATCH 3/2] mm: Rename alloc_pages_current to alloc_pages Matthew Wilcox
@ 2021-01-26 16:05   ` Vlastimil Babka
  2021-01-27  9:39   ` Michal Hocko
  1 sibling, 0 replies; 9+ messages in thread
From: Vlastimil Babka @ 2021-01-26 16:05 UTC (permalink / raw)
  To: Matthew Wilcox, linux-mm

On 1/24/21 6:11 PM, Matthew Wilcox wrote:
> When CONFIG_NUMA is enabled, alloc_pages() is a wrapper around
> alloc_pages_current().  This is pointless; just implement alloc_pages()
> directly.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>



* Re: [PATCH 2/2] mm: Combine __alloc_pages and __alloc_pages_nodemask
  2021-01-24 12:03 ` [PATCH 2/2] mm: Combine __alloc_pages and __alloc_pages_nodemask Matthew Wilcox (Oracle)
  2021-01-26 13:47   ` Vlastimil Babka
@ 2021-01-27  9:34   ` Michal Hocko
  1 sibling, 0 replies; 9+ messages in thread
From: Michal Hocko @ 2021-01-27  9:34 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle); +Cc: linux-mm

On Sun 24-01-21 12:03:57, Matthew Wilcox wrote:
> There are only two callers of __alloc_pages(), so prune the thicket of
> alloc_pages variants by combining the two functions.  Current callers of
> __alloc_pages() simply pass an extra 'NULL' nodemask parameter, and current
> callers of __alloc_pages_nodemask() call __alloc_pages() instead.

Thanks, this is indeed a simplification. The allocation API zoo is a real
maze and this will simplify it a bit. __alloc_pages_nodemask is also
quite a verbose name.

> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Michal Hocko <mhocko@suse.com>

> ---
>  Documentation/admin-guide/mm/transhuge.rst |  2 +-
>  include/linux/gfp.h                        | 13 +++----------
>  mm/hugetlb.c                               |  2 +-
>  mm/internal.h                              |  4 ++--
>  mm/mempolicy.c                             |  6 +++---
>  mm/migrate.c                               |  2 +-
>  mm/page_alloc.c                            |  5 ++---
>  7 files changed, 13 insertions(+), 21 deletions(-)
> 
> diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
> index 3b8a336511a4..c9c37f16eef8 100644
> --- a/Documentation/admin-guide/mm/transhuge.rst
> +++ b/Documentation/admin-guide/mm/transhuge.rst
> @@ -402,7 +402,7 @@ compact_fail
>  	but failed.
>  
>  It is possible to establish how long the stalls were using the function
> -tracer to record how long was spent in __alloc_pages_nodemask and
> +tracer to record how long was spent in __alloc_pages() and
>  using the mm_page_alloc tracepoint to identify which allocations were
>  for huge pages.
>  
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index 53caa9846854..acca2c487da8 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -501,15 +501,8 @@ static inline int arch_make_page_accessible(struct page *page)
>  }
>  #endif
>  
> -struct page *
> -__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
> -							nodemask_t *nodemask);
> -
> -static inline struct page *
> -__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
> -{
> -	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
> -}
> +struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
> +		nodemask_t *nodemask);
>  
>  /*
>   * Allocate pages, preferring the node given as nid. The node must be valid and
> @@ -521,7 +514,7 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
>  	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
>  	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
>  
> -	return __alloc_pages(gfp_mask, order, nid);
> +	return __alloc_pages(gfp_mask, order, nid, NULL);
>  }
>  
>  /*
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index a6bad1f686c5..604857289e02 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1644,7 +1644,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
>  		gfp_mask |= __GFP_RETRY_MAYFAIL;
>  	if (nid == NUMA_NO_NODE)
>  		nid = numa_mem_id();
> -	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
> +	page = __alloc_pages(gfp_mask, order, nid, nmask);
>  	if (page)
>  		__count_vm_event(HTLB_BUDDY_PGALLOC);
>  	else
> diff --git a/mm/internal.h b/mm/internal.h
> index 8e9c660f33ca..19aee773f6a8 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -126,10 +126,10 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
>   * family of functions.
>   *
>   * nodemask, migratetype and highest_zoneidx are initialized only once in
> - * __alloc_pages_nodemask() and then never change.
> + * __alloc_pages() and then never change.
>   *
>   * zonelist, preferred_zone and highest_zoneidx are set first in
> - * __alloc_pages_nodemask() for the fast path, and might be later changed
> + * __alloc_pages() for the fast path, and might be later changed
>   * in __alloc_pages_slowpath(). All other functions pass the whole structure
>   * by a const pointer.
>   */
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index 6961238c7ef5..addf0854d693 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -2140,7 +2140,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
>  {
>  	struct page *page;
>  
> -	page = __alloc_pages(gfp, order, nid);
> +	page = __alloc_pages(gfp, order, nid, NULL);
>  	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
>  	if (!static_branch_likely(&vm_numa_stat_key))
>  		return page;
> @@ -2237,7 +2237,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
>  
>  	nmask = policy_nodemask(gfp, pol);
>  	preferred_nid = policy_node(gfp, pol, node);
> -	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
> +	page = __alloc_pages(gfp, order, preferred_nid, nmask);
>  	mpol_cond_put(pol);
>  out:
>  	return page;
> @@ -2274,7 +2274,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
>  	if (pol->mode == MPOL_INTERLEAVE)
>  		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
>  	else
> -		page = __alloc_pages_nodemask(gfp, order,
> +		page = __alloc_pages(gfp, order,
>  				policy_node(gfp, pol, numa_node_id()),
>  				policy_nodemask(gfp, pol));
>  
> diff --git a/mm/migrate.c b/mm/migrate.c
> index a3e1acc72ad7..f1ca50febfbe 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1617,7 +1617,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
>  	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
>  		gfp_mask |= __GFP_HIGHMEM;
>  
> -	new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
> +	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
>  
>  	if (new_page && PageTransHuge(new_page))
>  		prep_transhuge_page(new_page);
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index d72ef706f6e6..90a1eb06c11b 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4962,8 +4962,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
>  /*
>   * This is the 'heart' of the zoned buddy allocator.
>   */
> -struct page *
> -__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
> +struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
>  							nodemask_t *nodemask)
>  {
>  	struct page *page;
> @@ -5025,7 +5024,7 @@ __alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
>  
>  	return page;
>  }
> -EXPORT_SYMBOL(__alloc_pages_nodemask);
> +EXPORT_SYMBOL(__alloc_pages);
>  
>  /*
>   * Common helper functions. Never use with __GFP_HIGHMEM because the returned
> -- 
> 2.29.2
> 

-- 
Michal Hocko
SUSE Labs



* Re: [PATCH 3/2] mm: Rename alloc_pages_current to alloc_pages
  2021-01-24 17:11 ` [PATCH 3/2] mm: Rename alloc_pages_current to alloc_pages Matthew Wilcox
  2021-01-26 16:05   ` Vlastimil Babka
@ 2021-01-27  9:39   ` Michal Hocko
  1 sibling, 0 replies; 9+ messages in thread
From: Michal Hocko @ 2021-01-27  9:39 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm

On Sun 24-01-21 17:11:07, Matthew Wilcox wrote:
> When CONFIG_NUMA is enabled, alloc_pages() is a wrapper around
> alloc_pages_current().  This is pointless; just implement alloc_pages()
> directly.

alloc_pages_current was a bit of a misnomer anyway, as it implements
memory policy as well (e.g. interleave).
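
Roughly, the policy handling in question looks like this (a sketch based on
mm/mempolicy.c; the in_interrupt()/get_task_policy() part is not in the
quoted hunks, so treat it as an approximation rather than the exact code):

	struct mempolicy *pol = &default_policy;

	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
		pol = get_task_policy(current);

	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else
		page = __alloc_pages(gfp, order,
				policy_node(gfp, pol, numa_node_id()),
				policy_nodemask(gfp, pol));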

> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Michal Hocko <mhocko@suse.com>

Thanks!

> ---
>  include/linux/gfp.h |  8 +-------
>  mm/mempolicy.c      | 27 +++++++++++++--------------
>  2 files changed, 14 insertions(+), 21 deletions(-)
> 
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index acca2c487da8..44978b35ce1a 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -532,13 +532,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
>  }
>  
>  #ifdef CONFIG_NUMA
> -extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
> -
> -static inline struct page *
> -alloc_pages(gfp_t gfp_mask, unsigned int order)
> -{
> -	return alloc_pages_current(gfp_mask, order);
> -}
> +struct page *alloc_pages(gfp_t gfp, unsigned int order);
>  extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
>  			struct vm_area_struct *vma, unsigned long addr,
>  			int node, bool hugepage);
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index addf0854d693..0cf54aa5a2f0 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -2245,21 +2245,20 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
>  EXPORT_SYMBOL(alloc_pages_vma);
>  
>  /**
> - * 	alloc_pages_current - Allocate pages.
> + * alloc_pages - Allocate pages.
> + * @gfp:
> + *	%GFP_USER   user allocation,
> + *	%GFP_KERNEL kernel allocation,
> + *	%GFP_HIGHMEM highmem allocation,
> + *	%GFP_FS     don't call back into a file system.
> + *	%GFP_ATOMIC don't sleep.
> + * @order: Power of two of allocation size in pages. 0 is a single page.
>   *
> - *	@gfp:
> - *		%GFP_USER   user allocation,
> - *      	%GFP_KERNEL kernel allocation,
> - *      	%GFP_HIGHMEM highmem allocation,
> - *      	%GFP_FS     don't call back into a file system.
> - *      	%GFP_ATOMIC don't sleep.
> - *	@order: Power of two of allocation size in pages. 0 is a single page.
> - *
> - *	Allocate a page from the kernel page pool.  When not in
> - *	interrupt context and apply the current process NUMA policy.
> - *	Returns NULL when no page can be allocated.
> + * Allocate a page from the kernel page pool.  When in
> + * process context apply the current process NUMA policy.
> + * Returns NULL when no page can be allocated.
>   */
> -struct page *alloc_pages_current(gfp_t gfp, unsigned order)
> +struct page *alloc_pages(gfp_t gfp, unsigned order)
>  {
>  	struct mempolicy *pol = &default_policy;
>  	struct page *page;
> @@ -2280,7 +2279,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
>  
>  	return page;
>  }
> -EXPORT_SYMBOL(alloc_pages_current);
> +EXPORT_SYMBOL(alloc_pages);
>  
>  int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
>  {
> -- 
> 2.29.2
> 

-- 
Michal Hocko
SUSE Labs

