From: Ben Widawsky <ben.widawsky@intel.com>
To: linux-mm <linux-mm@kvack.org>
Cc: Ben Widawsky <ben.widawsky@intel.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Mike Kravetz <mike.kravetz@oracle.com>,
	Mina Almasry <almasrymina@google.com>,
	Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH 15/18] mm: convert callers of __alloc_pages_nodemask to pmask
Date: Fri, 19 Jun 2020 09:24:22 -0700	[thread overview]
Message-ID: <20200619162425.1052382-16-ben.widawsky@intel.com> (raw)
In-Reply-To: <20200619162425.1052382-1-ben.widawsky@intel.com>

Now that the infrastructure is in place to both select and allocate a
set of preferred nodes as specified by policy (or, perhaps in the
future, by the calling function), start transitioning over the
functions that can benefit from this.

This patch looks stupid. It appears to artificially build a nodemask on
the stack and then use only the first node of that mask - in other
words, a nop that just adds overhead. It does. The reason is that this
is a preparatory patch for the later switch of __alloc_pages_nodemask()
over to taking a mask of preferred nodes. Doing it this way helps with
readability and bisectability.
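
For reference, the transitional pattern that each converted call site
follows can be sketched roughly as below; example_alloc_on_node() is a
hypothetical stand-in used only for illustration, not a helper added by
this patch:

	static struct page *example_alloc_on_node(gfp_t gfp_mask, unsigned int order,
						  int nid, nodemask_t *nmask)
	{
		nodemask_t pmask;

		/* Fall back to the local memory node, as the hugetlb path does. */
		if (nid == NUMA_NO_NODE)
			nid = numa_mem_id();

		/* Build the preferred-node mask on the stack... */
		pmask = nodemask_of_node(nid);

		/* ...but for now hand only its first node to the allocator. */
		return __alloc_pages_nodemask(gfp_mask, order, first_node(pmask), nmask);
	}

Once a later patch teaches __alloc_pages_nodemask() to accept the mask
directly, first_node() drops out and pmask is passed through unchanged.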

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Ben Widawsky <ben.widawsky@intel.com>
---
 mm/hugetlb.c   | 11 ++++++++---
 mm/mempolicy.c | 38 +++++++++++++++++++++++---------------
 2 files changed, 31 insertions(+), 18 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 57ece74e3aae..71b6750661df 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1687,6 +1687,12 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 	int order = huge_page_order(h);
 	struct page *page;
 	bool alloc_try_hard = true;
+	nodemask_t pmask;
+
+	if (nid == NUMA_NO_NODE)
+		nid = numa_mem_id();
+
+	pmask = nodemask_of_node(nid);
 
 	/*
 	 * By default we always try hard to allocate the page with
@@ -1700,9 +1706,8 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
 	if (alloc_try_hard)
 		gfp_mask |= __GFP_RETRY_MAYFAIL;
-	if (nid == NUMA_NO_NODE)
-		nid = numa_mem_id();
-	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+	page = __alloc_pages_nodemask(gfp_mask, order, first_node(pmask),
+				      nmask);
 	if (page)
 		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	else
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3c48f299d344..9521bb46aa00 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2270,11 +2270,11 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 }
 
 static struct page *alloc_pages_vma_thp(gfp_t gfp, struct mempolicy *pol,
-					int order, int node)
+					int order, nodemask_t *prefmask)
 {
 	nodemask_t *nmask;
 	struct page *page;
-	int hpage_node = node;
+	int hpage_node = first_node(*prefmask);
 
 	/*
 	 * For hugepage allocation and non-interleave policy which allows the
@@ -2286,9 +2286,6 @@ static struct page *alloc_pages_vma_thp(gfp_t gfp, struct mempolicy *pol,
 	 * If the policy is interleave or multiple preferred nodes, or does not
 	 * allow the current node in its nodemask, we allocate the standard way.
 	 */
-	if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
-		hpage_node = first_node(pol->v.preferred_nodes);
-
 	nmask = policy_nodemask(gfp, pol);
 
 	/*
@@ -2340,10 +2337,14 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 {
 	struct mempolicy *pol;
 	struct page *page;
-	int preferred_nid;
-	nodemask_t *nmask;
+	nodemask_t *nmask, *pmask, tmp;
 
 	pol = get_vma_policy(vma, addr);
+	pmask = policy_preferred_nodes(gfp, pol);
+	if (!pmask) {
+		tmp = nodemask_of_node(node);
+		pmask = &tmp;
+	}
 
 	if (pol->mode == MPOL_INTERLEAVE) {
 		unsigned nid;
@@ -2353,12 +2354,12 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 		page = alloc_page_interleave(gfp, order, nid);
 	} else if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
 			    hugepage)) {
-		page = alloc_pages_vma_thp(gfp, pol, order, node);
+		page = alloc_pages_vma_thp(gfp, pol, order, pmask);
 		mpol_cond_put(pol);
 	} else {
 		nmask = policy_nodemask(gfp, pol);
-		preferred_nid = policy_node(gfp, pol, node);
-		page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
+		page = __alloc_pages_nodemask(gfp, order, first_node(*pmask),
+					      nmask);
 		mpol_cond_put(pol);
 	}
 
@@ -2393,12 +2394,19 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	 * No reference counting needed for current->mempolicy
 	 * nor system default_policy
 	 */
-	if (pol->mode == MPOL_INTERLEAVE)
+	if (pol->mode == MPOL_INTERLEAVE) {
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
-	else
-		page = __alloc_pages_nodemask(gfp, order,
-				policy_node(gfp, pol, numa_node_id()),
-				policy_nodemask(gfp, pol));
+	} else {
+		nodemask_t tmp, *pmask;
+
+		pmask = policy_preferred_nodes(gfp, pol);
+		if (!pmask) {
+			tmp = nodemask_of_node(numa_node_id());
+			pmask = &tmp;
+		}
+		page = __alloc_pages_nodemask(gfp, order, first_node(*pmask),
+					      policy_nodemask(gfp, pol));
+	}
 
 	return page;
 }
-- 
2.27.0



Thread overview: 43+ messages
2020-06-19 16:24 [PATCH 00/18] multiple preferred nodes Ben Widawsky
2020-06-19 16:24 ` [PATCH 01/18] mm/mempolicy: Add comment for missing LOCAL Ben Widawsky
2020-06-24  7:55   ` Michal Hocko
2020-06-19 16:24 ` [PATCH 02/18] mm/mempolicy: Use node_mem_id() instead of node_id() Ben Widawsky
2020-06-24  8:25   ` Michal Hocko
2020-06-24 16:48     ` Ben Widawsky
2020-06-26 12:30       ` Michal Hocko
2020-06-19 16:24 ` [PATCH 03/18] mm/page_alloc: start plumbing multi preferred node Ben Widawsky
2020-06-19 16:24 ` [PATCH 04/18] mm/page_alloc: add preferred pass to page allocation Ben Widawsky
2020-06-19 16:24 ` [PATCH 05/18] mm/mempolicy: convert single preferred_node to full nodemask Ben Widawsky
2020-06-19 16:24 ` [PATCH 06/18] mm/mempolicy: Add MPOL_PREFERRED_MANY for multiple preferred nodes Ben Widawsky
2020-06-19 16:24 ` [PATCH 07/18] mm/mempolicy: allow preferred code to take a nodemask Ben Widawsky
2020-06-19 16:24 ` [PATCH 08/18] mm/mempolicy: refactor rebind code for PREFERRED_MANY Ben Widawsky
2020-06-19 16:24 ` [PATCH 09/18] mm: Finish handling MPOL_PREFERRED_MANY Ben Widawsky
2020-06-19 16:24 ` [PATCH 10/18] mm: clean up alloc_pages_vma (thp) Ben Widawsky
2020-06-19 16:24 ` [PATCH 11/18] mm: Extract THP hugepage allocation Ben Widawsky
2020-06-19 16:24 ` [PATCH 12/18] mm/mempolicy: Use __alloc_page_node for interleaved Ben Widawsky
2020-06-19 16:24 ` [PATCH 13/18] mm: kill __alloc_pages Ben Widawsky
2020-06-19 16:24 ` [PATCH 14/18] mm/mempolicy: Introduce policy_preferred_nodes() Ben Widawsky
2020-06-19 16:24 ` [PATCH 15/18] mm: convert callers of __alloc_pages_nodemask to pmask Ben Widawsky [this message]
2020-06-19 16:24 ` [PATCH 16/18] alloc_pages_nodemask: turn preferred nid into a nodemask Ben Widawsky
2020-06-19 16:24 ` [PATCH 17/18] mm: Use less stack for page allocations Ben Widawsky
2020-06-19 16:24 ` [PATCH 18/18] mm/mempolicy: Advertise new MPOL_PREFERRED_MANY Ben Widawsky
2020-06-22  7:09 ` [PATCH 00/18] multiple preferred nodes Michal Hocko
2020-06-23 11:20   ` Michal Hocko
2020-06-23 16:12     ` Ben Widawsky
2020-06-24  7:52       ` Michal Hocko
2020-06-24 16:16         ` Ben Widawsky
2020-06-24 18:39           ` Michal Hocko
2020-06-24 19:37             ` Ben Widawsky
2020-06-24 19:51               ` Michal Hocko
2020-06-24 20:01                 ` Ben Widawsky
2020-06-24 20:07                   ` Michal Hocko
2020-06-24 20:23                     ` Ben Widawsky
2020-06-24 20:42                       ` Michal Hocko
2020-06-24 20:55                         ` Ben Widawsky
2020-06-25  6:28                           ` Michal Hocko
2020-06-26 21:39         ` Ben Widawsky
2020-06-29 10:16           ` Michal Hocko
2020-06-22 20:54 ` Andi Kleen
2020-06-22 21:02   ` Ben Widawsky
2020-06-22 21:07   ` Dave Hansen
2020-06-22 22:02     ` Andi Kleen
