From: "Zi Yan" <zi.yan@cs.rutgers.edu>
To: Michal Hocko <mhocko@suse.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>,
Andrew Morton <akpm@linux-foundation.org>,
linux-mm@kvack.org, Alex Williamson <alex.williamson@redhat.com>,
David Rientjes <rientjes@google.com>,
Vlastimil Babka <vbabka@suse.cz>,
Stefan Priebe - Profihost AG <s.priebe@profihost.ag>
Subject: Re: [PATCH] mm, thp: relax __GFP_THISNODE for MADV_HUGEPAGE mappings
Date: Wed, 29 Aug 2018 18:54:23 -0400 [thread overview]
Message-ID: <E97C9342-9BA0-48DD-A580-738ACEE49B41@cs.rutgers.edu> (raw)
In-Reply-To: <20180829192451.GG10223@dhcp22.suse.cz>
[-- Attachment #1: Type: text/plain, Size: 7162 bytes --]
Hi Michal,
<snip>
>
> Fixes: 5265047ac301 ("mm, thp: really limit transparent hugepage allocation to local node")
> Reported-by: Stefan Priebe <s.priebe@profihost.ag>
> Debugged-by: Andrea Arcangeli <aarcange@redhat.com>
> Signed-off-by: Michal Hocko <mhocko@suse.com>
> ---
> include/linux/mempolicy.h | 2 ++
> mm/huge_memory.c | 25 +++++++++++++++++--------
> mm/mempolicy.c | 28 +---------------------------
> 3 files changed, 20 insertions(+), 35 deletions(-)
>
> diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
> index 5228c62af416..bac395f1d00a 100644
> --- a/include/linux/mempolicy.h
> +++ b/include/linux/mempolicy.h
> @@ -139,6 +139,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
> struct mempolicy *get_task_policy(struct task_struct *p);
> struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
> unsigned long addr);
> +struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
> + unsigned long addr);
> bool vma_policy_mof(struct vm_area_struct *vma);
>
> extern void numa_default_policy(void);
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index c3bc7e9c9a2a..94472bf9a31b 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -629,21 +629,30 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
> * available
> * never: never stall for any thp allocation
> */
> -static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
> +static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
> {
> const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
> + gfp_t this_node = 0;
> + struct mempolicy *pol;
> +
> +#ifdef CONFIG_NUMA
> + /* __GFP_THISNODE makes sense only if there is no explicit binding */
> + pol = get_vma_policy(vma, addr);
> + if (pol->mode != MPOL_BIND)
> + this_node = __GFP_THISNODE;
> +#endif
>
> if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
> - return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
> + return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY | this_node);
> if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
> - return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
> + return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node;
> if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
> return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
> - __GFP_KSWAPD_RECLAIM);
> + __GFP_KSWAPD_RECLAIM | this_node);
> if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
> return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
> - 0);
> - return GFP_TRANSHUGE_LIGHT;
> + this_node);
> + return GFP_TRANSHUGE_LIGHT | this_node;
> }
>
> /* Caller must hold page table lock. */
> @@ -715,7 +724,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
> pte_free(vma->vm_mm, pgtable);
> return ret;
> }
> - gfp = alloc_hugepage_direct_gfpmask(vma);
> + gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
> page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
> if (unlikely(!page)) {
> count_vm_event(THP_FAULT_FALLBACK);
> @@ -1290,7 +1299,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
> alloc:
> if (transparent_hugepage_enabled(vma) &&
> !transparent_hugepage_debug_cow()) {
> - huge_gfp = alloc_hugepage_direct_gfpmask(vma);
> + huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
> new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
> } else
> new_page = NULL;
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index da858f794eb6..75bbfc3d6233 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -1648,7 +1648,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
> * freeing by another task. It is the caller's responsibility to free the
> * extra reference for shared policies.
> */
> -static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
> +struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
> unsigned long addr)
> {
> struct mempolicy *pol = __get_vma_policy(vma, addr);
> @@ -2026,32 +2026,6 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
> goto out;
> }
>
> - if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
> - int hpage_node = node;
> -
> - /*
> - * For hugepage allocation and non-interleave policy which
> - * allows the current node (or other explicitly preferred
> - * node) we only try to allocate from the current/preferred
> - * node and don't fall back to other nodes, as the cost of
> - * remote accesses would likely offset THP benefits.
> - *
> - * If the policy is interleave, or does not allow the current
> - * node in its nodemask, we allocate the standard way.
> - */
> - if (pol->mode == MPOL_PREFERRED &&
> - !(pol->flags & MPOL_F_LOCAL))
> - hpage_node = pol->v.preferred_node;
> -
> - nmask = policy_nodemask(gfp, pol);
> - if (!nmask || node_isset(hpage_node, *nmask)) {
> - mpol_cond_put(pol);
> - page = __alloc_pages_node(hpage_node,
> - gfp | __GFP_THISNODE, order);
> - goto out;
> - }
> - }
> -
> nmask = policy_nodemask(gfp, pol);
> preferred_nid = policy_node(gfp, pol, node);
> page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
> --
> 2.18.0
>
Thanks for your patch.
I tested it against Linus’s tree with “memhog -r3 130g” in a two-socket machine with 128GB memory on
each node and got the results below. I expect this test should fill one node, then fall back to the other.
1. madvise(MADV_HUGEPAGE) + defrag = {always, madvise, defer+madvise}: no swap, THPs are allocated in the fallback node.
2. madvise(MADV_HUGEPAGE) + defrag = defer: pages got swapped to the disk instead of being allocated in the fallback node.
3. no madvise, THP is on by default + defrag = {always, defer, defer+madvise}: pages got swapped to the disk instead of
being allocated in the fallback node.
4. no madvise, THP is on by default + defrag = madvise: no swap, base pages are allocated in the fallback node.
Results 2 and 3 seem unexpected, since pages should be allocated in the fallback node.
The reason, as Andrea mentioned in his email, is the combination of __GFP_THISNODE and __GFP_DIRECT_RECLAIM (plus __GFP_KSWAPD_RECLAIM in this experiment). __GFP_THISNODE uses ZONELIST_NOFALLBACK, which removes the fallback possibility,
and __GFP_*_RECLAIM triggers page reclaim in the first page allocation node when fallback nodes are removed by
ZONELIST_NOFALLBACK.
IMHO, __GFP_THISNODE should not be used for user memory allocation at all, since it fights against most memory policies.
But kernel memory allocation would need it as a kernel MPOL_BIND memory policy.
Comments?
—
Best Regards,
Yan Zi
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 516 bytes --]
next prev parent reply other threads:[~2018-08-29 22:54 UTC|newest]
Thread overview: 75+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-08-20 3:22 [PATCH 0/2] fix for "pathological THP behavior" Andrea Arcangeli
2018-08-20 3:22 ` [PATCH 1/2] mm: thp: consolidate policy_nodemask call Andrea Arcangeli
2018-08-20 3:22 ` [PATCH 2/2] mm: thp: fix transparent_hugepage/defrag = madvise || always Andrea Arcangeli
2018-08-20 3:26 ` [PATCH 0/1] fix for "pathological THP behavior" v2 Andrea Arcangeli
2018-08-20 3:26 ` [PATCH 1/1] mm: thp: fix transparent_hugepage/defrag = madvise || always Andrea Arcangeli
2018-08-20 12:35 ` [PATCH 2/2] " Zi Yan
2018-08-20 15:32 ` Andrea Arcangeli
2018-08-21 11:50 ` Michal Hocko
2018-08-21 21:40 ` Andrea Arcangeli
2018-08-22 9:02 ` Michal Hocko
2018-08-22 11:07 ` Michal Hocko
2018-08-22 14:24 ` Andrea Arcangeli
2018-08-22 14:45 ` Michal Hocko
2018-08-22 15:24 ` Andrea Arcangeli
2018-08-23 10:50 ` Michal Hocko
2018-08-22 15:52 ` Andrea Arcangeli
2018-08-23 10:52 ` Michal Hocko
2018-08-28 7:53 ` Michal Hocko
2018-08-28 8:18 ` Michal Hocko
2018-08-28 8:54 ` Stefan Priebe - Profihost AG
2018-08-29 11:11 ` Stefan Priebe - Profihost AG
[not found] ` <D5F4A33C-0A37-495C-9468-D6866A862097@cs.rutgers.edu>
2018-08-29 14:28 ` Michal Hocko
2018-08-29 14:35 ` Michal Hocko
2018-08-29 15:22 ` Zi Yan
2018-08-29 15:47 ` Michal Hocko
2018-08-29 16:06 ` Zi Yan
2018-08-29 16:25 ` Michal Hocko
2018-08-29 19:24 ` [PATCH] mm, thp: relax __GFP_THISNODE for MADV_HUGEPAGE mappings Michal Hocko
2018-08-29 22:54 ` Zi Yan [this message]
2018-08-30 7:00 ` Michal Hocko
2018-08-30 13:22 ` Zi Yan
2018-08-30 13:45 ` Michal Hocko
2018-08-30 14:02 ` Zi Yan
2018-08-30 16:19 ` Stefan Priebe - Profihost AG
2018-08-30 16:40 ` Michal Hocko
2018-09-05 3:44 ` Andrea Arcangeli
2018-09-05 7:08 ` Michal Hocko
2018-09-06 11:10 ` Vlastimil Babka
2018-09-06 11:16 ` Vlastimil Babka
2018-09-06 11:25 ` Michal Hocko
2018-09-06 12:35 ` Zi Yan
2018-09-06 10:59 ` Vlastimil Babka
2018-09-06 11:17 ` Zi Yan
2018-08-30 6:47 ` Michal Hocko
2018-09-06 11:18 ` Vlastimil Babka
2018-09-06 11:27 ` Michal Hocko
2018-09-12 17:29 ` Mel Gorman
2018-09-17 6:11 ` Michal Hocko
2018-09-17 7:04 ` Stefan Priebe - Profihost AG
2018-09-17 9:32 ` Stefan Priebe - Profihost AG
2018-09-17 11:27 ` Michal Hocko
2018-08-20 11:58 ` [PATCH 0/2] fix for "pathological THP behavior" Kirill A. Shutemov
2018-08-20 15:19 ` Andrea Arcangeli
2018-08-21 15:30 ` Vlastimil Babka
2018-08-21 17:26 ` David Rientjes
2018-08-21 22:18 ` Andrea Arcangeli
2018-08-21 22:05 ` Andrea Arcangeli
2018-08-22 9:24 ` Michal Hocko
2018-08-22 15:56 ` Andrea Arcangeli
2018-08-20 19:06 ` Yang Shi
2018-08-20 23:24 ` Andrea Arcangeli
2018-09-07 13:05 [PATCH] mm, thp: relax __GFP_THISNODE for MADV_HUGEPAGE mappings Michal Hocko
2018-09-08 18:52 ` Stefan Priebe - Profihost AG
2018-09-10 7:39 ` Michal Hocko
2018-09-11 9:03 ` Vlastimil Babka
2018-09-10 20:08 ` David Rientjes
2018-09-10 20:22 ` Stefan Priebe - Profihost AG
2018-09-11 8:51 ` Vlastimil Babka
2018-09-11 11:56 ` Michal Hocko
2018-09-11 20:30 ` David Rientjes
2018-09-12 12:05 ` Michal Hocko
2018-09-12 20:40 ` David Rientjes
2018-09-12 13:54 ` Andrea Arcangeli
2018-09-12 14:21 ` Michal Hocko
2018-09-12 15:25 ` Michal Hocko
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=E97C9342-9BA0-48DD-A580-738ACEE49B41@cs.rutgers.edu \
--to=zi.yan@cs.rutgers.edu \
--cc=aarcange@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=alex.williamson@redhat.com \
--cc=linux-mm@kvack.org \
--cc=mhocko@suse.com \
--cc=rientjes@google.com \
--cc=s.priebe@profihost.ag \
--cc=vbabka@suse.cz \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).