From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> To: linux-mm@kvack.org, akpm@linux-foundation.org Cc: mpe@ellerman.id.au, linuxppc-dev@lists.ozlabs.org, kaleshsingh@google.com, npiggin@gmail.com, joel@joelfernandes.org, Christophe Leroy <christophe.leroy@csgroup.eu>, Linus Torvalds <torvalds@linux-foundation.org>, "Kirill A . Shutemov" <kirill@shutemov.name>, "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> Subject: [PATCH v7 05/11] mm/mremap: Convert huge PUD move to separate helper Date: Mon, 7 Jun 2021 11:21:25 +0530 [thread overview] Message-ID: <20210607055131.156184-6-aneesh.kumar@linux.ibm.com> (raw) In-Reply-To: <20210607055131.156184-1-aneesh.kumar@linux.ibm.com> With TRANSPARENT_HUGEPAGE_PUD enabled the kernel can find huge PUD entries. Add a helper to move huge PUD entries on mremap(). This will be used by a later patch to optimize mremap of PUD_SIZE aligned level 4 PTE mapped address This also make sure we support mremap on huge PUD entries even with CONFIG_HAVE_MOVE_PUD disabled. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> --- mm/mremap.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 73 insertions(+), 7 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index 47c255b60150..92ab7d24a587 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -324,10 +324,62 @@ static inline bool move_normal_pud(struct vm_area_struct *vma, } #endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PUD +static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, + unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) +{ + spinlock_t *old_ptl, *new_ptl; + struct mm_struct *mm = vma->vm_mm; + pud_t pud; + + /* + * The destination pud shouldn't be established, free_pgtables() + * should have released it. + */ + if (WARN_ON_ONCE(!pud_none(*new_pud))) + return false; + + /* + * We don't have to worry about the ordering of src and dst + * ptlocks because exclusive mmap_lock prevents deadlock. 
+ */ + old_ptl = pud_lock(vma->vm_mm, old_pud); + new_ptl = pud_lockptr(mm, new_pud); + if (new_ptl != old_ptl) + spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); + + /* Clear the pud */ + pud = *old_pud; + pud_clear(old_pud); + + VM_BUG_ON(!pud_none(*new_pud)); + + /* Set the new pud */ + /* mark soft_ditry when we add pud level soft dirty support */ + set_pud_at(mm, new_addr, new_pud, pud); + flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE); + if (new_ptl != old_ptl) + spin_unlock(new_ptl); + spin_unlock(old_ptl); + + return true; +} +#else +static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, + unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) +{ + WARN_ON_ONCE(1); + return false; + +} +#endif + enum pgt_entry { NORMAL_PMD, HPAGE_PMD, NORMAL_PUD, + HPAGE_PUD, }; /* @@ -347,6 +399,7 @@ static __always_inline unsigned long get_extent(enum pgt_entry entry, mask = PMD_MASK; size = PMD_SIZE; break; + case HPAGE_PUD: case NORMAL_PUD: mask = PUD_MASK; size = PUD_SIZE; @@ -395,6 +448,11 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, move_huge_pmd(vma, old_addr, new_addr, old_entry, new_entry); break; + case HPAGE_PUD: + moved = move_huge_pud(vma, old_addr, new_addr, old_entry, + new_entry); + break; + default: WARN_ON_ONCE(1); break; @@ -414,6 +472,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long extent, old_end; struct mmu_notifier_range range; pmd_t *old_pmd, *new_pmd; + pud_t *old_pud, *new_pud; old_end = old_addr + len; flush_cache_range(vma, old_addr, old_end); @@ -429,15 +488,22 @@ unsigned long move_page_tables(struct vm_area_struct *vma, * PUD level if possible. */ extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr); - if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) { - pud_t *old_pud, *new_pud; - old_pud = get_old_pud(vma->vm_mm, old_addr); - if (!old_pud) + old_pud = get_old_pud(vma->vm_mm, old_addr); + if (!old_pud) + continue; + new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr); + if (!new_pud) + break; + if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) { + if (extent == HPAGE_PUD_SIZE) { + move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr, + old_pud, new_pud, need_rmap_locks); + /* We ignore and continue on error? */ continue; - new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr); - if (!new_pud) - break; + } + } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) { + if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr, old_pud, new_pud, need_rmap_locks)) continue; -- 2.31.1
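
For context, a minimal userspace sketch (an illustration, not part of
this patch or the series' selftests) of an mremap() call that moves a
PUD_SIZE-aligned region, assuming a 1GB PUD size as on x86-64 or ppc64
radix. Plain anonymous memory can only take the NORMAL_PUD path (and
only with CONFIG_HAVE_MOVE_PUD); reaching move_huge_pud() itself
additionally requires a mapping backed by huge PUD entries, e.g. a
1GB DAX mapping.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define SIZE_1GB (1UL << 30)

int main(void)
{
	/* Over-allocate so a 1GB-aligned start address can be carved out. */
	char *map = mmap(NULL, 3 * SIZE_1GB, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	char *src = (char *)(((unsigned long)map + SIZE_1GB - 1) &
			     ~(SIZE_1GB - 1));

	memset(src, 0xab, SIZE_1GB);

	/*
	 * Move one PUD-size-aligned gigabyte to another 1GB-aligned
	 * address. Old and new ranges both lie inside the original
	 * mapping and do not overlap; MREMAP_FIXED unmaps whatever is
	 * at the destination first.
	 */
	char *dst = mremap(src, SIZE_1GB, SIZE_1GB,
			   MREMAP_MAYMOVE | MREMAP_FIXED, src + SIZE_1GB);
	if (dst == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("moved %p -> %p, data %s\n", (void *)src, (void *)dst,
	       dst[0] == (char)0xab ? "intact" : "corrupt");
	return 0;
}

Note that after this patch the huge-PUD case no longer depends on
CONFIG_HAVE_MOVE_PUD: on a configuration with that option disabled, a
huge-PUD-backed mapping would still be moved via move_huge_pud()
rather than falling through to smaller page-table units.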