* [PATCH v3 1/6] mm/huge_memory.c: rework the function vma_adjust_trans_huge()
From: Miaohe Lin @ 2021-03-18 12:27 UTC
To: akpm
Cc: ziy, willy, william.kucharski, vbabka, peterx, yulei.kernel,
walken, aneesh.kumar, rcampbell, thomas_os, yang.shi,
richard.weiyang, linux-kernel, linux-mm, linmiaohe
The current implementation of vma_adjust_trans_huge() contains some
duplicated code. Add a helper function, split_huge_pmd_if_needed(), to
get rid of the duplication and make the function more succinct.
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
mm/huge_memory.c | 44 +++++++++++++++++++-------------------------
1 file changed, 19 insertions(+), 25 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bff92dea5ab3..ae16a82da823 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2301,44 +2301,38 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
__split_huge_pmd(vma, pmd, address, freeze, page);
}
+static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
+{
+ /*
+ * If the new address isn't hpage aligned and it could previously
+ * contain a hugepage: check if we need to split a huge pmd.
+ */
+ if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
+ range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
+ ALIGN(address, HPAGE_PMD_SIZE)))
+ split_huge_pmd_address(vma, address, false, NULL);
+}
+
void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next)
{
- /*
- * If the new start address isn't hpage aligned and it could
- * previously contain an hugepage: check if we need to split
- * an huge pmd.
- */
- if (start & ~HPAGE_PMD_MASK &&
- (start & HPAGE_PMD_MASK) >= vma->vm_start &&
- (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
- split_huge_pmd_address(vma, start, false, NULL);
+ /* Check if we need to split start first. */
+ split_huge_pmd_if_needed(vma, start);
- /*
- * If the new end address isn't hpage aligned and it could
- * previously contain an hugepage: check if we need to split
- * an huge pmd.
- */
- if (end & ~HPAGE_PMD_MASK &&
- (end & HPAGE_PMD_MASK) >= vma->vm_start &&
- (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
- split_huge_pmd_address(vma, end, false, NULL);
+ /* Check if we need to split end next. */
+ split_huge_pmd_if_needed(vma, end);
/*
- * If we're also updating the vma->vm_next->vm_start, if the new
- * vm_next->vm_start isn't hpage aligned and it could previously
- * contain an hugepage: check if we need to split an huge pmd.
+ * If we're also updating the vma->vm_next->vm_start,
+ * check if we need to split it.
*/
if (adjust_next > 0) {
struct vm_area_struct *next = vma->vm_next;
unsigned long nstart = next->vm_start;
nstart += adjust_next;
- if (nstart & ~HPAGE_PMD_MASK &&
- (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
- (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
- split_huge_pmd_address(next, nstart, false, NULL);
+ split_huge_pmd_if_needed(next, nstart);
}
}
--
2.19.1
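The old checks and the new helper are equivalent: "addr & ~HPAGE_PMD_MASK"
is non-zero exactly when addr is not HPAGE_PMD_SIZE-aligned,
"addr & HPAGE_PMD_MASK" is ALIGN_DOWN(addr, HPAGE_PMD_SIZE), and adding
HPAGE_PMD_SIZE to that gives ALIGN(addr, HPAGE_PMD_SIZE) whenever addr is
unaligned. The following standalone user-space sketch checks this
exhaustively over a small address range; the macros are local
reimplementations (assuming a 2 MiB PMD size, as on x86-64), not the
kernel's definitions:

#include <assert.h>
#include <stdio.h>

/* Illustrative value: a 2 MiB PMD-sized huge page, as on x86-64. */
#define HPAGE_PMD_SIZE	(2UL << 20)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

/* Local reimplementations of the kernel helpers used by the patch. */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	for (unsigned long addr = 0; addr < (8UL << 20); addr += 4096) {
		/* Old test: "start & ~HPAGE_PMD_MASK" as a boolean. */
		int old_unaligned = (addr & ~HPAGE_PMD_MASK) != 0;
		/* New test used by split_huge_pmd_if_needed(). */
		int new_unaligned = !IS_ALIGNED(addr, HPAGE_PMD_SIZE);

		assert(old_unaligned == new_unaligned);
		/* Old range start vs. ALIGN_DOWN. */
		assert((addr & HPAGE_PMD_MASK) ==
		       ALIGN_DOWN(addr, HPAGE_PMD_SIZE));
		/* Old range end vs. ALIGN, in the unaligned case the
		 * helper actually splits on. */
		if (old_unaligned)
			assert((addr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE ==
			       ALIGN(addr, HPAGE_PMD_SIZE));
	}
	puts("old mask arithmetic and new ALIGN helpers agree");
	return 0;
}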
* [PATCH v3 2/6] mm/huge_memory.c: make get_huge_zero_page() return bool
From: Miaohe Lin @ 2021-03-18 12:27 UTC
To: akpm
Cc: ziy, willy, william.kucharski, vbabka, peterx, yulei.kernel,
walken, aneesh.kumar, rcampbell, thomas_os, yang.shi,
richard.weiyang, linux-kernel, linux-mm, linmiaohe
huge_zero_page is guaranteed to be non-NULL whenever huge_zero_refcount
is successfully incremented, so returning READ_ONCE(huge_zero_page)
conveys no more information than returning 'true'. Since every caller
only uses the return value to check whether huge_zero_page exists, make
the function return bool and drop the READ_ONCE().
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
mm/huge_memory.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ae16a82da823..01b96c638e73 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -77,18 +77,18 @@ bool transparent_hugepage_enabled(struct vm_area_struct *vma)
return false;
}
-static struct page *get_huge_zero_page(void)
+static bool get_huge_zero_page(void)
{
struct page *zero_page;
retry:
if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
- return READ_ONCE(huge_zero_page);
+ return true;
zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
HPAGE_PMD_ORDER);
if (!zero_page) {
count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
- return NULL;
+ return false;
}
count_vm_event(THP_ZERO_PAGE_ALLOC);
preempt_disable();
@@ -101,7 +101,7 @@ static struct page *get_huge_zero_page(void)
/* We take additional reference here. It will be put back by shrinker */
atomic_set(&huge_zero_refcount, 2);
preempt_enable();
- return READ_ONCE(huge_zero_page);
+ return true;
}
static void put_huge_zero_page(void)
--
2.19.1
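Why a successful atomic_inc_not_zero() guarantees a non-NULL
huge_zero_page: the refcount only becomes non-zero after the page pointer
has been published, so the pointer never needs to be re-read on the fast
path. Below is a standalone user-space model of the pattern (C11 atomics
and calloc() stand in for the kernel's atomic_t and page allocator, and
the preemption and shrinker details are elided; this is a sketch of the
invariant, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

static _Atomic(void *) huge_zero_page;
static atomic_int huge_zero_refcount;

/* Increment *v unless it is zero; mirrors the kernel primitive. */
static bool atomic_inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));
	return true;
}

static bool get_huge_zero_page_model(void)
{
	void *zero_page, *expected;

retry:
	/*
	 * Fast path: the refcount is non-zero only after the page has
	 * been published below, so success here already implies
	 * huge_zero_page != NULL -- no need to read the pointer.
	 */
	if (atomic_inc_not_zero(&huge_zero_refcount))
		return true;

	zero_page = calloc(1, 4096);	/* stand-in allocation */
	if (!zero_page)
		return false;

	expected = NULL;
	if (!atomic_compare_exchange_strong(&huge_zero_page, &expected,
					    zero_page)) {
		/* Lost the race: drop our page, take a reference instead. */
		free(zero_page);
		goto retry;
	}
	/* One reference for the caller, one put back by the shrinker. */
	atomic_store(&huge_zero_refcount, 2);
	return true;
}

int main(void)
{
	return get_huge_zero_page_model() ? 0 : 1;
}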
* Re: [PATCH v3 2/6] mm/huge_memory.c: make get_huge_zero_page() return bool
From: Zi Yan @ 2021-03-18 14:00 UTC
To: Miaohe Lin
Cc: akpm, willy, william.kucharski, vbabka, peterx, yulei.kernel,
walken, aneesh.kumar, rcampbell, thomas_os, yang.shi,
richard.weiyang, linux-kernel, linux-mm
On 18 Mar 2021, at 8:27, Miaohe Lin wrote:
> huge_zero_page is guaranteed to be non-NULL whenever huge_zero_refcount
> is successfully incremented, so returning READ_ONCE(huge_zero_page)
> conveys no more information than returning 'true'. Since every caller
> only uses the return value to check whether huge_zero_page exists, make
> the function return bool and drop the READ_ONCE().
>
> Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
> ---
> mm/huge_memory.c | 8 ++++----
> 1 file changed, 4 insertions(+), 4 deletions(-)
>
LGTM. Reviewed-by: Zi Yan <ziy@nvidia.com>
--
Best Regards,
Yan Zi
* [PATCH v3 3/6] mm/huge_memory.c: rework the function do_huge_pmd_numa_page() slightly
From: Miaohe Lin @ 2021-03-18 12:27 UTC
To: akpm
Cc: ziy, willy, william.kucharski, vbabka, peterx, yulei.kernel,
walken, aneesh.kumar, rcampbell, thomas_os, yang.shi,
richard.weiyang, linux-kernel, linux-mm, linmiaohe
The current code that checks whether a misplaced transhuge page needs to
be migrated is hard to follow. Rework it and add a comment to make the
logic clearer and improve readability.
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
mm/huge_memory.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 01b96c638e73..23964adf5db2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1462,12 +1462,6 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
*/
page_locked = trylock_page(page);
target_nid = mpol_misplaced(page, vma, haddr);
- if (target_nid == NUMA_NO_NODE) {
- /* If the page was locked, there are no parallel migrations */
- if (page_locked)
- goto clear_pmdnuma;
- }
-
/* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) {
page_nid = NUMA_NO_NODE;
@@ -1476,6 +1470,11 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
spin_unlock(vmf->ptl);
put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
goto out;
+ } else if (target_nid == NUMA_NO_NODE) {
+ /*
+  * There are no parallel migrations and the page is in the right
+  * node. Clear the numa hinting info in this pmd.
+  */
+ goto clear_pmdnuma;
}
/*
--
2.19.1
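The rework folds the NUMA_NO_NODE check into an else-if after the
!page_locked bail-out. That the reordering preserves behaviour can be
checked exhaustively over the two booleans involved; here is a minimal
standalone model (outcome enums stand in for the real goto targets, and
everything else in the function is elided):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* The three ways the reworked section of do_huge_pmd_numa_page() can go. */
enum outcome { MIGRATE, CLEAR_PMDNUMA, WAIT_AND_OUT };

/* Control flow before the patch. */
static enum outcome old_flow(bool page_locked, bool misplaced)
{
	if (!misplaced) {		/* target_nid == NUMA_NO_NODE */
		if (page_locked)
			return CLEAR_PMDNUMA;
	}
	if (!page_locked)
		return WAIT_AND_OUT;	/* wait for migration, goto out */
	return MIGRATE;
}

/* Control flow after the patch. */
static enum outcome new_flow(bool page_locked, bool misplaced)
{
	if (!page_locked)
		return WAIT_AND_OUT;
	else if (!misplaced)
		return CLEAR_PMDNUMA;
	return MIGRATE;
}

int main(void)
{
	for (int locked = 0; locked <= 1; locked++)
		for (int misplaced = 0; misplaced <= 1; misplaced++)
			assert(old_flow(locked, misplaced) ==
			       new_flow(locked, misplaced));
	puts("old and new control flow agree in all four cases");
	return 0;
}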
* Re: [PATCH v3 3/6] mm/huge_memory.c: rework the function do_huge_pmd_numa_page() slightly
From: Zi Yan @ 2021-03-18 14:04 UTC
To: Miaohe Lin
Cc: akpm, willy, william.kucharski, vbabka, peterx, yulei.kernel,
walken, aneesh.kumar, rcampbell, thomas_os, yang.shi,
richard.weiyang, linux-kernel, linux-mm
On 18 Mar 2021, at 8:27, Miaohe Lin wrote:
> The current code that checks whether a misplaced transhuge page needs
> to be migrated is hard to follow. Rework it and add a comment to make
> the logic clearer and improve readability.
>
> Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
> ---
> mm/huge_memory.c | 11 +++++------
> 1 file changed, 5 insertions(+), 6 deletions(-)
>
LGTM. Reviewed-by: Zi Yan <ziy@nvidia.com>
--
Best Regards,
Yan Zi
* [PATCH v3 4/6] mm/huge_memory.c: remove redundant PageCompound() check
From: Miaohe Lin @ 2021-03-18 12:27 UTC
To: akpm
Cc: ziy, willy, william.kucharski, vbabka, peterx, yulei.kernel,
walken, aneesh.kumar, rcampbell, thomas_os, yang.shi,
richard.weiyang, linux-kernel, linux-mm, linmiaohe
A head page is by definition a compound page, so !PageCompound(page)
implies !PageHead(page). The condition '!PageCompound(page) ||
!PageHead(page)' is therefore equivalent to '!PageHead(page)' alone;
drop the redundant PageCompound() check.
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
mm/huge_memory.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 23964adf5db2..52acc3954afd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1291,7 +1291,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
}
page = pmd_page(orig_pmd);
- VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
+ VM_BUG_ON_PAGE(!PageHead(page), page);
/* Lock page for reuse_swap_page() */
if (!trylock_page(page)) {
--
2.19.1
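The argument is a simple implication: !PageCompound() forces !PageHead(),
so the disjunction collapses. Since a page can only be a head, a tail, or
an ordinary base page, this is easy to check exhaustively; a minimal
standalone model follows (the predicates are simplified stand-ins for the
real page-flag tests):

#include <assert.h>
#include <stdio.h>

/* Every page is exactly one of these; only head and tail are compound. */
enum page_kind { BASE, HEAD, TAIL };

static int page_compound(enum page_kind p) { return p == HEAD || p == TAIL; }
static int page_head(enum page_kind p) { return p == HEAD; }

int main(void)
{
	static const enum page_kind kinds[] = { BASE, HEAD, TAIL };

	for (int i = 0; i < 3; i++) {
		enum page_kind p = kinds[i];

		/* Old VM_BUG_ON_PAGE condition vs. the simplified one. */
		assert((!page_compound(p) || !page_head(p)) == !page_head(p));
	}
	puts("!PageCompound(p) || !PageHead(p) reduces to !PageHead(p)");
	return 0;
}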
* [PATCH v3 5/6] mm/huge_memory.c: remove unused macro TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG
From: Miaohe Lin @ 2021-03-18 12:27 UTC
To: akpm
Cc: ziy, willy, william.kucharski, vbabka, peterx, yulei.kernel,
walken, aneesh.kumar, rcampbell, thomas_os, yang.shi,
richard.weiyang, linux-kernel, linux-mm, linmiaohe
Commit 4958e4d86ecb ("mm: thp: remove debug_cow switch") forgot to
remove the now-unused TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG macro. Remove
it here.
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
include/linux/huge_mm.h | 3 ---
1 file changed, 3 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ba973efcd369..9626fda5efce 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -87,9 +87,6 @@ enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
-#ifdef CONFIG_DEBUG_VM
- TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
-#endif
};
struct kobject;
--
2.19.1
* Re: [PATCH v3 5/6] mm/huge_memory.c: remove unused macro TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG
From: Zi Yan @ 2021-03-18 14:12 UTC
To: Miaohe Lin
Cc: akpm, willy, william.kucharski, vbabka, peterx, yulei.kernel,
walken, aneesh.kumar, rcampbell, thomas_os, yang.shi,
richard.weiyang, linux-kernel, linux-mm
On 18 Mar 2021, at 8:27, Miaohe Lin wrote:
> Commit 4958e4d86ecb ("mm: thp: remove debug_cow switch") forgot to
> remove the now-unused TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG macro. Remove
> it here.
>
> Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
> ---
> include/linux/huge_mm.h | 3 ---
> 1 file changed, 3 deletions(-)
LGTM. Reviewed-by: Zi Yan <ziy@nvidia.com>
--
Best Regards,
Yan Zi
* [PATCH v3 6/6] mm/huge_memory.c: use helper function migration_entry_to_page()
From: Miaohe Lin @ 2021-03-18 12:27 UTC
To: akpm
Cc: ziy, willy, william.kucharski, vbabka, peterx, yulei.kernel,
walken, aneesh.kumar, rcampbell, thomas_os, yang.shi,
richard.weiyang, linux-kernel, linux-mm, linmiaohe
Use the helper function migration_entry_to_page() to get the page from a
migration entry rather than open-coding pfn_to_page(swp_offset(entry)).
As a bonus, the helper also asserts that the page is locked.
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
mm/huge_memory.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 52acc3954afd..9b31ef84bf7e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1693,7 +1693,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
entry = pmd_to_swp_entry(orig_pmd);
- page = pfn_to_page(swp_offset(entry));
+ page = migration_entry_to_page(entry);
flush_needed = 0;
} else
WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
@@ -2101,7 +2101,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
swp_entry_t entry;
entry = pmd_to_swp_entry(old_pmd);
- page = pfn_to_page(swp_offset(entry));
+ page = migration_entry_to_page(entry);
write = is_write_migration_entry(entry);
young = false;
soft_dirty = pmd_swp_soft_dirty(old_pmd);
--
2.19.1
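For reference, the helper adopted here lives in include/linux/swapops.h
and, in kernels of this vintage, reads approximately as follows (a sketch
from memory; the tree is authoritative). The BUG_ON() is the locked-page
check the changelog mentions:

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}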
* Re: [PATCH v3 0/6] Some cleanups for huge_memory
From: Peter Xu @ 2021-03-18 15:07 UTC
To: Miaohe Lin
Cc: akpm, ziy, willy, william.kucharski, vbabka, yulei.kernel,
walken, aneesh.kumar, rcampbell, thomas_os, yang.shi,
richard.weiyang, linux-kernel, linux-mm
On Thu, Mar 18, 2021 at 08:27:16AM -0400, Miaohe Lin wrote:
> Hi all,
> This series contains cleanups that rework some function logic for
> readability, use helper functions, and so on. More details can be found
> in the respective changelogs. Thanks!
>
> v2->v3:
> use ALIGN/ALIGN_DOWN too against HPAGE_PMD_SIZE per Peter.
>
> v1->v2:
> rename try_to_split_huge_pmd_address and move up comments.
>
> Miaohe Lin (6):
> mm/huge_memory.c: rework the function vma_adjust_trans_huge()
> mm/huge_memory.c: make get_huge_zero_page() return bool
> mm/huge_memory.c: rework the function do_huge_pmd_numa_page() slightly
> mm/huge_memory.c: remove redundant PageCompound() check
> mm/huge_memory.c: remove unused macro
> TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG
> mm/huge_memory.c: use helper function migration_entry_to_page()
Reviewed-by: Peter Xu <peterx@redhat.com>
--
Peter Xu