* [PATCH v2 0/2] mm: khugepaged: cleanup and a minor tuning in THP
@ 2021-04-07 3:05 yanfei.xu
2021-04-07 3:05 ` [PATCH v2 1/2] mm: khugepaged: use macro to align addresses yanfei.xu
` (2 more replies)
0 siblings, 3 replies; 6+ messages in thread
From: yanfei.xu @ 2021-04-07 3:05 UTC (permalink / raw)
To: shy828301; +Cc: linux-mm, linux-kernel
From: Yanfei Xu <yanfei.xu@windriver.com>
v1-->v2:
1. Correct the wrong location that the goto jumps to.
2. Keep the cond_resched() that was dropped in v1.
Thanks to Yang for the review.
Yanfei Xu (2):
mm: khugepaged: use macro to align addresses
mm: khugepaged: check MMF_DISABLE_THP ahead of iterating over vmas
mm/khugepaged.c | 29 +++++++++++++++--------------
1 file changed, 15 insertions(+), 14 deletions(-)
--
2.27.0
* [PATCH v2 1/2] mm: khugepaged: use macro to align addresses
2021-04-07 3:05 [PATCH v2 0/2] mm: khugepaged: cleanup and a minor tuning in THP yanfei.xu
@ 2021-04-07 3:05 ` yanfei.xu
2021-04-07 21:40 ` Yang Shi
2021-04-07 3:05 ` [PATCH v2 2/2] mm: khugepaged: check MMF_DISABLE_THP ahead of iterating over vmas yanfei.xu
2021-04-14 2:14 ` [PATCH v2 0/2] mm: khugepaged: cleanup and a minor tuning in THP Xu, Yanfei
2 siblings, 1 reply; 6+ messages in thread
From: yanfei.xu @ 2021-04-07 3:05 UTC (permalink / raw)
To: shy828301; +Cc: linux-mm, linux-kernel
From: Yanfei Xu <yanfei.xu@windriver.com>
We can use the ALIGN() family of macros to handle the addresses that
need to be aligned, which improves the readability of the code.
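For reference, a minimal userspace sketch showing that the macros
compute the same results as the open-coded mask arithmetic they
replace. The HPAGE_PMD_* values and macro definitions below are
assumed stand-ins for the real kernel ones (2MB PMD huge pages):

	#include <assert.h>
	#include <stdio.h>

	#define HPAGE_PMD_SIZE	(1UL << 21)	/* 2MB, assumed */
	#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	int main(void)
	{
		unsigned long addr = 0x7f0000123456UL;

		/* round up: (addr + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK */
		assert(ALIGN(addr, HPAGE_PMD_SIZE) ==
		       ((addr + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK));
		/* round down: addr & HPAGE_PMD_MASK */
		assert(ALIGN_DOWN(addr, HPAGE_PMD_SIZE) ==
		       (addr & HPAGE_PMD_MASK));
		/* alignment test: !(addr & ~HPAGE_PMD_MASK) */
		assert(IS_ALIGNED(addr, HPAGE_PMD_SIZE) ==
		       !(addr & ~HPAGE_PMD_MASK));

		printf("macros match the open-coded arithmetic\n");
		return 0;
	}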
Signed-off-by: Yanfei Xu <yanfei.xu@windriver.com>
---
mm/khugepaged.c | 27 +++++++++++++--------------
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a7d6cb912b05..a6012b9259a2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -517,8 +517,8 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
if (!hugepage_vma_check(vma, vm_flags))
return 0;
- hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
- hend = vma->vm_end & HPAGE_PMD_MASK;
+ hstart = ALIGN(vma->vm_start, HPAGE_PMD_SIZE);
+ hend = ALIGN_DOWN(vma->vm_end, HPAGE_PMD_SIZE);
if (hstart < hend)
return khugepaged_enter(vma, vm_flags);
return 0;
@@ -979,8 +979,8 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
if (!vma)
return SCAN_VMA_NULL;
- hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
- hend = vma->vm_end & HPAGE_PMD_MASK;
+ hstart = ALIGN(vma->vm_start, HPAGE_PMD_SIZE);
+ hend = ALIGN_DOWN(vma->vm_end, HPAGE_PMD_SIZE);
if (address < hstart || address + HPAGE_PMD_SIZE > hend)
return SCAN_ADDRESS_RANGE;
if (!hugepage_vma_check(vma, vma->vm_flags))
@@ -1070,7 +1070,7 @@ static void collapse_huge_page(struct mm_struct *mm,
struct mmu_notifier_range range;
gfp_t gfp;
- VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+ VM_BUG_ON(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
/* Only allocate from the target node */
gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
@@ -1235,7 +1235,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
int node = NUMA_NO_NODE, unmapped = 0;
bool writable = false;
- VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+ VM_BUG_ON(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
pmd = mm_find_pmd(mm, address);
if (!pmd) {
@@ -1414,7 +1414,7 @@ static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
{
struct mm_slot *mm_slot;
- VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
+ VM_BUG_ON(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
spin_lock(&khugepaged_mm_lock);
mm_slot = get_mm_slot(mm);
@@ -1437,7 +1437,7 @@ static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
*/
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
- unsigned long haddr = addr & HPAGE_PMD_MASK;
+ unsigned long haddr = ALIGN_DOWN(addr, HPAGE_PMD_SIZE);
struct vm_area_struct *vma = find_vma(mm, haddr);
struct page *hpage;
pte_t *start_pte, *pte;
@@ -1584,7 +1584,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
if (vma->anon_vma)
continue;
addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
- if (addr & ~HPAGE_PMD_MASK)
+ if (!IS_ALIGNED(addr, HPAGE_PMD_SIZE))
continue;
if (vma->vm_end < addr + HPAGE_PMD_SIZE)
continue;
@@ -2070,7 +2070,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
{
struct mm_slot *mm_slot;
struct mm_struct *mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma = NULL;
int progress = 0;
VM_BUG_ON(!pages);
@@ -2092,7 +2092,6 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
* Don't wait for semaphore (to avoid long wait times). Just move to
* the next mm on the list.
*/
- vma = NULL;
if (unlikely(!mmap_read_trylock(mm)))
goto breakouterloop_mmap_lock;
if (likely(!khugepaged_test_exit(mm)))
@@ -2112,15 +2111,15 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
progress++;
continue;
}
- hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
- hend = vma->vm_end & HPAGE_PMD_MASK;
+ hstart = ALIGN(vma->vm_start, HPAGE_PMD_SIZE);
+ hend = ALIGN_DOWN(vma->vm_end, HPAGE_PMD_SIZE);
if (hstart >= hend)
goto skip;
if (khugepaged_scan.address > hend)
goto skip;
if (khugepaged_scan.address < hstart)
khugepaged_scan.address = hstart;
- VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
+ VM_BUG_ON(!IS_ALIGNED(khugepaged_scan.address, HPAGE_PMD_SIZE));
if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
goto skip;
--
2.27.0
* [PATCH v2 2/2] mm: khugepaged: check MMF_DISABLE_THP ahead of iterating over vmas
2021-04-07 3:05 [PATCH v2 0/2] mm: khugepaged: cleanup and a minor tuning in THP yanfei.xu
2021-04-07 3:05 ` [PATCH v2 1/2] mm: khugepaged: use macro to align addresses yanfei.xu
@ 2021-04-07 3:05 ` yanfei.xu
2021-04-07 21:40 ` Yang Shi
2021-04-14 2:14 ` [PATCH v2 0/2] mm: khugepaged: cleanup and a minor tuning in THP Xu, Yanfei
2 siblings, 1 reply; 6+ messages in thread
From: yanfei.xu @ 2021-04-07 3:05 UTC (permalink / raw)
To: shy828301; +Cc: linux-mm, linux-kernel
From: Yanfei Xu <yanfei.xu@windriver.com>
We can check MMF_DISABLE_THP before iterating over the vmas. Otherwise,
if an mm_struct contains a large number of vmas, a lot of CPU cycles
are wasted walking them only to reject each one.
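As a rough sketch of why the early test saves cycles (simplified
stand-in structures, not the real kernel ones; the MMF_DISABLE_THP
bit index is assumed):

	#include <stdio.h>

	struct vma { struct vma *next; };
	struct mm {
		unsigned long flags;
		struct vma *vma_list;
	};

	#define MMF_DISABLE_THP	24	/* bit index, assumed */

	static unsigned int scan_mm(struct mm *mm)
	{
		struct vma *vma;
		unsigned int progress = 0;

		/*
		 * One cheap bit test skips the whole mm at once;
		 * without it, a THP-disabled mm still pays an
		 * O(nr_vmas) walk just to reject every vma.
		 */
		if (mm->flags & (1UL << MMF_DISABLE_THP))
			return progress;

		for (vma = mm->vma_list; vma; vma = vma->next)
			progress++;	/* per-vma scanning work here */

		return progress;
	}

	int main(void)
	{
		struct vma v2 = { 0 }, v1 = { &v2 };
		struct mm on  = { 1UL << MMF_DISABLE_THP, &v1 };
		struct mm off = { 0, &v1 };

		printf("disabled mm scanned %u vmas, enabled mm scanned %u\n",
		       scan_mm(&on), scan_mm(&off));
		return 0;
	}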
Signed-off-by: Yanfei Xu <yanfei.xu@windriver.com>
---
mm/khugepaged.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a6012b9259a2..f4ad25a7db55 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2094,6 +2094,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
*/
if (unlikely(!mmap_read_trylock(mm)))
goto breakouterloop_mmap_lock;
+ if (test_bit(MMF_DISABLE_THP, &mm->flags))
+ goto breakouterloop;
if (likely(!khugepaged_test_exit(mm)))
vma = find_vma(mm, khugepaged_scan.address);
--
2.27.0
* Re: [PATCH v2 1/2] mm: khugepaged: use macro to align addresses
2021-04-07 3:05 ` [PATCH v2 1/2] mm: khugepaged: use macro to align addresses yanfei.xu
@ 2021-04-07 21:40 ` Yang Shi
0 siblings, 0 replies; 6+ messages in thread
From: Yang Shi @ 2021-04-07 21:40 UTC (permalink / raw)
To: yanfei.xu; +Cc: Linux MM, Linux Kernel Mailing List
On Tue, Apr 6, 2021 at 8:06 PM <yanfei.xu@windriver.com> wrote:
>
> From: Yanfei Xu <yanfei.xu@windriver.com>
>
> We can use the ALIGN() family of macros to handle the addresses that
> need to be aligned, which improves the readability of the code.
Reviewed-by: Yang Shi <shy828301@gmail.com>
>
> Signed-off-by: Yanfei Xu <yanfei.xu@windriver.com>
> [...]
* Re: [PATCH v2 2/2] mm: khugepaged: check MMF_DISABLE_THP ahead of iterating over vmas
2021-04-07 3:05 ` [PATCH v2 2/2] mm: khugepaged: check MMF_DISABLE_THP ahead of iterating over vmas yanfei.xu
@ 2021-04-07 21:40 ` Yang Shi
0 siblings, 0 replies; 6+ messages in thread
From: Yang Shi @ 2021-04-07 21:40 UTC (permalink / raw)
To: Xu, Yanfei; +Cc: Linux MM, Linux Kernel Mailing List
On Tue, Apr 6, 2021 at 8:06 PM <yanfei.xu@windriver.com> wrote:
>
> From: Yanfei Xu <yanfei.xu@windriver.com>
>
> We can check MMF_DISABLE_THP before iterating over the vmas. Otherwise,
> if an mm_struct contains a large number of vmas, a lot of CPU cycles
> are wasted walking them only to reject each one.
Reviewed-by: Yang Shi <shy828301@gmail.com>
>
> Signed-off-by: Yanfei Xu <yanfei.xu@windriver.com>
> [...]
* Re: [PATCH v2 0/2] mm: khugepaged: cleanup and a minor tuning in THP
2021-04-07 3:05 [PATCH v2 0/2] mm: khugepaged: cleanup and a minor tuning in THP yanfei.xu
2021-04-07 3:05 ` [PATCH v2 1/2] mm: khugepaged: use macro to align addresses yanfei.xu
2021-04-07 3:05 ` [PATCH v2 2/2] mm: khugepaged: check MMF_DISABLE_THP ahead of iterating over vmas yanfei.xu
@ 2021-04-14 2:14 ` Xu, Yanfei
2 siblings, 0 replies; 6+ messages in thread
From: Xu, Yanfei @ 2021-04-14 2:14 UTC (permalink / raw)
To: shy828301, Andrew Morton; +Cc: linux-mm, linux-kernel
Gentle ping.
On 4/7/21 11:05 AM, yanfei.xu@windriver.com wrote:
> From: Yanfei Xu <yanfei.xu@windriver.com>
>
> v1-->v2:
> 1. Correct the wrong location that the goto jumps to.
> 2. Keep the cond_resched() that was dropped in v1.
>
> Thanks to Yang for the review.
>
> Yanfei Xu (2):
> mm: khugepaged: use macro to align addresses
> mm: khugepaged: check MMF_DISABLE_THP ahead of iterating over vmas
>
> mm/khugepaged.c | 29 +++++++++++++++--------------
> 1 file changed, 15 insertions(+), 14 deletions(-)
>