* Re: [PATCHv9-rebased2 01/37] mm, thp: make swapin readahead under down_read of mmap_sem [not found] <04f701d1c797$1ebe6b80$5c3b4280$@alibaba-inc.com> @ 2016-06-16 6:52 ` Hillf Danton 2016-06-16 10:08 ` Kirill A. Shutemov 0 siblings, 1 reply; 7+ messages in thread From: Hillf Danton @ 2016-06-16 6:52 UTC (permalink / raw) To: 'Ebru Akagunduz', Kirill A. Shutemov; +Cc: linux-kernel, linux-mm > > From: Ebru Akagunduz <ebru.akagunduz@gmail.com> > > Currently khugepaged makes swapin readahead under down_write. This patch > supplies to make swapin readahead under down_read instead of down_write. > > The patch was tested with a test program that allocates 800MB of memory, > writes to it, and then sleeps. The system was forced to swap out all. > Afterwards, the test program touches the area by writing, it skips a page > in each 20 pages of the area. > > Link: http://lkml.kernel.org/r/1464335964-6510-4-git-send-email-ebru.akagunduz@gmail.com > Signed-off-by: Ebru Akagunduz <ebru.akagunduz@gmail.com> > Cc: Hugh Dickins <hughd@google.com> > Cc: Rik van Riel <riel@redhat.com> > Cc: "Kirill A. 
Shutemov" <kirill.shutemov@linux.intel.com> > Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> > Cc: Andrea Arcangeli <aarcange@redhat.com> > Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> > Cc: Cyrill Gorcunov <gorcunov@openvz.org> > Cc: Mel Gorman <mgorman@suse.de> > Cc: David Rientjes <rientjes@google.com> > Cc: Vlastimil Babka <vbabka@suse.cz> > Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> > Cc: Johannes Weiner <hannes@cmpxchg.org> > Cc: Michal Hocko <mhocko@suse.cz> > Cc: Minchan Kim <minchan.kim@gmail.com> > Signed-off-by: Andrew Morton <akpm@linux-foundation.org> > --- > mm/huge_memory.c | 92 ++++++++++++++++++++++++++++++++++++++------------------ > 1 file changed, 63 insertions(+), 29 deletions(-) > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c > index f2bc57c45d2f..96dfe3f09bf6 100644 > --- a/mm/huge_memory.c > +++ b/mm/huge_memory.c > @@ -2378,6 +2378,35 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) > } > > /* > + * If mmap_sem temporarily dropped, revalidate vma > + * before taking mmap_sem. See below > + * Return 0 if succeeds, otherwise return none-zero > + * value (scan code). > + */ > + > +static int hugepage_vma_revalidate(struct mm_struct *mm, > + struct vm_area_struct *vma, > + unsigned long address) > +{ > + unsigned long hstart, hend; > + > + if (unlikely(khugepaged_test_exit(mm))) > + return SCAN_ANY_PROCESS; > + > + vma = find_vma(mm, address); > + if (!vma) > + return SCAN_VMA_NULL; > + > + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; > + hend = vma->vm_end & HPAGE_PMD_MASK; > + if (address < hstart || address + HPAGE_PMD_SIZE > hend) > + return SCAN_ADDRESS_RANGE; > + if (!hugepage_vma_check(vma)) > + return SCAN_VMA_CHECK; > + return 0; > +} > + > +/* > * Bring missing pages in from swap, to complete THP collapse. > * Only done if khugepaged_scan_pmd believes it is worthwhile. 
> * > @@ -2385,7 +2414,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) > * but with mmap_sem held to protect against vma changes. > */ > > -static void __collapse_huge_page_swapin(struct mm_struct *mm, > +static bool __collapse_huge_page_swapin(struct mm_struct *mm, > struct vm_area_struct *vma, > unsigned long address, pmd_t *pmd) > { > @@ -2401,11 +2430,18 @@ static void __collapse_huge_page_swapin(struct mm_struct *mm, > continue; > swapped_in++; > ret = do_swap_page(mm, vma, _address, pte, pmd, > - FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT, > + FAULT_FLAG_ALLOW_RETRY, Add a description in change log for it please. > pteval); > + /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ > + if (ret & VM_FAULT_RETRY) { > + down_read(&mm->mmap_sem); > + /* vma is no longer available, don't continue to swapin */ > + if (hugepage_vma_revalidate(mm, vma, address)) > + return false; Revalidate vma _after_ acquiring mmap_sem, but the above comment says _before_. > + } > if (ret & VM_FAULT_ERROR) { > trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); > - return; > + return false; > } > /* pte is unmapped now, we need to map it */ > pte = pte_offset_map(pmd, _address); > @@ -2413,6 +2449,7 @@ static void __collapse_huge_page_swapin(struct mm_struct *mm, > pte--; > pte_unmap(pte); > trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1); > + return true; > } > > static void collapse_huge_page(struct mm_struct *mm, > @@ -2427,7 +2464,6 @@ static void collapse_huge_page(struct mm_struct *mm, > struct page *new_page; > spinlock_t *pmd_ptl, *pte_ptl; > int isolated = 0, result = 0; > - unsigned long hstart, hend; > struct mem_cgroup *memcg; > unsigned long mmun_start; /* For mmu_notifiers */ > unsigned long mmun_end; /* For mmu_notifiers */ > @@ -2450,39 +2486,37 @@ static void collapse_huge_page(struct mm_struct *mm, > goto out_nolock; > } > > - /* > - * Prevent all access to pagetables with the exception of > - * gup_fast later hanlded by the 
ptep_clear_flush and the VM > - * handled by the anon_vma lock + PG_lock. > - */ > - down_write(&mm->mmap_sem); > - if (unlikely(khugepaged_test_exit(mm))) { > - result = SCAN_ANY_PROCESS; > + down_read(&mm->mmap_sem); > + result = hugepage_vma_revalidate(mm, vma, address); > + if (result) > goto out; > - } > > - vma = find_vma(mm, address); > - if (!vma) { > - result = SCAN_VMA_NULL; > - goto out; > - } > - hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; > - hend = vma->vm_end & HPAGE_PMD_MASK; > - if (address < hstart || address + HPAGE_PMD_SIZE > hend) { > - result = SCAN_ADDRESS_RANGE; > - goto out; > - } > - if (!hugepage_vma_check(vma)) { > - result = SCAN_VMA_CHECK; > - goto out; > - } > pmd = mm_find_pmd(mm, address); > if (!pmd) { > result = SCAN_PMD_NULL; > goto out; > } > > - __collapse_huge_page_swapin(mm, vma, address, pmd); > + /* > + * __collapse_huge_page_swapin always returns with mmap_sem > + * locked. If it fails, release mmap_sem and jump directly > + * label out. Continuing to collapse causes inconsistency. > + */ > + if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { > + up_read(&mm->mmap_sem); > + goto out; Jump out with mmap_sem released, > + } > + > + up_read(&mm->mmap_sem); > + /* > + * Prevent all access to pagetables with the exception of > + * gup_fast later handled by the ptep_clear_flush and the VM > + * handled by the anon_vma lock + PG_lock. > + */ > + down_write(&mm->mmap_sem); > + result = hugepage_vma_revalidate(mm, vma, address); > + if (result) > + goto out; but jump out again with mmap_sem held. They are cleaned up in subsequent darns? > > anon_vma_lock_write(vma->anon_vma); > > -- > 2.8.1 ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCHv9-rebased2 01/37] mm, thp: make swapin readahead under down_read of mmap_sem 2016-06-16 6:52 ` [PATCHv9-rebased2 01/37] mm, thp: make swapin readahead under down_read of mmap_sem Hillf Danton @ 2016-06-16 10:08 ` Kirill A. Shutemov 2016-06-18 19:09 ` Ebru Akagunduz 0 siblings, 1 reply; 7+ messages in thread From: Kirill A. Shutemov @ 2016-06-16 10:08 UTC (permalink / raw) To: Hillf Danton Cc: 'Ebru Akagunduz', Kirill A. Shutemov, linux-kernel, linux-mm On Thu, Jun 16, 2016 at 02:52:52PM +0800, Hillf Danton wrote: > > > > From: Ebru Akagunduz <ebru.akagunduz@gmail.com> > > > > Currently khugepaged makes swapin readahead under down_write. This patch > > supplies to make swapin readahead under down_read instead of down_write. > > > > The patch was tested with a test program that allocates 800MB of memory, > > writes to it, and then sleeps. The system was forced to swap out all. > > Afterwards, the test program touches the area by writing, it skips a page > > in each 20 pages of the area. > > > > Link: http://lkml.kernel.org/r/1464335964-6510-4-git-send-email-ebru.akagunduz@gmail.com > > Signed-off-by: Ebru Akagunduz <ebru.akagunduz@gmail.com> > > Cc: Hugh Dickins <hughd@google.com> > > Cc: Rik van Riel <riel@redhat.com> > > Cc: "Kirill A. 
Shutemov" <kirill.shutemov@linux.intel.com> > > Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> > > Cc: Andrea Arcangeli <aarcange@redhat.com> > > Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> > > Cc: Cyrill Gorcunov <gorcunov@openvz.org> > > Cc: Mel Gorman <mgorman@suse.de> > > Cc: David Rientjes <rientjes@google.com> > > Cc: Vlastimil Babka <vbabka@suse.cz> > > Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> > > Cc: Johannes Weiner <hannes@cmpxchg.org> > > Cc: Michal Hocko <mhocko@suse.cz> > > Cc: Minchan Kim <minchan.kim@gmail.com> > > Signed-off-by: Andrew Morton <akpm@linux-foundation.org> > > --- > > mm/huge_memory.c | 92 ++++++++++++++++++++++++++++++++++++++------------------ > > 1 file changed, 63 insertions(+), 29 deletions(-) > > > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c > > index f2bc57c45d2f..96dfe3f09bf6 100644 > > --- a/mm/huge_memory.c > > +++ b/mm/huge_memory.c > > @@ -2378,6 +2378,35 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) > > } > > > > /* > > + * If mmap_sem temporarily dropped, revalidate vma > > + * before taking mmap_sem. > > See below > > @@ -2401,11 +2430,18 @@ static void __collapse_huge_page_swapin(struct mm_struct *mm, > > continue; > > swapped_in++; > > ret = do_swap_page(mm, vma, _address, pte, pmd, > > - FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT, > > + FAULT_FLAG_ALLOW_RETRY, > > Add a description in change log for it please. Ebru, would you address it? > > pteval); > > + /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ > > + if (ret & VM_FAULT_RETRY) { > > + down_read(&mm->mmap_sem); > > + /* vma is no longer available, don't continue to swapin */ > > + if (hugepage_vma_revalidate(mm, vma, address)) > > + return false; > > Revalidate vma _after_ acquiring mmap_sem, but the above comment says _before_. Ditto. 
> > + if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { > > + up_read(&mm->mmap_sem); > > + goto out; > > Jump out with mmap_sem released, > > > + result = hugepage_vma_revalidate(mm, vma, address); > > + if (result) > > + goto out; > > but jump out again with mmap_sem held. > > They are cleaned up in subsequent darns? I didn't fold fixups for these > -- Kirill A. Shutemov ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCHv9-rebased2 01/37] mm, thp: make swapin readahead under down_read of mmap_sem 2016-06-16 10:08 ` Kirill A. Shutemov @ 2016-06-18 19:09 ` Ebru Akagunduz 2016-06-20 2:51 ` Hillf Danton 2016-06-20 11:15 ` Michal Hocko 0 siblings, 2 replies; 7+ messages in thread From: Ebru Akagunduz @ 2016-06-18 19:09 UTC (permalink / raw) To: Kirill A. Shutemov, Hillf Danton; +Cc: linux-kernel, linux-mm On Thu, Jun 16, 2016 at 01:08:54PM +0300, Kirill A. Shutemov wrote: > On Thu, Jun 16, 2016 at 02:52:52PM +0800, Hillf Danton wrote: > > > > > > From: Ebru Akagunduz <ebru.akagunduz@gmail.com> > > > > > > Currently khugepaged makes swapin readahead under down_write. This patch > > > supplies to make swapin readahead under down_read instead of down_write. > > > > > > The patch was tested with a test program that allocates 800MB of memory, > > > writes to it, and then sleeps. The system was forced to swap out all. > > > Afterwards, the test program touches the area by writing, it skips a page > > > in each 20 pages of the area. > > > > > > Link: http://lkml.kernel.org/r/1464335964-6510-4-git-send-email-ebru.akagunduz@gmail.com > > > Signed-off-by: Ebru Akagunduz <ebru.akagunduz@gmail.com> > > > Cc: Hugh Dickins <hughd@google.com> > > > Cc: Rik van Riel <riel@redhat.com> > > > Cc: "Kirill A. 
Shutemov" <kirill.shutemov@linux.intel.com> > > > Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> > > > Cc: Andrea Arcangeli <aarcange@redhat.com> > > > Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> > > > Cc: Cyrill Gorcunov <gorcunov@openvz.org> > > > Cc: Mel Gorman <mgorman@suse.de> > > > Cc: David Rientjes <rientjes@google.com> > > > Cc: Vlastimil Babka <vbabka@suse.cz> > > > Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> > > > Cc: Johannes Weiner <hannes@cmpxchg.org> > > > Cc: Michal Hocko <mhocko@suse.cz> > > > Cc: Minchan Kim <minchan.kim@gmail.com> > > > Signed-off-by: Andrew Morton <akpm@linux-foundation.org> > > > --- > > > mm/huge_memory.c | 92 ++++++++++++++++++++++++++++++++++++++------------------ > > > 1 file changed, 63 insertions(+), 29 deletions(-) > > > > > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c > > > index f2bc57c45d2f..96dfe3f09bf6 100644 > > > --- a/mm/huge_memory.c > > > +++ b/mm/huge_memory.c > > > @@ -2378,6 +2378,35 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) > > > } > > > > > > /* > > > + * If mmap_sem temporarily dropped, revalidate vma > > > + * before taking mmap_sem. > > > > See below > > > > @@ -2401,11 +2430,18 @@ static void __collapse_huge_page_swapin(struct mm_struct *mm, > > > continue; > > > swapped_in++; > > > ret = do_swap_page(mm, vma, _address, pte, pmd, > > > - FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT, > > > + FAULT_FLAG_ALLOW_RETRY, > > > > Add a description in change log for it please. > > Ebru, would you address it? > This changelog really seems poor. Is there a way to update only changelog of the commit? I tried to use git rebase to amend commit, however I could not rebase. This patch only needs better changelog. I would like to update it as follows, if you would like to too: " Currently khugepaged makes swapin readahead under down_write. This patch supplies to make swapin readahead under down_read instead of down_write. Along swapin, we can need to drop and re-take mmap_sem. 
Therefore we have to be sure vma is consistent. This patch adds a helper function to validate vma and also supplies that async swapin should not be performed without waiting. The patch was tested with a test program that allocates 800MB of memory, writes to it, and then sleeps. The system was forced to swap out all. Afterwards, the test program touches the area by writing, it skips a page in each 20 pages of the area. " Could you please suggest me a way to replace above changelog with the old? > > > pteval); > > > + /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ > > > + if (ret & VM_FAULT_RETRY) { > > > + down_read(&mm->mmap_sem); > > > + /* vma is no longer available, don't continue to swapin */ > > > + if (hugepage_vma_revalidate(mm, vma, address)) > > > + return false; > > > > Revalidate vma _after_ acquiring mmap_sem, but the above comment says _before_. > > Ditto. > > > > + if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { > > > + up_read(&mm->mmap_sem); > > > + goto out; > > > > Jump out with mmap_sem released, > > > > > + result = hugepage_vma_revalidate(mm, vma, address); > > > + if (result) > > > + goto out; > > > > but jump out again with mmap_sem held. > > > > They are cleaned up in subsequent darns? > Yes, that is reported and fixed here: http://git.kernel.org/cgit/linux/kernel/git/next/linux-next.git/commit/?id=fc7038a69cee6b817261f7cd805e9663fdc1075c However, the above comment inconsistency still there. 
I've added a fix patch: >From 404438ff1b0617cbf7434cba0c5a08f79ccb8a5d Mon Sep 17 00:00:00 2001 From: Ebru Akagunduz <ebru.akagunduz@gmail.com> Date: Sat, 18 Jun 2016 21:07:22 +0300 Subject: [PATCH] mm, thp: fix comment inconsistency for swapin readahead functions Signed-off-by: Ebru Akagunduz <ebru.akagunduz@gmail.com> --- mm/huge_memory.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index acd374e..f0d528e 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2436,9 +2436,10 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ if (ret & VM_FAULT_RETRY) { down_read(&mm->mmap_sem); - /* vma is no longer available, don't continue to swapin */ - if (hugepage_vma_revalidate(mm, address)) + if (hugepage_vma_revalidate(mm, address)) { + /* vma is no longer available, don't continue to swapin */ return false; + } } if (ret & VM_FAULT_ERROR) { trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); @@ -2513,8 +2514,8 @@ static void collapse_huge_page(struct mm_struct *mm, if (allocstall == curr_allocstall && swap != 0) { /* * __collapse_huge_page_swapin always returns with mmap_sem - * locked. If it fails, release mmap_sem and jump directly - * out. Continuing to collapse causes inconsistency. + * locked. If it fails, we release mmap_sem and jump out_nolock. + * Continuing to collapse causes inconsistency. */ if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { mem_cgroup_cancel_charge(new_page, memcg, true); -- 1.9.1 ^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCHv9-rebased2 01/37] mm, thp: make swapin readahead under down_read of mmap_sem 2016-06-18 19:09 ` Ebru Akagunduz @ 2016-06-20 2:51 ` Hillf Danton 2016-06-22 11:24 ` Ebru Akagunduz 2016-06-20 11:15 ` Michal Hocko 1 sibling, 1 reply; 7+ messages in thread From: Hillf Danton @ 2016-06-20 2:51 UTC (permalink / raw) To: 'Ebru Akagunduz', 'Kirill A. Shutemov' Cc: linux-kernel, linux-mm, Andrew Morton > > > > @@ -2401,11 +2430,18 @@ static void __collapse_huge_page_swapin(struct mm_struct *mm, > > > > continue; > > > > swapped_in++; > > > > ret = do_swap_page(mm, vma, _address, pte, pmd, > > > > - FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT, > > > > + FAULT_FLAG_ALLOW_RETRY, > > > > > > Add a description in change log for it please. > > > > Ebru, would you address it? > > > This changelog really seems poor. > Is there a way to update only changelog of the commit? > I tried to use git rebase to amend commit, however > I could not rebase. This patch only needs better changelog. > > I would like to update it as follows, if you would like to too: > > " > Currently khugepaged makes swapin readahead under down_write. This patch > supplies to make swapin readahead under down_read instead of down_write. > > Along swapin, we can need to drop and re-take mmap_sem. Therefore we > have to be sure vma is consistent. This patch adds a helper function > to validate vma and also supplies that async swapin should not be > performed without waiting. > > The patch was tested with a test program that allocates 800MB of memory, > writes to it, and then sleeps. The system was forced to swap out all. > Afterwards, the test program touches the area by writing, it skips a page > in each 20 pages of the area. > " > I like to ask again, why is FAULT_FLAG_RETRY_NOWAIT dropped? > Could you please suggest me a way to replace above changelog with the old? > We can ask Andrew for some advices. > > > > > > They are cleaned up in subsequent darns? 
> > > Yes, that is reported and fixed here: > http://git.kernel.org/cgit/linux/kernel/git/next/linux-next.git/commit/?id=fc7038a69cee6b817261f7cd805e9663fdc1075c > > However, the above comment inconsistency still there. > I've added a fix patch: > > From 404438ff1b0617cbf7434cba0c5a08f79ccb8a5d Mon Sep 17 00:00:00 2001 > From: Ebru Akagunduz <ebru.akagunduz@gmail.com> > Date: Sat, 18 Jun 2016 21:07:22 +0300 > Subject: [PATCH] mm, thp: fix comment inconsistency for swapin readahead > functions > Fill in change log please. thanks Hillf > Signed-off-by: Ebru Akagunduz <ebru.akagunduz@gmail.com> > --- > mm/huge_memory.c | 9 +++++---- > 1 file changed, 5 insertions(+), 4 deletions(-) > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c > index acd374e..f0d528e 100644 > --- a/mm/huge_memory.c > +++ b/mm/huge_memory.c > @@ -2436,9 +2436,10 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, > /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ > if (ret & VM_FAULT_RETRY) { > down_read(&mm->mmap_sem); > - /* vma is no longer available, don't continue to swapin */ > - if (hugepage_vma_revalidate(mm, address)) > + if (hugepage_vma_revalidate(mm, address)) { > + /* vma is no longer available, don't continue to swapin */ > return false; > + } > } > if (ret & VM_FAULT_ERROR) { > trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); > @@ -2513,8 +2514,8 @@ static void collapse_huge_page(struct mm_struct *mm, > if (allocstall == curr_allocstall && swap != 0) { > /* > * __collapse_huge_page_swapin always returns with mmap_sem > - * locked. If it fails, release mmap_sem and jump directly > - * out. Continuing to collapse causes inconsistency. > + * locked. If it fails, we release mmap_sem and jump out_nolock. > + * Continuing to collapse causes inconsistency. > */ > if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { > mem_cgroup_cancel_charge(new_page, memcg, true); > -- > 1.9.1 ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCHv9-rebased2 01/37] mm, thp: make swapin readahead under down_read of mmap_sem 2016-06-20 2:51 ` Hillf Danton @ 2016-06-22 11:24 ` Ebru Akagunduz 0 siblings, 0 replies; 7+ messages in thread From: Ebru Akagunduz @ 2016-06-22 11:24 UTC (permalink / raw) To: Hillf Danton, Kirill A. Shutemov; +Cc: hughd, linux-kernel, linux-mm, akpm On Mon, Jun 20, 2016 at 10:51:25AM +0800, Hillf Danton wrote: > > > > > @@ -2401,11 +2430,18 @@ static void __collapse_huge_page_swapin(struct mm_struct *mm, > > > > > continue; > > > > > swapped_in++; > > > > > ret = do_swap_page(mm, vma, _address, pte, pmd, > > > > > - FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT, > > > > > + FAULT_FLAG_ALLOW_RETRY, > > > > > > > > Add a description in change log for it please. > > > > > > Ebru, would you address it? > > > > > This changelog really seems poor. > > Is there a way to update only changelog of the commit? > > I tried to use git rebase to amend commit, however > > I could not rebase. This patch only needs better changelog. > > > > I would like to update it as follows, if you would like to too: > > > > " > > Currently khugepaged makes swapin readahead under down_write. This patch > > supplies to make swapin readahead under down_read instead of down_write. > > > > Along swapin, we can need to drop and re-take mmap_sem. Therefore we > > have to be sure vma is consistent. This patch adds a helper function > > to validate vma and also supplies that async swapin should not be > > performed without waiting. > > > > The patch was tested with a test program that allocates 800MB of memory, > > writes to it, and then sleeps. The system was forced to swap out all. > > Afterwards, the test program touches the area by writing, it skips a page > > in each 20 pages of the area. > > " > > > I like to ask again, why is FAULT_FLAG_RETRY_NOWAIT dropped? > I dropped it regarding to Hugh's concern. If I understood correctly, he said async swapin without waiting can cause waste. 
When I looked the code path, it seemed subtle to me. There was a large discussion, below is Hugh's concern: " Doesn't this imply that __collapse_huge_page_swapin() will initiate all the necessary swapins for a THP, then (given the FAULT_FLAG_ALLOW_RETRY) not wait for them to complete, so khugepaged will give up on that extent and move on to another; then after another full circuit of all the mms it needs to examine, it will arrive back at this extent and build a THP from the swapins it arranged last time. Which may work well when a system transitions from busy+swappingout to idle+swappingin, but isn't that rather a special case? It feels (meaning, I've not measured at all) as if the inbetween busyish case will waste a lot of I/O and memory on swapins that have to be discarded again before khugepaged has made its sedate way back to slotting them in. " Cc'ed Hugh. Maybe I misunderstood him. > > Could you please suggest me a way to replace above changelog with the old? > > > We can ask Andrew for some advices. > > > > > > > > > They are cleaned up in subsequent darns? > > > > > Yes, that is reported and fixed here: > > http://git.kernel.org/cgit/linux/kernel/git/next/linux-next.git/commit/?id=fc7038a69cee6b817261f7cd805e9663fdc1075c > > > > However, the above comment inconsistency still there. > > I've added a fix patch: > > > > From 404438ff1b0617cbf7434cba0c5a08f79ccb8a5d Mon Sep 17 00:00:00 2001 > > From: Ebru Akagunduz <ebru.akagunduz@gmail.com> > > Date: Sat, 18 Jun 2016 21:07:22 +0300 > > Subject: [PATCH] mm, thp: fix comment inconsistency for swapin readahead > > functions > > > Fill in change log please. > I filled it and sent as part of a series. Cc'ed you in that patch. 
> thanks > Hillf > > > Signed-off-by: Ebru Akagunduz <ebru.akagunduz@gmail.com> > > --- > > mm/huge_memory.c | 9 +++++---- > > 1 file changed, 5 insertions(+), 4 deletions(-) > > > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c > > index acd374e..f0d528e 100644 > > --- a/mm/huge_memory.c > > +++ b/mm/huge_memory.c > > @@ -2436,9 +2436,10 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, > > /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ > > if (ret & VM_FAULT_RETRY) { > > down_read(&mm->mmap_sem); > > - /* vma is no longer available, don't continue to swapin */ > > - if (hugepage_vma_revalidate(mm, address)) > > + if (hugepage_vma_revalidate(mm, address)) { > > + /* vma is no longer available, don't continue to swapin */ > > return false; > > + } > > } > > if (ret & VM_FAULT_ERROR) { > > trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); > > @@ -2513,8 +2514,8 @@ static void collapse_huge_page(struct mm_struct *mm, > > if (allocstall == curr_allocstall && swap != 0) { > > /* > > * __collapse_huge_page_swapin always returns with mmap_sem > > - * locked. If it fails, release mmap_sem and jump directly > > - * out. Continuing to collapse causes inconsistency. > > + * locked. If it fails, we release mmap_sem and jump out_nolock. > > + * Continuing to collapse causes inconsistency. > > */ > > if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { > > mem_cgroup_cancel_charge(new_page, memcg, true); > > -- > > 1.9.1 > ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCHv9-rebased2 01/37] mm, thp: make swapin readahead under down_read of mmap_sem 2016-06-18 19:09 ` Ebru Akagunduz 2016-06-20 2:51 ` Hillf Danton @ 2016-06-20 11:15 ` Michal Hocko 1 sibling, 0 replies; 7+ messages in thread From: Michal Hocko @ 2016-06-20 11:15 UTC (permalink / raw) To: Ebru Akagunduz; +Cc: Kirill A. Shutemov, Hillf Danton, linux-kernel, linux-mm On Sat 18-06-16 22:09:51, Ebru Akagunduz wrote: [...] > This changelog really seems poor. > Is there a way to update only changelog of the commit? git commit --amend would do that for the current commit. You can also tell git rebase -i to 'reword' a particular commits. > Could you please suggest me a way to replace above changelog with the old? Just tell Andrew, he can replace the changelog in his tree. -- Michal Hocko SUSE Labs ^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCHv9 00/32] THP-enabled tmpfs/shmem using compound pages @ 2016-06-06 14:06 Kirill A. Shutemov 2016-06-15 20:06 ` [PATCHv9-rebased2 00/37] " Kirill A. Shutemov 0 siblings, 1 reply; 7+ messages in thread From: Kirill A. Shutemov @ 2016-06-06 14:06 UTC (permalink / raw) To: Hugh Dickins, Andrea Arcangeli, Andrew Morton Cc: Dave Hansen, Vlastimil Babka, Christoph Lameter, Naoya Horiguchi, Jerome Marchand, Yang Shi, Sasha Levin, Andres Lagar-Cavilla, Ning Qu, linux-kernel, linux-mm, linux-fsdevel, Kirill A. Shutemov This is rebased version of my implementation of huge pages support for tmpfs. There are few fixes by Hugh since v8. Rebase on v4.7-rc1 was somewhat painful, because of changes in radix-tree API, but everything looks fine now. Andrew, please consider applying the patchset to -mm tree. The patchset is on top of v4.7-rc1 plus khugepaged updates from -mm tree. Git tree: git://git.kernel.org/pub/scm/linux/kernel/git/kas/linux.git hugetmpfs/v9 == Changelog == v9: - rebased to v4.7-rc1; - truncate_inode_pages_range() and invalidate_inode_pages2_range() are adjusted to use page_to_pgoff() (Hugh); - filemap: fix refcounting in error path in radix-tree operations (Hugh); - khugepaged: handle !PageUptodate() pages (due fallocate() ?)
during collapse (Hugh); - shmem_unused_huge_shrink: - fix shrinklist_len accounting (Hugh); - call find_lock_page() for aligned address, so we will not get tail page and don't crash in PageTransHuge() (Hugh); v8: - khugepaged updates: + mark collapsed page dirty, otherwise vmscan would discard it; + account pages to mapping->nrpages on shmem_charge; + fix a situation when not all tail pages put on radix tree on collapse; + fix off-by-one in loop-exit condition in khugepaged_scan_shmem(); + use radix_tree_iter_next/radix_tree_iter_retry instead of gotos; + fix build without CONFIG_SHMEM (again); - split huge pages beyond i_size under memory pressure; - disable huge tmpfs on Power, as it makes use of deposited page tables, we don't have; - fix filesystem size limit accounting; - mark page referenced on split_huge_pmd() if the pmd is young; - uncharge pages from shmem, removed during split_huge_page(); - make shmem_inode_info::lock irq-safe -- required by khugepaged; v7: - khugepaged updates: + fix page leak/page cache corruption on collapse fail; + filter out VMAs not suitable for huge pages due misaligned vm_pgoff; + fix build without CONFIG_SHMEM; + drop few over-protective checks; - fix bogus VM_BUG_ON() in __delete_from_page_cache(); v6: - experimental collapse support; - fix swapout mapped huge pages; - fix page leak in faultaround code; - fix excessive huge page allocation with huge=within_size; - rename VM_NO_THP to VM_NO_KHUGEPAGED; - fix condition in hugepage_madvise(); - accounting reworked again; v5: - add FileHugeMapped to /proc/PID/smaps; - make FileHugeMapped in meminfo aligned with other fields; - Documentation/vm/transhuge.txt updated; v4: - first four patches were applied to -mm tree; - drop pages beyond i_size on split_huge_pages; - few small random bugfixes; v3: - huge= mountoption now can have values always, within_size, advice and never; - sysctl handle is replaced with sysfs knob; - MADV_HUGEPAGE/MADV_NOHUGEPAGE is now respected on page allocation
via page fault; - mlock() handling had been fixed; - bunch of smaller bugfixes and cleanups. == Design overview == Huge pages are allocated by shmem when it's allowed (by mount option) and there's no entries for the range in radix-tree. Huge page is represented by HPAGE_PMD_NR entries in radix-tree. MM core maps a page with PMD if ->fault() returns huge page and the VMA is suitable for huge pages (size, alignment). There's no need into two requests to file system: filesystem returns huge page if it can, graceful fallback to small pages otherwise. As with DAX, split_huge_pmd() is implemented by unmapping the PMD: we can re-fault the page with PTEs later. Basic scheme for split_huge_page() is the same as for anon-THP. Few differences: - File pages are on radix-tree, so we have head->_count offset by HPAGE_PMD_NR. The count got distributed to small pages during split. - mapping->tree_lock prevents non-lockless access to pages under split over radix-tree; - Lockless access is prevented by setting the head->_count to 0 during split, so get_page_unless_zero() would fail; - After split, some pages can be beyond i_size. We drop them from radix-tree. - We don't setup migration entries. Just unmap pages. It helps handling cases when i_size is in the middle of the page: no need handle unmap pages beyond i_size manually. COW mapping handled on PTE-level. It's not clear how beneficial would be allocation of huge pages on COW faults. And it would require some code to make them work. I think at some point we can consider teaching khugepaged to collapse pages in COW mappings, but allocating huge on fault is probably overkill. As with anon THP, we mlock file huge page only if it mapped with PMD. PTE-mapped THPs are never mlocked. This way we can avoid all sorts of scenarios when we can leak mlocked page. As with anon THP, we split huge page on swap out. Truncate and punch hole that only cover part of THP range is implemented by zero out this part of THP. 
This have visible effect on fallocate(FALLOC_FL_PUNCH_HOLE) behaviour. As we don't really create hole in this case, lseek(SEEK_HOLE) may have inconsistent results depending what pages happened to be allocated. I don't think this will be a problem. We track per-super_block list of inodes which potentially have huge page partly beyond i_size. Under memory pressure or if we hit -ENOSPC, we split such pages in order to recovery memory. The list is per-sb, as we need to split a page from our filesystem if hit -ENOSPC (-o size= limit) during shmem_getpage_gfp() to free some space. == Patchset overview == [01/29] Update documentation on THP vs. mlock. I've posted it separately before. It can go in. [02-04/29] Rework fault path and rmap to handle file pmd. Unlike DAX with vm_ops->pmd_fault, we don't need to ask filesystem twice -- first for huge page and then for small. If ->fault happened to return huge page and VMA is suitable for mapping it as huge, we would do so. [05/29] Add support for huge file pages in rmap; [06-15/29] Various preparation of THP core for file pages. [16-20/29] Various preparation of MM core for file pages. [21-24/29] And finally, bring huge pages into tmpfs/shmem. [25/29] Wire up madvise() existing hints for file THP. We can implement fadvise() later. [26/29] Documentation update. [27-29/29] Extend khugepaged to support shmem/tmpfs. Hugh Dickins (1): shmem: get_unmapped_area align huge page Kirill A. 
Shutemov (31): thp, mlock: update unevictable-lru.txt mm: do not pass mm_struct into handle_mm_fault mm: introduce fault_env mm: postpone page table allocation until we have page to map rmap: support file thp mm: introduce do_set_pmd() thp, vmstats: add counters for huge file pages thp: support file pages in zap_huge_pmd() thp: handle file pages in split_huge_pmd() thp: handle file COW faults thp: skip file huge pmd on copy_huge_pmd() thp: prepare change_huge_pmd() for file thp thp: run vma_adjust_trans_huge() outside i_mmap_rwsem thp: file pages support for split_huge_page() thp, mlock: do not mlock PTE-mapped file huge pages vmscan: split file huge pages before paging them out page-flags: relax policy for PG_mappedtodisk and PG_reclaim radix-tree: implement radix_tree_maybe_preload_order() filemap: prepare find and delete operations for huge pages truncate: handle file thp mm, rmap: account shmem thp pages shmem: prepare huge= mount option and sysfs knob shmem: add huge pages support shmem, thp: respect MADV_{NO,}HUGEPAGE for file mappings thp: extract khugepaged from mm/huge_memory.c khugepaged: move up_read(mmap_sem) out of khugepaged_alloc_page() shmem: make shmem_inode_info::lock irq-safe khugepaged: add support of collapse for tmpfs/shmem pages thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE shmem: split huge pages beyond i_size under memory pressure thp: update Documentation/{vm/transhuge,filesystems/proc}.txt Documentation/filesystems/Locking | 10 +- Documentation/filesystems/proc.txt | 9 + Documentation/vm/transhuge.txt | 128 ++- Documentation/vm/unevictable-lru.txt | 21 + arch/alpha/mm/fault.c | 2 +- arch/arc/mm/fault.c | 2 +- arch/arm/mm/fault.c | 2 +- arch/arm64/mm/fault.c | 2 +- arch/avr32/mm/fault.c | 2 +- arch/cris/mm/fault.c | 2 +- arch/frv/mm/fault.c | 2 +- arch/hexagon/mm/vm_fault.c | 2 +- arch/ia64/mm/fault.c | 2 +- arch/m32r/mm/fault.c | 2 +- arch/m68k/mm/fault.c | 2 +- arch/metag/mm/fault.c | 2 +- arch/microblaze/mm/fault.c | 2 +- 
arch/mips/mm/fault.c | 2 +- arch/mn10300/mm/fault.c | 2 +- arch/nios2/mm/fault.c | 2 +- arch/openrisc/mm/fault.c | 2 +- arch/parisc/mm/fault.c | 2 +- arch/powerpc/mm/copro_fault.c | 2 +- arch/powerpc/mm/fault.c | 2 +- arch/s390/mm/fault.c | 2 +- arch/score/mm/fault.c | 2 +- arch/sh/mm/fault.c | 2 +- arch/sparc/mm/fault_32.c | 4 +- arch/sparc/mm/fault_64.c | 2 +- arch/tile/mm/fault.c | 2 +- arch/um/kernel/trap.c | 2 +- arch/unicore32/mm/fault.c | 2 +- arch/x86/mm/fault.c | 2 +- arch/xtensa/mm/fault.c | 2 +- drivers/base/node.c | 13 +- drivers/char/mem.c | 24 + drivers/iommu/amd_iommu_v2.c | 3 +- drivers/iommu/intel-svm.c | 2 +- fs/proc/meminfo.c | 7 +- fs/proc/task_mmu.c | 10 +- fs/userfaultfd.c | 22 +- include/linux/huge_mm.h | 36 +- include/linux/khugepaged.h | 6 + include/linux/mm.h | 51 +- include/linux/mmzone.h | 4 +- include/linux/page-flags.h | 19 +- include/linux/radix-tree.h | 1 + include/linux/rmap.h | 2 +- include/linux/shmem_fs.h | 45 +- include/linux/userfaultfd_k.h | 8 +- include/linux/vm_event_item.h | 7 + include/trace/events/huge_memory.h | 3 +- ipc/shm.c | 10 +- lib/radix-tree.c | 84 +- mm/Kconfig | 8 + mm/Makefile | 2 +- mm/filemap.c | 217 ++-- mm/gup.c | 7 +- mm/huge_memory.c | 2102 ++++++---------------------------- mm/internal.h | 4 +- mm/khugepaged.c | 1911 +++++++++++++++++++++++++++++++ mm/ksm.c | 5 +- mm/memory.c | 879 +++++++------- mm/mempolicy.c | 4 +- mm/migrate.c | 5 +- mm/mmap.c | 26 +- mm/nommu.c | 3 +- mm/page-writeback.c | 1 + mm/page_alloc.c | 21 + mm/rmap.c | 78 +- mm/shmem.c | 918 +++++++++++++-- mm/swap.c | 2 + mm/truncate.c | 28 +- mm/util.c | 6 + mm/vmscan.c | 6 + mm/vmstat.c | 4 + 76 files changed, 4333 insertions(+), 2491 deletions(-) create mode 100644 mm/khugepaged.c -- 2.8.1 ^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCHv9-rebased2 00/37] THP-enabled tmpfs/shmem using compound pages 2016-06-06 14:06 [PATCHv9 00/32] THP-enabled tmpfs/shmem using compound pages Kirill A. Shutemov @ 2016-06-15 20:06 ` Kirill A. Shutemov 2016-06-15 20:06 ` [PATCHv9-rebased2 01/37] mm, thp: make swapin readahead under down_read of mmap_sem Kirill A. Shutemov 0 siblings, 1 reply; 7+ messages in thread From: Kirill A. Shutemov @ 2016-06-15 20:06 UTC (permalink / raw) To: Hugh Dickins, Andrea Arcangeli, Andrew Morton Cc: Dave Hansen, Vlastimil Babka, Christoph Lameter, Naoya Horiguchi, Jerome Marchand, Yang Shi, Sasha Levin, Andres Lagar-Cavilla, Ning Qu, linux-kernel, linux-mm, linux-fsdevel, Ebru Akagunduz, Kirill A. Shutemov Andrew, As requested, here's refreshed version of the patchset. During preparation, Ebru mentioned (on IRC) that she wanted to withdraw mm-thp-avoid-unnecessary-swapin-in-khugepaged.patch from the mm tree, but it's difficult in the current state of the tree. So I did rebase removing the patch. The patchset below is aimed to replace patches in your series, starting with mm-vmstat-calculate-particular-vm-event.patch (it's not necessary after mm-thp-avoid-unnecessary-swapin-in-khugepaged.patch removal) up to end of my patchset. I also took opportunity to address Vlastimil's concern about 'pmd' re-validation after mmap_sem drop (you mentioned it in series file). See patch 05/37. I did a few sanity checks. Everything looks good. Hopefully, I didn't screw up anything on the way. :) Andrew Morton (1): mm-thp-make-swapin-readahead-under-down_read-of-mmap_sem-fix-2-fix Ebru Akagunduz (2): mm, thp: make swapin readahead under down_read of mmap_sem mm, thp: fix locking inconsistency in collapse_huge_page Hugh Dickins (1): shmem: get_unmapped_area align huge page Kirill A. 
Shutemov (33): mm-thp-make-swapin-readahead-under-down_read-of-mmap_sem-fix khugepaged: recheck pmd after mmap_sem re-acquired thp, mlock: update unevictable-lru.txt mm: do not pass mm_struct into handle_mm_fault mm: introduce fault_env mm: postpone page table allocation until we have page to map rmap: support file thp mm: introduce do_set_pmd() thp, vmstats: add counters for huge file pages thp: support file pages in zap_huge_pmd() thp: handle file pages in split_huge_pmd() thp: handle file COW faults thp: skip file huge pmd on copy_huge_pmd() thp: prepare change_huge_pmd() for file thp thp: run vma_adjust_trans_huge() outside i_mmap_rwsem thp: file pages support for split_huge_page() thp, mlock: do not mlock PTE-mapped file huge pages vmscan: split file huge pages before paging them out page-flags: relax policy for PG_mappedtodisk and PG_reclaim radix-tree: implement radix_tree_maybe_preload_order() filemap: prepare find and delete operations for huge pages truncate: handle file thp mm, rmap: account shmem thp pages shmem: prepare huge= mount option and sysfs knob shmem: add huge pages support shmem, thp: respect MADV_{NO,}HUGEPAGE for file mappings thp: extract khugepaged from mm/huge_memory.c khugepaged: move up_read(mmap_sem) out of khugepaged_alloc_page() shmem: make shmem_inode_info::lock irq-safe khugepaged: add support of collapse for tmpfs/shmem pages thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE shmem: split huge pages beyond i_size under memory pressure thp: update Documentation/{vm/transhuge,filesystems/proc}.txt Documentation/filesystems/Locking | 10 +- Documentation/filesystems/proc.txt | 9 + Documentation/vm/transhuge.txt | 128 ++- Documentation/vm/unevictable-lru.txt | 21 + arch/alpha/mm/fault.c | 2 +- arch/arc/mm/fault.c | 2 +- arch/arm/mm/fault.c | 2 +- arch/arm64/mm/fault.c | 2 +- arch/avr32/mm/fault.c | 2 +- arch/cris/mm/fault.c | 2 +- arch/frv/mm/fault.c | 2 +- arch/hexagon/mm/vm_fault.c | 2 +- arch/ia64/mm/fault.c | 2 +- 
arch/m32r/mm/fault.c | 2 +- arch/m68k/mm/fault.c | 2 +- arch/metag/mm/fault.c | 2 +- arch/microblaze/mm/fault.c | 2 +- arch/mips/mm/fault.c | 2 +- arch/mn10300/mm/fault.c | 2 +- arch/nios2/mm/fault.c | 2 +- arch/openrisc/mm/fault.c | 2 +- arch/parisc/mm/fault.c | 2 +- arch/powerpc/mm/copro_fault.c | 2 +- arch/powerpc/mm/fault.c | 2 +- arch/s390/mm/fault.c | 2 +- arch/score/mm/fault.c | 2 +- arch/sh/mm/fault.c | 2 +- arch/sparc/mm/fault_32.c | 4 +- arch/sparc/mm/fault_64.c | 2 +- arch/tile/mm/fault.c | 2 +- arch/um/kernel/trap.c | 2 +- arch/unicore32/mm/fault.c | 2 +- arch/x86/mm/fault.c | 2 +- arch/xtensa/mm/fault.c | 2 +- drivers/base/node.c | 13 +- drivers/char/mem.c | 24 + drivers/iommu/amd_iommu_v2.c | 3 +- drivers/iommu/intel-svm.c | 2 +- fs/proc/meminfo.c | 7 +- fs/proc/task_mmu.c | 10 +- fs/userfaultfd.c | 22 +- include/linux/huge_mm.h | 36 +- include/linux/khugepaged.h | 5 + include/linux/mm.h | 51 +- include/linux/mmzone.h | 4 +- include/linux/page-flags.h | 19 +- include/linux/radix-tree.h | 1 + include/linux/rmap.h | 2 +- include/linux/shmem_fs.h | 45 +- include/linux/userfaultfd_k.h | 8 +- include/linux/vm_event_item.h | 7 + include/trace/events/huge_memory.h | 3 +- ipc/shm.c | 10 +- lib/radix-tree.c | 84 +- mm/Kconfig | 8 + mm/Makefile | 2 +- mm/filemap.c | 217 ++-- mm/gup.c | 7 +- mm/huge_memory.c | 2048 ++++++---------------------------- mm/internal.h | 4 +- mm/khugepaged.c | 1913 +++++++++++++++++++++++++++++++ mm/ksm.c | 5 +- mm/memory.c | 860 +++++++------- mm/mempolicy.c | 2 +- mm/migrate.c | 5 +- mm/mmap.c | 26 +- mm/nommu.c | 3 +- mm/page-writeback.c | 1 + mm/page_alloc.c | 21 + mm/rmap.c | 78 +- mm/shmem.c | 918 +++++++++++++-- mm/swap.c | 2 + mm/truncate.c | 28 +- mm/util.c | 6 + mm/vmscan.c | 6 + mm/vmstat.c | 4 + 76 files changed, 4319 insertions(+), 2431 deletions(-) create mode 100644 mm/khugepaged.c -- 2.8.1 ^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCHv9-rebased2 01/37] mm, thp: make swapin readahead under down_read of mmap_sem 2016-06-15 20:06 ` [PATCHv9-rebased2 00/37] " Kirill A. Shutemov @ 2016-06-15 20:06 ` Kirill A. Shutemov 0 siblings, 0 replies; 7+ messages in thread From: Kirill A. Shutemov @ 2016-06-15 20:06 UTC (permalink / raw) To: Hugh Dickins, Andrea Arcangeli, Andrew Morton Cc: Dave Hansen, Vlastimil Babka, Christoph Lameter, Naoya Horiguchi, Jerome Marchand, Yang Shi, Sasha Levin, Andres Lagar-Cavilla, Ning Qu, linux-kernel, linux-mm, linux-fsdevel, Ebru Akagunduz, Rik van Riel, Kirill A. Shutemov, Joonsoo Kim, Cyrill Gorcunov, Mel Gorman, David Rientjes, Aneesh Kumar K . V, Johannes Weiner, Michal Hocko, Minchan Kim From: Ebru Akagunduz <ebru.akagunduz@gmail.com> Currently khugepaged makes swapin readahead under down_write. This patch supplies to make swapin readahead under down_read instead of down_write. The patch was tested with a test program that allocates 800MB of memory, writes to it, and then sleeps. The system was forced to swap out all. Afterwards, the test program touches the area by writing, it skips a page in each 20 pages of the area. Link: http://lkml.kernel.org/r/1464335964-6510-4-git-send-email-ebru.akagunduz@gmail.com Signed-off-by: Ebru Akagunduz <ebru.akagunduz@gmail.com> Cc: Hugh Dickins <hughd@google.com> Cc: Rik van Riel <riel@redhat.com> Cc: "Kirill A. 
Shutemov" <kirill.shutemov@linux.intel.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Cyrill Gorcunov <gorcunov@openvz.org> Cc: Mel Gorman <mgorman@suse.de> Cc: David Rientjes <rientjes@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@suse.cz> Cc: Minchan Kim <minchan.kim@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- mm/huge_memory.c | 92 ++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 63 insertions(+), 29 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f2bc57c45d2f..96dfe3f09bf6 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2378,6 +2378,35 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) } /* + * If mmap_sem temporarily dropped, revalidate vma + * before taking mmap_sem. + * Return 0 if succeeds, otherwise return none-zero + * value (scan code). + */ + +static int hugepage_vma_revalidate(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long address) +{ + unsigned long hstart, hend; + + if (unlikely(khugepaged_test_exit(mm))) + return SCAN_ANY_PROCESS; + + vma = find_vma(mm, address); + if (!vma) + return SCAN_VMA_NULL; + + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; + hend = vma->vm_end & HPAGE_PMD_MASK; + if (address < hstart || address + HPAGE_PMD_SIZE > hend) + return SCAN_ADDRESS_RANGE; + if (!hugepage_vma_check(vma)) + return SCAN_VMA_CHECK; + return 0; +} + +/* * Bring missing pages in from swap, to complete THP collapse. * Only done if khugepaged_scan_pmd believes it is worthwhile. * @@ -2385,7 +2414,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) * but with mmap_sem held to protect against vma changes. 
*/ -static void __collapse_huge_page_swapin(struct mm_struct *mm, +static bool __collapse_huge_page_swapin(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) { @@ -2401,11 +2430,18 @@ static void __collapse_huge_page_swapin(struct mm_struct *mm, continue; swapped_in++; ret = do_swap_page(mm, vma, _address, pte, pmd, - FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT, + FAULT_FLAG_ALLOW_RETRY, pteval); + /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ + if (ret & VM_FAULT_RETRY) { + down_read(&mm->mmap_sem); + /* vma is no longer available, don't continue to swapin */ + if (hugepage_vma_revalidate(mm, vma, address)) + return false; + } if (ret & VM_FAULT_ERROR) { trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); - return; + return false; } /* pte is unmapped now, we need to map it */ pte = pte_offset_map(pmd, _address); @@ -2413,6 +2449,7 @@ static void __collapse_huge_page_swapin(struct mm_struct *mm, pte--; pte_unmap(pte); trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1); + return true; } static void collapse_huge_page(struct mm_struct *mm, @@ -2427,7 +2464,6 @@ static void collapse_huge_page(struct mm_struct *mm, struct page *new_page; spinlock_t *pmd_ptl, *pte_ptl; int isolated = 0, result = 0; - unsigned long hstart, hend; struct mem_cgroup *memcg; unsigned long mmun_start; /* For mmu_notifiers */ unsigned long mmun_end; /* For mmu_notifiers */ @@ -2450,39 +2486,37 @@ static void collapse_huge_page(struct mm_struct *mm, goto out_nolock; } - /* - * Prevent all access to pagetables with the exception of - * gup_fast later hanlded by the ptep_clear_flush and the VM - * handled by the anon_vma lock + PG_lock. 
- */ - down_write(&mm->mmap_sem); - if (unlikely(khugepaged_test_exit(mm))) { - result = SCAN_ANY_PROCESS; + down_read(&mm->mmap_sem); + result = hugepage_vma_revalidate(mm, vma, address); + if (result) goto out; - } - vma = find_vma(mm, address); - if (!vma) { - result = SCAN_VMA_NULL; - goto out; - } - hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; - hend = vma->vm_end & HPAGE_PMD_MASK; - if (address < hstart || address + HPAGE_PMD_SIZE > hend) { - result = SCAN_ADDRESS_RANGE; - goto out; - } - if (!hugepage_vma_check(vma)) { - result = SCAN_VMA_CHECK; - goto out; - } pmd = mm_find_pmd(mm, address); if (!pmd) { result = SCAN_PMD_NULL; goto out; } - __collapse_huge_page_swapin(mm, vma, address, pmd); + /* + * __collapse_huge_page_swapin always returns with mmap_sem + * locked. If it fails, release mmap_sem and jump directly + * label out. Continuing to collapse causes inconsistency. + */ + if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { + up_read(&mm->mmap_sem); + goto out; + } + + up_read(&mm->mmap_sem); + /* + * Prevent all access to pagetables with the exception of + * gup_fast later handled by the ptep_clear_flush and the VM + * handled by the anon_vma lock + PG_lock. + */ + down_write(&mm->mmap_sem); + result = hugepage_vma_revalidate(mm, vma, address); + if (result) + goto out; anon_vma_lock_write(vma->anon_vma); -- 2.8.1 ^ permalink raw reply related [flat|nested] 7+ messages in thread
end of thread, other threads:[~2016-06-22 11:26 UTC | newest] Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed) -- links below jump to the message on this page -- [not found] <04f701d1c797$1ebe6b80$5c3b4280$@alibaba-inc.com> 2016-06-16 6:52 ` [PATCHv9-rebased2 01/37] mm, thp: make swapin readahead under down_read of mmap_sem Hillf Danton 2016-06-16 10:08 ` Kirill A. Shutemov 2016-06-18 19:09 ` Ebru Akagunduz 2016-06-20 2:51 ` Hillf Danton 2016-06-22 11:24 ` Ebru Akagunduz 2016-06-20 11:15 ` Michal Hocko 2016-06-06 14:06 [PATCHv9 00/32] THP-enabled tmpfs/shmem using compound pages Kirill A. Shutemov 2016-06-15 20:06 ` [PATCHv9-rebased2 00/37] " Kirill A. Shutemov 2016-06-15 20:06 ` [PATCHv9-rebased2 01/37] mm, thp: make swapin readahead under down_read of mmap_sem Kirill A. Shutemov
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).