From mboxrd@z Thu Jan  1 00:00:00 1970
From: akpm@linux-foundation.org
Subject: [merged] mm-migrate-migrate_vma-unmap-page-from-vma-while-collecting-pages.patch removed from -mm tree
Date: Mon, 11 Sep 2017 12:39:24 -0700
Message-ID: <20170911193924.nW__zEcbY%akpm@linux-foundation.org>
Reply-To: linux-kernel@vger.kernel.org
Mime-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8BIT
Return-path:
Received: from mail.linuxfoundation.org ([140.211.169.12]:49044 "EHLO
	mail.linuxfoundation.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1751028AbdIKTj0 (ORCPT );
	Mon, 11 Sep 2017 15:39:26 -0400
Sender: mm-commits-owner@vger.kernel.org
List-Id: mm-commits@vger.kernel.org
To: aneesh.kumar@linux.vnet.ibm.com, benh@kernel.crashing.org,
	bsingharora@gmail.com, dan.j.williams@intel.com, dnellans@nvidia.com,
	ebaskakov@nvidia.com, hannes@cmpxchg.org, jglisse@redhat.com,
	jhubbard@nvidia.com, kirill.shutemov@linux.intel.com, liubo95@huawei.com,
	mhairgrove@nvidia.com, mhocko@kernel.org, mm-commits@vger.kernel.org,
	paulmck@linux.vnet.ibm.com, ross.zwisler@linux.intel.com,
	SCheung@nvidia.com, sgutti@nvidia.com, vdavydov.dev@gmail.com

The patch titled
     Subject: mm/migrate: migrate_vma() unmap page from vma while collecting pages
has been removed from the -mm tree.  Its filename was
     mm-migrate-migrate_vma-unmap-page-from-vma-while-collecting-pages.patch

This patch was dropped because it was merged into mainline or a subsystem tree

------------------------------------------------------
From: Jérôme Glisse
Subject: mm/migrate: migrate_vma() unmap page from vma while collecting pages

The common case for migration of a virtual address range is that each page
is mapped only once, inside the vma in which the migration is taking
place.  Because we already walk the CPU page table for that range, we can
do the unmap directly during that walk and set up the special migration
swap entry there.

Link: http://lkml.kernel.org/r/20170817000548.32038-16-jglisse@redhat.com
Signed-off-by: Jérôme Glisse
Signed-off-by: Evgeny Baskakov
Signed-off-by: John Hubbard
Signed-off-by: Mark Hairgrove
Signed-off-by: Sherry Cheung
Signed-off-by: Subhash Gutti
Cc: Aneesh Kumar
Cc: Balbir Singh
Cc: Benjamin Herrenschmidt
Cc: Dan Williams
Cc: David Nellans
Cc: Johannes Weiner
Cc: Kirill A. Shutemov
Cc: Michal Hocko
Cc: Paul E. McKenney
Cc: Ross Zwisler
Cc: Vladimir Davydov
Cc: Bob Liu
Signed-off-by: Andrew Morton
---
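The diff below folds the unmap step into the same page table walk that
collects candidate pages.  As a rough illustration of that control flow
only, here is a minimal userspace sketch; it is not kernel code, and every
name in it (toy_page, toy_trylock, TOY_PFN_LOCKED, ...) is invented for the
example.  When the per-page trylock succeeds during collection, the entry
is replaced with a migration placeholder on the spot and flagged LOCKED so
the later prepare/unmap passes can skip it; when the trylock fails, the
page is collected as before and left to the existing slow path.

/*
 * Minimal userspace sketch (not kernel code) of the collect-and-unmap
 * fast path: while walking the "page table" (here just an array),
 * try-lock each page; on success, replace the entry with a migration
 * placeholder immediately and record LOCKED so later steps can skip it.
 * All names (toy_page, TOY_PFN_LOCKED, ...) are made up for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PFN_MIGRATE		(1u << 0)
#define TOY_PFN_LOCKED		(1u << 1)
#define TOY_MIGRATION_ENTRY	UINT64_MAX	/* stand-in for a migration swap entry */

struct toy_page {
	bool locked;		/* models the page lock */
	int mapcount;		/* models the rmap count */
};

static bool toy_trylock(struct toy_page *p)
{
	if (p->locked)
		return false;
	p->locked = true;
	return true;
}

/* One pass that both collects pages and, when possible, unmaps them. */
static void toy_collect_and_unmap(uint64_t *pte, struct toy_page *page,
				  unsigned int n, unsigned int *src)
{
	for (unsigned int i = 0; i < n; i++) {
		src[i] = TOY_PFN_MIGRATE;
		if (toy_trylock(&page[i])) {
			/* Unmap right here instead of in a second walk. */
			pte[i] = TOY_MIGRATION_ENTRY;
			page[i].mapcount--;
			src[i] |= TOY_PFN_LOCKED;
		}
	}
}

int main(void)
{
	uint64_t pte[2] = { 0x1000, 0x2000 };
	struct toy_page page[2] = { { false, 1 }, { true, 1 } };
	unsigned int src[2];

	toy_collect_and_unmap(pte, page, 2, src);
	for (int i = 0; i < 2; i++)
		printf("entry %d: pte=%#llx src=%#x\n", i,
		       (unsigned long long)pte[i], src[i]);
	return 0;
}

Built with "cc -std=c99 toy.c", it prints the first entry converted to the
placeholder (its page could be locked) and the second left untouched (its
page was already locked), which is the split the real patch expresses with
MIGRATE_PFN_LOCKED.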

 mm/migrate.c |  141 ++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 112 insertions(+), 29 deletions(-)

diff -puN mm/migrate.c~mm-migrate-migrate_vma-unmap-page-from-vma-while-collecting-pages mm/migrate.c
--- a/mm/migrate.c~mm-migrate-migrate_vma-unmap-page-from-vma-while-collecting-pages
+++ a/mm/migrate.c
@@ -2149,7 +2149,7 @@ static int migrate_vma_collect_pmd(pmd_t
 	struct migrate_vma *migrate = walk->private;
 	struct vm_area_struct *vma = walk->vma;
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long addr = start;
+	unsigned long addr = start, unmapped = 0;
 	spinlock_t *ptl;
 	pte_t *ptep;
 
@@ -2194,9 +2194,12 @@ again:
 		return migrate_vma_collect_hole(start, end, walk);
 
 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+	arch_enter_lazy_mmu_mode();
+
 	for (; addr < end; addr += PAGE_SIZE, ptep++) {
 		unsigned long mpfn, pfn;
 		struct page *page;
+		swp_entry_t entry;
 		pte_t pte;
 
 		pte = *ptep;
@@ -2228,11 +2231,44 @@ again:
 		mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
 		mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
 
+		/*
+		 * Optimize for the common case where page is only mapped once
+		 * in one process. If we can lock the page, then we can safely
+		 * set up a special migration page table entry now.
+		 */
+		if (trylock_page(page)) {
+			pte_t swp_pte;
+
+			mpfn |= MIGRATE_PFN_LOCKED;
+			ptep_get_and_clear(mm, addr, ptep);
+
+			/* Setup special migration page table entry */
+			entry = make_migration_entry(page, pte_write(pte));
+			swp_pte = swp_entry_to_pte(entry);
+			if (pte_soft_dirty(pte))
+				swp_pte = pte_swp_mksoft_dirty(swp_pte);
+			set_pte_at(mm, addr, ptep, swp_pte);
+
+			/*
+			 * This is like regular unmap: we remove the rmap and
+			 * drop page refcount. Page won't be freed, as we took
+			 * a reference just above.
+			 */
+			page_remove_rmap(page, false);
+			put_page(page);
+			unmapped++;
+		}
+
 next:
 		migrate->src[migrate->npages++] = mpfn;
 	}
+	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(ptep - 1, ptl);
 
+	/* Only flush the TLB if we actually modified any entries */
+	if (unmapped)
+		flush_tlb_range(walk->vma, start, end);
+
 	return 0;
 }
 
@@ -2257,7 +2293,13 @@ static void migrate_vma_collect(struct m
 	mm_walk.mm = migrate->vma->vm_mm;
 	mm_walk.private = migrate;
 
+	mmu_notifier_invalidate_range_start(mm_walk.mm,
+					    migrate->start,
+					    migrate->end);
 	walk_page_range(migrate->start, migrate->end, &mm_walk);
+	mmu_notifier_invalidate_range_end(mm_walk.mm,
+					  migrate->start,
+					  migrate->end);
 
 	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
 }
@@ -2305,32 +2347,37 @@ static bool migrate_vma_check_page(struc
 static void migrate_vma_prepare(struct migrate_vma *migrate)
 {
 	const unsigned long npages = migrate->npages;
+	const unsigned long start = migrate->start;
+	unsigned long addr, i, restore = 0;
 	bool allow_drain = true;
-	unsigned long i;
 
 	lru_add_drain();
 
 	for (i = 0; (i < npages) && migrate->cpages; i++) {
 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
+		bool remap = true;
 
 		if (!page)
 			continue;
 
-		/*
-		 * Because we are migrating several pages there can be
-		 * a deadlock between 2 concurrent migration where each
-		 * are waiting on each other page lock.
-		 *
-		 * Make migrate_vma() a best effort thing and backoff
-		 * for any page we can not lock right away.
-		 */
-		if (!trylock_page(page)) {
-			migrate->src[i] = 0;
-			migrate->cpages--;
-			put_page(page);
-			continue;
+		if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
+			/*
+			 * Because we are migrating several pages there can be
+			 * a deadlock between 2 concurrent migration where each
+			 * are waiting on each other page lock.
+			 *
+			 * Make migrate_vma() a best effort thing and backoff
+			 * for any page we can not lock right away.
+			 */
+			if (!trylock_page(page)) {
+				migrate->src[i] = 0;
+				migrate->cpages--;
+				put_page(page);
+				continue;
+			}
+			remap = false;
+			migrate->src[i] |= MIGRATE_PFN_LOCKED;
 		}
-		migrate->src[i] |= MIGRATE_PFN_LOCKED;
 
 		if (!PageLRU(page) && allow_drain) {
 			/* Drain CPU's pagevec */
@@ -2339,21 +2386,50 @@ static void migrate_vma_prepare(struct m
 		}
 
 		if (isolate_lru_page(page)) {
-			migrate->src[i] = 0;
-			unlock_page(page);
-			migrate->cpages--;
-			put_page(page);
+			if (remap) {
+				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+				migrate->cpages--;
+				restore++;
+			} else {
+				migrate->src[i] = 0;
+				unlock_page(page);
+				migrate->cpages--;
+				put_page(page);
+			}
 			continue;
 		}
 
 		if (!migrate_vma_check_page(page)) {
-			migrate->src[i] = 0;
-			unlock_page(page);
-			migrate->cpages--;
+			if (remap) {
+				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+				migrate->cpages--;
+				restore++;
+
+				get_page(page);
+				putback_lru_page(page);
+			} else {
+				migrate->src[i] = 0;
+				unlock_page(page);
+				migrate->cpages--;
 
-			putback_lru_page(page);
+				putback_lru_page(page);
+			}
 		}
 	}
+
+	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
+		struct page *page = migrate_pfn_to_page(migrate->src[i]);
+
+		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
+			continue;
+
+		remove_migration_pte(page, migrate->vma, addr, page);
+
+		migrate->src[i] = 0;
+		unlock_page(page);
+		put_page(page);
+		restore--;
+	}
 }
 
 /*
@@ -2380,12 +2456,19 @@ static void migrate_vma_unmap(struct mig
 		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
 			continue;
 
-		try_to_unmap(page, flags);
-		if (page_mapped(page) || !migrate_vma_check_page(page)) {
-			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-			migrate->cpages--;
-			restore++;
+		if (page_mapped(page)) {
+			try_to_unmap(page, flags);
+			if (page_mapped(page))
+				goto restore;
 		}
+
+		if (migrate_vma_check_page(page))
+			continue;
+
+restore:
+		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+		migrate->cpages--;
+		restore++;
 	}
 
 	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
_

Patches currently in -mm which might be from jglisse@redhat.com are