From mboxrd@z Thu Jan  1 00:00:00 1970
From: Jérôme Glisse
To: akpm@linux-foundation.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: John Hubbard, Dan Williams, Naoya Horiguchi, David Nellans,
	Jérôme Glisse, Evgeny Baskakov, Mark Hairgrove, Sherry Cheung,
	Subhash Gutti
Subject: [HMM 07/16] mm/migrate: migrate_vma() unmap page from vma while collecting pages
Date: Wed, 5 Apr 2017 16:40:17 -0400
Message-Id: <20170405204026.3940-8-jglisse@redhat.com>
In-Reply-To: <20170405204026.3940-1-jglisse@redhat.com>
References: <20170405204026.3940-1-jglisse@redhat.com>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The common case when migrating a range of virtual addresses is that each
page is mapped only once, inside the vma in which the migration is taking
place. Because we already walk the CPU page table for that range, we can
do the unmap directly during that walk and set up the special migration
swap entry there.

Signed-off-by: Jérôme Glisse
Signed-off-by: Evgeny Baskakov
Signed-off-by: John Hubbard
Signed-off-by: Mark Hairgrove
Signed-off-by: Sherry Cheung
Signed-off-by: Subhash Gutti
---
 mm/migrate.c | 114 ++++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 98 insertions(+), 16 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index b2ce541..4486e30 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2117,7 +2117,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 {
 	struct migrate_vma *migrate = walk->private;
 	struct mm_struct *mm = walk->vma->vm_mm;
-	unsigned long addr = start;
+	unsigned long addr = start, unmapped = 0;
 	spinlock_t *ptl;
 	pte_t *ptep;
 
@@ -2127,9 +2127,12 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 	}
 
 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+	arch_enter_lazy_mmu_mode();
+
 	for (; addr < end; addr += PAGE_SIZE, ptep++) {
 		unsigned long mpfn, pfn;
 		struct page *page;
+		swp_entry_t entry;
 		pte_t pte;
 
 		pte = *ptep;
@@ -2161,11 +2164,44 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 		mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
 		mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
 
+		/*
+		 * Optimize for the common case where page is only mapped once
+		 * in one process. If we can lock the page, then we can safely
+		 * set up a special migration page table entry now.
+		 */
+		if (trylock_page(page)) {
+			pte_t swp_pte;
+
+			mpfn |= MIGRATE_PFN_LOCKED;
+			ptep_get_and_clear(mm, addr, ptep);
+
+			/* Setup special migration page table entry */
+			entry = make_migration_entry(page, pte_write(pte));
+			swp_pte = swp_entry_to_pte(entry);
+			if (pte_soft_dirty(pte))
+				swp_pte = pte_swp_mksoft_dirty(swp_pte);
+			set_pte_at(mm, addr, ptep, swp_pte);
+
+			/*
+			 * This is like regular unmap: we remove the rmap and
+			 * drop page refcount. Page won't be freed, as we took
+			 * a reference just above.
+			 */
+			page_remove_rmap(page, false);
+			put_page(page);
+			unmapped++;
+		}
+
 next:
 		migrate->src[migrate->npages++] = mpfn;
 	}
+	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(ptep - 1, ptl);
 
+	/* Only flush the TLB if we actually modified any entries */
+	if (unmapped)
+		flush_tlb_range(walk->vma, start, end);
+
 	return 0;
 }
 
@@ -2190,7 +2226,13 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
 	mm_walk.mm = migrate->vma->vm_mm;
 	mm_walk.private = migrate;
 
+	mmu_notifier_invalidate_range_start(mm_walk.mm,
+					    migrate->start,
+					    migrate->end);
 	walk_page_range(migrate->start, migrate->end, &mm_walk);
+	mmu_notifier_invalidate_range_end(mm_walk.mm,
+					  migrate->start,
+					  migrate->end);
 
 	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
 }
@@ -2246,12 +2288,16 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
 
 	for (i = 0; i < npages; i++) {
 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
+		bool remap = true;
 
 		if (!page)
 			continue;
 
-		lock_page(page);
-		migrate->src[i] |= MIGRATE_PFN_LOCKED;
+		if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
+			remap = false;
+			lock_page(page);
+			migrate->src[i] |= MIGRATE_PFN_LOCKED;
+		}
 
 		if (!PageLRU(page) && allow_drain) {
 			/* Drain CPU's pagevec */
@@ -2260,21 +2306,50 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
 		}
 
 		if (isolate_lru_page(page)) {
-			migrate->src[i] = 0;
-			unlock_page(page);
-			migrate->cpages--;
-			put_page(page);
+			if (remap) {
+				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+				migrate->cpages--;
+				restore++;
+			} else {
+				migrate->src[i] = 0;
+				unlock_page(page);
+				migrate->cpages--;
+				put_page(page);
+			}
 			continue;
 		}
 
 		if (!migrate_vma_check_page(page)) {
-			migrate->src[i] = 0;
-			unlock_page(page);
-			migrate->cpages--;
+			if (remap) {
+				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+				migrate->cpages--;
+				restore++;
 
-			putback_lru_page(page);
+				get_page(page);
+				putback_lru_page(page);
+			} else {
+				migrate->src[i] = 0;
+				unlock_page(page);
+				migrate->cpages--;
+
+				putback_lru_page(page);
+			}
 		}
 	}
+
+	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
+		struct page *page = migrate_pfn_to_page(migrate->src[i]);
+
+		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
+			continue;
+
+		remove_migration_pte(page, migrate->vma, addr, page);
+
+		migrate->src[i] = 0;
+		unlock_page(page);
+		put_page(page);
+		restore--;
+	}
 }
 
 /*
@@ -2301,12 +2376,19 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
 		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
 			continue;
 
-		try_to_unmap(page, flags);
-		if (page_mapped(page) || !migrate_vma_check_page(page)) {
-			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-			migrate->cpages--;
-			restore++;
+		if (page_mapped(page)) {
+			try_to_unmap(page, flags);
+			if (page_mapped(page))
+				goto restore;
 		}
+
+		if (migrate_vma_check_page(page))
+			continue;
+
+restore:
+		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+		migrate->cpages--;
+		restore++;
 	}
 
 	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
-- 
2.9.3
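For readers skimming the diff, the heart of the change is the trylock-and-replace
sequence added to migrate_vma_collect_pmd(). The sketch below condenses that hunk
into a standalone helper; it is an illustration only, not part of the patch. The
helper name try_install_migration_pte() is invented here, and the caller is assumed
to hold the PTE lock taken with pte_offset_map_lock() and to pass the current value
of *ptep in 'pte', with 'page' being the page that PTE maps.

/*
 * Illustrative sketch only (not part of the patch): condensed from the
 * trylock_page() block added to migrate_vma_collect_pmd() above.
 * The caller must hold the PTE lock and already hold a reference on 'page'.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swapops.h>

static bool try_install_migration_pte(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep, pte_t pte, struct page *page)
{
	swp_entry_t entry;
	pte_t swp_pte;

	/* Only proceed if the page lock can be taken without sleeping. */
	if (!trylock_page(page))
		return false;

	/* Clear the present PTE under the PTE lock... */
	ptep_get_and_clear(mm, addr, ptep);

	/* ...and install a special migration swap entry in its place. */
	entry = make_migration_entry(page, pte_write(pte));
	swp_pte = swp_entry_to_pte(entry);
	if (pte_soft_dirty(pte))
		swp_pte = pte_swp_mksoft_dirty(swp_pte);
	set_pte_at(mm, addr, ptep, swp_pte);

	/*
	 * Behave like a regular unmap: drop the rmap and the mapping's page
	 * reference. The page is not freed because the caller still holds
	 * its own reference.
	 */
	page_remove_rmap(page, false);
	put_page(page);
	return true;
}

In the patch itself this logic lives inline in the PTE walk loop, which additionally
records MIGRATE_PFN_LOCKED in the collected mpfn entry (so migrate_vma_prepare() can
skip lock_page() later) and counts the unmapped PTEs so the TLB is flushed only once,
with flush_tlb_range(), after the loop.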