From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Andrea Arcangeli, Hugh Dickins, Rik van Riel, Andrew Morton
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCHv3 06/12] mm: convert page_mkclean_one() to use page_vma_mapped_walk()
Date: Sun, 29 Jan 2017 20:38:52 +0300
Message-Id: <20170129173858.45174-7-kirill.shutemov@linux.intel.com>
X-Mailer: git-send-email 2.11.0
In-Reply-To: <20170129173858.45174-1-kirill.shutemov@linux.intel.com>
References: <20170129173858.45174-1-kirill.shutemov@linux.intel.com>

For consistency, it is worth converting all page_check_address() callers
to page_vma_mapped_walk(), so that the former can be dropped.

The PMD handling here is future-proofing: there are no users yet, but
ext4 with huge pages will be the first.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 mm/rmap.c | 68 ++++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 45 insertions(+), 23 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index c4bad599cc7b..58597de049fd 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1017,34 +1017,56 @@ int page_referenced(struct page *page,
 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			    unsigned long address, void *arg)
 {
-	struct mm_struct *mm = vma->vm_mm;
-	pte_t *pte;
-	spinlock_t *ptl;
-	int ret = 0;
+	struct page_vma_mapped_walk pvmw = {
+		.page = page,
+		.vma = vma,
+		.address = address,
+		.flags = PVMW_SYNC,
+	};
 	int *cleaned = arg;
 
-	pte = page_check_address(page, mm, address, &ptl, 1);
-	if (!pte)
-		goto out;
-
-	if (pte_dirty(*pte) || pte_write(*pte)) {
-		pte_t entry;
+	while (page_vma_mapped_walk(&pvmw)) {
+		int ret = 0;
+		address = pvmw.address;
+		if (pvmw.pte) {
+			pte_t entry;
+			pte_t *pte = pvmw.pte;
+
+			if (!pte_dirty(*pte) && !pte_write(*pte))
+				continue;
+
+			flush_cache_page(vma, address, pte_pfn(*pte));
+			entry = ptep_clear_flush(vma, address, pte);
+			entry = pte_wrprotect(entry);
+			entry = pte_mkclean(entry);
+			set_pte_at(vma->vm_mm, address, pte, entry);
+			ret = 1;
+		} else {
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+			pmd_t *pmd = pvmw.pmd;
+			pmd_t entry;
+
+			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
+				continue;
+
+			flush_cache_page(vma, address, page_to_pfn(page));
+			entry = pmdp_huge_clear_flush(vma, address, pmd);
+			entry = pmd_wrprotect(entry);
+			entry = pmd_mkclean(entry);
+			set_pmd_at(vma->vm_mm, address, pmd, entry);
+			ret = 1;
+#else
+			/* unexpected pmd-mapped page? */
+			WARN_ON_ONCE(1);
+#endif
+		}
 
-		flush_cache_page(vma, address, pte_pfn(*pte));
-		entry = ptep_clear_flush(vma, address, pte);
-		entry = pte_wrprotect(entry);
-		entry = pte_mkclean(entry);
-		set_pte_at(mm, address, pte, entry);
-		ret = 1;
+		if (ret) {
+			mmu_notifier_invalidate_page(vma->vm_mm, address);
+			(*cleaned)++;
+		}
 	}
 
-	pte_unmap_unlock(pte, ptl);
-
-	if (ret) {
-		mmu_notifier_invalidate_page(mm, address);
-		(*cleaned)++;
-	}
-out:
 	return SWAP_AGAIN;
 }
 
-- 
2.11.0
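
For readers new to the interface, below is a minimal sketch of the page_vma_mapped_walk() idiom the patch adopts. It assumes kernel context (struct page_vma_mapped_walk and PVMW_SYNC come from include/linux/rmap.h, as used above); example_count_mappings() is a hypothetical helper for illustration only, not part of the patch, and it restates only what the conversion relies on: each successful iteration locates one mapping of the page inside the VMA, with pvmw.pte set for PTE mappings and pvmw.pmd set for PMD-mapped huge pages.

#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical illustration only; not part of the patch above. */
static int example_count_mappings(struct page *page,
				  struct vm_area_struct *vma,
				  unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,	/* same synchronous walk page_mkclean_one() uses */
	};
	int nr = 0;

	/*
	 * Each successful iteration finds one placement of @page in @vma;
	 * pvmw.address is the virtual address of that placement.
	 */
	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte)
			nr++;	/* PTE-mapped: examine/modify *pvmw.pte here */
		else
			nr++;	/* PMD-mapped huge page: pvmw.pmd is valid instead */
	}

	return nr;
}

With that loop shape, page_mkclean_one() above simply write-protects and cleans whichever entry the walk reports, instead of open-coding the lookup with page_check_address().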