From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1757400AbaFTUMN (ORCPT );
	Fri, 20 Jun 2014 16:12:13 -0400
Received: from mx1.redhat.com ([209.132.183.28]:40712 "EHLO mx1.redhat.com"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S1754805AbaFTUML (ORCPT );
	Fri, 20 Jun 2014 16:12:11 -0400
From: Naoya Horiguchi
To: linux-mm@kvack.org
Cc: Andrew Morton , Dave Hansen , Hugh Dickins ,
	"Kirill A. Shutemov" , linux-kernel@vger.kernel.org,
	Naoya Horiguchi
Subject: [PATCH v3 05/13] clear_refs: remove clear_refs_private->vma and introduce clear_refs_test_walk()
Date: Fri, 20 Jun 2014 16:11:31 -0400
Message-Id: <1403295099-6407-6-git-send-email-n-horiguchi@ah.jp.nec.com>
In-Reply-To: <1403295099-6407-1-git-send-email-n-horiguchi@ah.jp.nec.com>
References: <1403295099-6407-1-git-send-email-n-horiguchi@ah.jp.nec.com>
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

clear_refs_write() has some prechecks that determine whether we really
walk over a given vma. Now that we have a test_walk() callback to filter
vmas, let's utilize it.

Signed-off-by: Naoya Horiguchi
---
 fs/proc/task_mmu.c | 55 +++++++++++++++++++++++++++---------------------------
 1 file changed, 27 insertions(+), 28 deletions(-)

diff --git v3.16-rc1.orig/fs/proc/task_mmu.c v3.16-rc1/fs/proc/task_mmu.c
index 9b6c7d4fd3f4..3c42cd40ad36 100644
--- v3.16-rc1.orig/fs/proc/task_mmu.c
+++ v3.16-rc1/fs/proc/task_mmu.c
@@ -716,7 +716,6 @@ enum clear_refs_types {
 };
 
 struct clear_refs_private {
-	struct vm_area_struct *vma;
 	enum clear_refs_types type;
 };
 
@@ -749,7 +748,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
 	struct clear_refs_private *cp = walk->private;
-	struct vm_area_struct *vma = cp->vma;
+	struct vm_area_struct *vma = walk->vma;
 	pte_t *pte, ptent;
 	spinlock_t *ptl;
 	struct page *page;
@@ -783,6 +782,29 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	return 0;
 }
 
+static int clear_refs_test_walk(unsigned long start, unsigned long end,
+				struct mm_walk *walk)
+{
+	struct clear_refs_private *cp = walk->private;
+	struct vm_area_struct *vma = walk->vma;
+
+	/*
+	 * Writing 1 to /proc/pid/clear_refs affects all pages.
+	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
+	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
+	 * Writing 4 to /proc/pid/clear_refs affects all pages.
+	 */
+	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
+		return 1;
+	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
+		return 1;
+	if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
+		if (vma->vm_flags & VM_SOFTDIRTY)
+			vma->vm_flags &= ~VM_SOFTDIRTY;
+	}
+	return 0;
+}
+
 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 				size_t count, loff_t *ppos)
 {
@@ -823,38 +845,15 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 	};
 	struct mm_walk clear_refs_walk = {
 		.pmd_entry = clear_refs_pte_range,
+		.test_walk = clear_refs_test_walk,
 		.mm = mm,
 		.private = &cp,
 	};
 	down_read(&mm->mmap_sem);
 	if (type == CLEAR_REFS_SOFT_DIRTY)
 		mmu_notifier_invalidate_range_start(mm, 0, -1);
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		cp.vma = vma;
-		if (is_vm_hugetlb_page(vma))
-			continue;
-		/*
-		 * Writing 1 to /proc/pid/clear_refs affects all pages.
-		 *
-		 * Writing 2 to /proc/pid/clear_refs only affects
-		 * Anonymous pages.
-		 *
-		 * Writing 3 to /proc/pid/clear_refs only affects file
-		 * mapped pages.
-		 *
-		 * Writing 4 to /proc/pid/clear_refs affects all pages.
-		 */
-		if (type == CLEAR_REFS_ANON && vma->vm_file)
-			continue;
-		if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
-			continue;
-		if (type == CLEAR_REFS_SOFT_DIRTY) {
-			if (vma->vm_flags & VM_SOFTDIRTY)
-				vma->vm_flags &= ~VM_SOFTDIRTY;
-		}
-		walk_page_range(vma->vm_start, vma->vm_end,
-				&clear_refs_walk);
-	}
+	for (vma = mm->mmap; vma; vma = vma->vm_next)
+		walk_page_vma(vma, &clear_refs_walk);
 	if (type == CLEAR_REFS_SOFT_DIRTY)
 		mmu_notifier_invalidate_range_end(mm, 0, -1);
 	flush_tlb_mm(mm);
-- 
1.9.3
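
For reference, the test_walk() contract the patch relies on is: return 1
to skip the current vma, 0 to walk it, with the core walker consulting
the callback once per vma before visiting any page-table entries. The
standalone userspace C sketch below mirrors that dispatch pattern only;
struct region, struct walker, walk_regions() and the demo data are
invented for illustration and are not kernel API.

#include <stdio.h>

/* Hypothetical stand-ins for vm_area_struct and mm_walk, illustration only. */
struct region {
	unsigned long start, end;
	int file_backed;		/* plays the role of vma->vm_file != NULL */
};

struct walker {
	/* test_walk contract: return 1 to skip the region, 0 to descend. */
	int (*test_walk)(struct region *r, void *private);
	void (*visit)(struct region *r, void *private);
	void *private;
};

/* Skip file-backed regions, mirroring the CLEAR_REFS_ANON branch above. */
static int skip_file_backed(struct region *r, void *private)
{
	(void)private;
	return r->file_backed ? 1 : 0;
}

static void print_region(struct region *r, void *private)
{
	(void)private;
	printf("walking [%#lx, %#lx)\n", r->start, r->end);
}

/* Core loop: consult test_walk once per region before visiting it. */
static void walk_regions(struct region *regions, int n, struct walker *w)
{
	int i;

	for (i = 0; i < n; i++) {
		if (w->test_walk && w->test_walk(&regions[i], w->private))
			continue;	/* filtered, like test_walk() returning 1 */
		w->visit(&regions[i], w->private);
	}
}

int main(void)
{
	struct region regions[] = {
		{ 0x400000UL, 0x401000UL, 1 },	/* file-backed: skipped */
		{ 0x7f0000UL, 0x7f8000UL, 0 },	/* anonymous: walked */
	};
	struct walker w = {
		.test_walk = skip_file_backed,
		.visit = print_region,
	};

	walk_regions(regions, 2, &w);
	return 0;
}

Keeping the filtering in a per-vma callback, as the patch does, means the
walker core owns the iteration and every caller states its skip policy in
one place instead of open-coding continue-checks in its own loop.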