All of lore.kernel.org
 help / color / mirror / Atom feed
From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
To: linux-mm@kvack.org
Cc: Dave Hansen <dave.hansen@intel.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Hugh Dickins <hughd@google.com>,
	"Kirill A. Shutemov" <kirill@shutemov.name>,
	linux-kernel@vger.kernel.org
Subject: [PATCH -mm v2 05/11] pagewalk: remove mm_walk->skip
Date: Thu, 12 Jun 2014 17:48:05 -0400	[thread overview]
Message-ID: <1402609691-13950-6-git-send-email-n-horiguchi@ah.jp.nec.com> (raw)
In-Reply-To: <1402609691-13950-1-git-send-email-n-horiguchi@ah.jp.nec.com>

Due to the relocation of pmd locking, mm_walk->skip becomes less important
because only walk_page_test() and walk->test_walk() use it. Neither of these
functions currently uses a positive return value, so we can define a positive
return value to mean "skip the current vma," making the flag unnecessary.
Thus this patch removes mm_walk->skip.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
---
 fs/proc/task_mmu.c |  4 ++--
 include/linux/mm.h |  3 ---
 mm/mempolicy.c     |  9 ++++-----
 mm/pagewalk.c      | 36 ++++++++----------------------------
 4 files changed, 14 insertions(+), 38 deletions(-)

diff --git mmotm-2014-05-21-16-57.orig/fs/proc/task_mmu.c mmotm-2014-05-21-16-57/fs/proc/task_mmu.c
index 059206ea3c6b..8211f6c8236d 100644
--- mmotm-2014-05-21-16-57.orig/fs/proc/task_mmu.c
+++ mmotm-2014-05-21-16-57/fs/proc/task_mmu.c
@@ -755,9 +755,9 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end,
 	 * Writing 4 to /proc/pid/clear_refs affects all pages.
 	 */
 	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
-		walk->skip = 1;
+		return 1;
 	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
-		walk->skip = 1;
+		return 1;
 	if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 		if (vma->vm_flags & VM_SOFTDIRTY)
 			vma->vm_flags &= ~VM_SOFTDIRTY;
diff --git mmotm-2014-05-21-16-57.orig/include/linux/mm.h mmotm-2014-05-21-16-57/include/linux/mm.h
index aa832161a1ff..0a20674c84e2 100644
--- mmotm-2014-05-21-16-57.orig/include/linux/mm.h
+++ mmotm-2014-05-21-16-57/include/linux/mm.h
@@ -1106,8 +1106,6 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
  *             right now." 0 means "skip the current vma."
  * @mm:        mm_struct representing the target process of page table walk
  * @vma:       vma currently walked
- * @skip:      internal control flag which is set when we skip the lower
- *             level entries.
  * @pmd:       current pmd entry
  * @ptl:       page table lock associated with current entry
  * @private:   private data for callbacks' use
@@ -1127,7 +1125,6 @@ struct mm_walk {
 			struct mm_walk *walk);
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
-	int skip;
 	pmd_t *pmd;
 	spinlock_t *ptl;
 	void *private;
diff --git mmotm-2014-05-21-16-57.orig/mm/mempolicy.c mmotm-2014-05-21-16-57/mm/mempolicy.c
index cf3b995b21d0..b8267f753748 100644
--- mmotm-2014-05-21-16-57.orig/mm/mempolicy.c
+++ mmotm-2014-05-21-16-57/mm/mempolicy.c
@@ -596,22 +596,21 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 	}
 
 	qp->prev = vma;
-	walk->skip = 1;
 
 	if (vma->vm_flags & VM_PFNMAP)
-		return 0;
+		return 1;
 
 	if (flags & MPOL_MF_LAZY) {
 		change_prot_numa(vma, start, endvma);
-		return 0;
+		return 1;
 	}
 
 	if ((flags & MPOL_MF_STRICT) ||
 	    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
 	     vma_migratable(vma)))
 		/* queue pages from current vma */
-		walk->skip = 0;
-	return 0;
+		return 0;
+	return 1;
 }
 
 /*
diff --git mmotm-2014-05-21-16-57.orig/mm/pagewalk.c mmotm-2014-05-21-16-57/mm/pagewalk.c
index f1a3417d0b51..61d6bd9545d6 100644
--- mmotm-2014-05-21-16-57.orig/mm/pagewalk.c
+++ mmotm-2014-05-21-16-57/mm/pagewalk.c
@@ -3,24 +3,6 @@
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
 
-/*
- * Check the current skip status of page table walker.
- *
- * Here what I mean by skip is to skip lower level walking, and that was
- * determined for each entry independently. For example, when walk_pmd_range
- * handles a pmd_trans_huge we don't have to walk over ptes under that pmd,
- * and the skipping does not affect the walking over ptes under other pmds.
- * That's why we reset @walk->skip after tested.
- */
-static bool skip_lower_level_walking(struct mm_walk *walk)
-{
-	if (walk->skip) {
-		walk->skip = 0;
-		return true;
-	}
-	return false;
-}
-
 static int walk_pte_range(pmd_t *pmd, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
@@ -89,8 +71,6 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr,
 				err = walk->pmd_entry(pmd, addr, next, walk);
 				spin_unlock(walk->ptl);
 			}
-			if (skip_lower_level_walking(walk))
-				continue;
 			if (err)
 				break;
 		}
@@ -225,9 +205,9 @@ static inline int walk_hugetlb_range(unsigned long addr, unsigned long end,
 
 /*
  * Decide whether we really walk over the current vma on [@start, @end)
- * or skip it. When we skip it, we set @walk->skip to 1.
- * The return value is used to control the page table walking to
- * continue (for zero) or not (for non-zero).
+ * or skip it via the returned value. Return 0 if we do walk over the
+ * current vma, and return 1 if we skip the vma. A negative return value
+ * means an error, in which case we abort the current walk.
  *
  * Default check (only VM_PFNMAP check for now) is used when the caller
  * doesn't define test_walk() callback.
@@ -245,7 +225,7 @@ static int walk_page_test(unsigned long start, unsigned long end,
 	 * page backing a VM_PFNMAP range. See also commit a9ff785e4437.
 	 */
 	if (vma->vm_flags & VM_PFNMAP)
-		walk->skip = 1;
+		return 1;
 	return 0;
 }
 
@@ -330,9 +310,9 @@ int walk_page_range(unsigned long start, unsigned long end,
 			next = min(end, vma->vm_end);
 
 			err = walk_page_test(start, next, walk);
-			if (skip_lower_level_walking(walk))
+			if (err == 1)
 				continue;
-			if (err)
+			if (err < 0)
 				break;
 		}
 		err = __walk_page_range(start, next, walk);
@@ -353,9 +333,9 @@ int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
 	VM_BUG_ON(!vma);
 	walk->vma = vma;
 	err = walk_page_test(vma->vm_start, vma->vm_end, walk);
-	if (skip_lower_level_walking(walk))
+	if (err == 1)
 		return 0;
-	if (err)
+	if (err < 0)
 		return err;
 	return __walk_page_range(vma->vm_start, vma->vm_end, walk);
 }
-- 
1.9.3


WARNING: multiple messages have this Message-ID (diff)
From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
To: linux-mm@kvack.org
Cc: Dave Hansen <dave.hansen@intel.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Hugh Dickins <hughd@google.com>,
	"Kirill A. Shutemov" <kirill@shutemov.name>,
	linux-kernel@vger.kernel.org
Subject: [PATCH -mm v2 05/11] pagewalk: remove mm_walk->skip
Date: Thu, 12 Jun 2014 17:48:05 -0400	[thread overview]
Message-ID: <1402609691-13950-6-git-send-email-n-horiguchi@ah.jp.nec.com> (raw)
In-Reply-To: <1402609691-13950-1-git-send-email-n-horiguchi@ah.jp.nec.com>

Due to the relocation of pmd locking, mm_walk->skip becomes less important
because only walk_page_test() and walk->test_walk() use it. Neither of these
functions currently uses a positive return value, so we can define a positive
return value to mean "skip the current vma," making the flag unnecessary.
Thus this patch removes mm_walk->skip.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
---
 fs/proc/task_mmu.c |  4 ++--
 include/linux/mm.h |  3 ---
 mm/mempolicy.c     |  9 ++++-----
 mm/pagewalk.c      | 36 ++++++++----------------------------
 4 files changed, 14 insertions(+), 38 deletions(-)

diff --git mmotm-2014-05-21-16-57.orig/fs/proc/task_mmu.c mmotm-2014-05-21-16-57/fs/proc/task_mmu.c
index 059206ea3c6b..8211f6c8236d 100644
--- mmotm-2014-05-21-16-57.orig/fs/proc/task_mmu.c
+++ mmotm-2014-05-21-16-57/fs/proc/task_mmu.c
@@ -755,9 +755,9 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end,
 	 * Writing 4 to /proc/pid/clear_refs affects all pages.
 	 */
 	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
-		walk->skip = 1;
+		return 1;
 	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
-		walk->skip = 1;
+		return 1;
 	if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 		if (vma->vm_flags & VM_SOFTDIRTY)
 			vma->vm_flags &= ~VM_SOFTDIRTY;
diff --git mmotm-2014-05-21-16-57.orig/include/linux/mm.h mmotm-2014-05-21-16-57/include/linux/mm.h
index aa832161a1ff..0a20674c84e2 100644
--- mmotm-2014-05-21-16-57.orig/include/linux/mm.h
+++ mmotm-2014-05-21-16-57/include/linux/mm.h
@@ -1106,8 +1106,6 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
  *             right now." 0 means "skip the current vma."
  * @mm:        mm_struct representing the target process of page table walk
  * @vma:       vma currently walked
- * @skip:      internal control flag which is set when we skip the lower
- *             level entries.
  * @pmd:       current pmd entry
  * @ptl:       page table lock associated with current entry
  * @private:   private data for callbacks' use
@@ -1127,7 +1125,6 @@ struct mm_walk {
 			struct mm_walk *walk);
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
-	int skip;
 	pmd_t *pmd;
 	spinlock_t *ptl;
 	void *private;
diff --git mmotm-2014-05-21-16-57.orig/mm/mempolicy.c mmotm-2014-05-21-16-57/mm/mempolicy.c
index cf3b995b21d0..b8267f753748 100644
--- mmotm-2014-05-21-16-57.orig/mm/mempolicy.c
+++ mmotm-2014-05-21-16-57/mm/mempolicy.c
@@ -596,22 +596,21 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 	}
 
 	qp->prev = vma;
-	walk->skip = 1;
 
 	if (vma->vm_flags & VM_PFNMAP)
-		return 0;
+		return 1;
 
 	if (flags & MPOL_MF_LAZY) {
 		change_prot_numa(vma, start, endvma);
-		return 0;
+		return 1;
 	}
 
 	if ((flags & MPOL_MF_STRICT) ||
 	    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
 	     vma_migratable(vma)))
 		/* queue pages from current vma */
-		walk->skip = 0;
-	return 0;
+		return 0;
+	return 1;
 }
 
 /*
diff --git mmotm-2014-05-21-16-57.orig/mm/pagewalk.c mmotm-2014-05-21-16-57/mm/pagewalk.c
index f1a3417d0b51..61d6bd9545d6 100644
--- mmotm-2014-05-21-16-57.orig/mm/pagewalk.c
+++ mmotm-2014-05-21-16-57/mm/pagewalk.c
@@ -3,24 +3,6 @@
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
 
-/*
- * Check the current skip status of page table walker.
- *
- * Here what I mean by skip is to skip lower level walking, and that was
- * determined for each entry independently. For example, when walk_pmd_range
- * handles a pmd_trans_huge we don't have to walk over ptes under that pmd,
- * and the skipping does not affect the walking over ptes under other pmds.
- * That's why we reset @walk->skip after tested.
- */
-static bool skip_lower_level_walking(struct mm_walk *walk)
-{
-	if (walk->skip) {
-		walk->skip = 0;
-		return true;
-	}
-	return false;
-}
-
 static int walk_pte_range(pmd_t *pmd, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
@@ -89,8 +71,6 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr,
 				err = walk->pmd_entry(pmd, addr, next, walk);
 				spin_unlock(walk->ptl);
 			}
-			if (skip_lower_level_walking(walk))
-				continue;
 			if (err)
 				break;
 		}
@@ -225,9 +205,9 @@ static inline int walk_hugetlb_range(unsigned long addr, unsigned long end,
 
 /*
  * Decide whether we really walk over the current vma on [@start, @end)
- * or skip it. When we skip it, we set @walk->skip to 1.
- * The return value is used to control the page table walking to
- * continue (for zero) or not (for non-zero).
+ * or skip it via the returned value. Return 0 if we do walk over the
+ * current vma, and return 1 if we skip the vma. A negative return value
+ * means an error, in which case we abort the current walk.
  *
  * Default check (only VM_PFNMAP check for now) is used when the caller
  * doesn't define test_walk() callback.
@@ -245,7 +225,7 @@ static int walk_page_test(unsigned long start, unsigned long end,
 	 * page backing a VM_PFNMAP range. See also commit a9ff785e4437.
 	 */
 	if (vma->vm_flags & VM_PFNMAP)
-		walk->skip = 1;
+		return 1;
 	return 0;
 }
 
@@ -330,9 +310,9 @@ int walk_page_range(unsigned long start, unsigned long end,
 			next = min(end, vma->vm_end);
 
 			err = walk_page_test(start, next, walk);
-			if (skip_lower_level_walking(walk))
+			if (err == 1)
 				continue;
-			if (err)
+			if (err < 0)
 				break;
 		}
 		err = __walk_page_range(start, next, walk);
@@ -353,9 +333,9 @@ int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
 	VM_BUG_ON(!vma);
 	walk->vma = vma;
 	err = walk_page_test(vma->vm_start, vma->vm_end, walk);
-	if (skip_lower_level_walking(walk))
+	if (err == 1)
 		return 0;
-	if (err)
+	if (err < 0)
 		return err;
 	return __walk_page_range(vma->vm_start, vma->vm_end, walk);
 }
-- 
1.9.3

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>

  parent reply	other threads:[~2014-06-12 21:48 UTC|newest]

Thread overview: 42+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-06-12 21:48 [PATCH -mm v2 00/11] pagewalk: standardize current users, move pmd locking, apply to mincore Naoya Horiguchi
2014-06-12 21:48 ` Naoya Horiguchi
2014-06-12 21:48 ` [PATCH -mm v2 01/11] pagewalk: remove pgd_entry() and pud_entry() Naoya Horiguchi
2014-06-12 21:48   ` Naoya Horiguchi
2014-06-12 21:48 ` [PATCH -mm v2 02/11] madvise: cleanup swapin_walk_pmd_entry() Naoya Horiguchi
2014-06-12 21:48   ` Naoya Horiguchi
2014-06-15 20:24   ` Hugh Dickins
2014-06-15 20:24     ` Hugh Dickins
2014-06-16 15:59     ` Naoya Horiguchi
2014-06-16 15:59       ` Naoya Horiguchi
2014-06-12 21:48 ` [PATCH -mm v2 03/11] memcg: separate mem_cgroup_move_charge_pte_range() Naoya Horiguchi
2014-06-12 21:48   ` Naoya Horiguchi
2014-06-12 21:48 ` [PATCH -mm v2 04/11] pagewalk: move pmd_trans_huge_lock() from callbacks to common code Naoya Horiguchi
2014-06-12 21:48   ` Naoya Horiguchi
2014-06-17 14:27   ` Jerome Marchand
2014-06-17 14:27     ` Jerome Marchand
2014-06-17 15:01     ` Naoya Horiguchi
2014-06-17 15:01       ` Naoya Horiguchi
2014-06-18 15:13       ` Jerome Marchand
2014-06-18 15:13         ` Jerome Marchand
2014-06-18 15:31         ` Naoya Horiguchi
2014-06-18 15:31           ` Naoya Horiguchi
2014-06-12 21:48 ` Naoya Horiguchi [this message]
2014-06-12 21:48   ` [PATCH -mm v2 05/11] pagewalk: remove mm_walk->skip Naoya Horiguchi
2014-06-12 21:48 ` [PATCH -mm v2 06/11] pagewalk: add size to struct mm_walk Naoya Horiguchi
2014-06-12 21:48   ` Naoya Horiguchi
2014-06-12 22:07   ` Dave Hansen
2014-06-12 22:07     ` Dave Hansen
2014-06-12 22:36     ` Naoya Horiguchi
2014-06-12 21:48 ` [PATCH -mm v2 07/11] pagewalk: change type of arg of callbacks Naoya Horiguchi
2014-06-12 21:48   ` Naoya Horiguchi
2014-06-12 21:48 ` [PATCH -mm v2 08/11] pagewalk: update comment on walk_page_range() Naoya Horiguchi
2014-06-12 21:48   ` Naoya Horiguchi
2014-06-12 21:48 ` [PATCH -mm v2 09/11] fs/proc/task_mmu.c: refactor smaps Naoya Horiguchi
2014-06-12 21:48   ` Naoya Horiguchi
2014-06-12 21:48 ` [PATCH -mm v2 10/11] fs/proc/task_mmu.c: clean up gather_*_stats() Naoya Horiguchi
2014-06-12 21:48   ` Naoya Horiguchi
2014-06-12 21:48 ` [PATCH -mm v2 11/11] mincore: apply page table walker on do_mincore() Naoya Horiguchi
2014-06-12 21:48   ` Naoya Horiguchi
2014-06-12 21:56 ` [PATCH -mm v2 00/11] pagewalk: standardize current users, move pmd locking, apply to mincore Andrew Morton
2014-06-12 21:56   ` Andrew Morton
2014-06-12 22:21   ` Naoya Horiguchi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1402609691-13950-6-git-send-email-n-horiguchi@ah.jp.nec.com \
    --to=n-horiguchi@ah.jp.nec.com \
    --cc=akpm@linux-foundation.org \
    --cc=dave.hansen@intel.com \
    --cc=hughd@google.com \
    --cc=kirill@shutemov.name \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.