From: jglisse@redhat.com
To: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	linux-block@vger.kernel.org
Cc: linux-kernel@vger.kernel.org,
	"Jérôme Glisse" <jglisse@redhat.com>,
	"Andrea Arcangeli" <aarcange@redhat.com>
Subject: [RFC PATCH 78/79] mm/ksm: rename PAGE_MAPPING_KSM to PAGE_MAPPING_RONLY
Date: Wed,  4 Apr 2018 15:18:30 -0400	[thread overview]
Message-ID: <20180404191831.5378-41-jglisse@redhat.com> (raw)
In-Reply-To: <20180404191831.5378-1-jglisse@redhat.com>

From: Jérôme Glisse <jglisse@redhat.com>

This just renames all KSM-specific helpers to generic page read-only
names. No functional change.
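
For reviewers who want the mapping-pointer encoding at a glance, here is a
minimal userspace sketch (an editor's illustration, not part of the patch).
The constants and the bit test mirror the include/linux/page-flags.h hunk
below; the struct page stand-in, main() and the fake pointer value are
hypothetical, and the kernel version additionally folds tail pages via
compound_head(), omitted here:

    #include <stdio.h>

    #define PAGE_MAPPING_ANON    0x1UL
    #define PAGE_MAPPING_MOVABLE 0x2UL
    #define PAGE_MAPPING_RONLY   (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
    #define PAGE_MAPPING_FLAGS   (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

    struct page { void *mapping; };   /* stand-in for the kernel struct */

    /* Same test as the new PageReadOnly(): both low bits set. */
    static int page_read_only(const struct page *page)
    {
            return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                            PAGE_MAPPING_RONLY;
    }

    int main(void)
    {
            struct page p;
            unsigned long node = 0x1000;  /* fake stable_node address */

            p.mapping = (void *)(node | PAGE_MAPPING_RONLY);
            printf("%d\n", page_read_only(&p)); /* 1: read-only (KSM) page */

            p.mapping = (void *)(node | PAGE_MAPPING_ANON);
            printf("%d\n", page_read_only(&p)); /* 0: ordinary anon page */
            return 0;
    }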

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
---
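
Note for reviewers (editor's addition, below the cut line so it stays out of
git history): when CONFIG_PAGE_RONLY is not set, the TESTPAGEFLAG_FALSE(ReadOnly)
line in the page-flags.h hunk makes PageReadOnly() a constant-false stub, so
every new call site keeps its old non-KSM behaviour. A simplified sketch of
the expansion, assuming it mirrors how TESTPAGEFLAG_FALSE expands for the
other optional page flags:

    static inline int PageReadOnly(const struct page *page)
    {
            return 0; /* never read-only when the framework is disabled */
    }

Checks such as "PageAnon(page) && !PageReadOnly(page)" then reduce to plain
PageAnon(page) at compile time.
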
 fs/proc/page.c             |  2 +-
 include/linux/page-flags.h | 30 +++++++++++++++++-------------
 mm/ksm.c                   | 12 ++++++------
 mm/memory-failure.c        |  2 +-
 mm/memory.c                |  2 +-
 mm/migrate.c               |  6 +++---
 mm/mprotect.c              |  2 +-
 mm/page_idle.c             |  2 +-
 mm/rmap.c                  | 10 +++++-----
 mm/swapfile.c              |  2 +-
 10 files changed, 37 insertions(+), 33 deletions(-)

diff --git a/fs/proc/page.c b/fs/proc/page.c
index 1491918a33c3..00cc037758ef 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -110,7 +110,7 @@ u64 stable_page_flags(struct page *page)
 		u |= 1 << KPF_MMAP;
 	if (PageAnon(page))
 		u |= 1 << KPF_ANON;
-	if (PageKsm(page))
+	if (PageReadOnly(page))
 		u |= 1 << KPF_KSM;
 
 	/*
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 50c2b8786831..0338fb5dde8d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -374,12 +374,12 @@ PAGEFLAG(Idle, idle, PF_ANY)
  * page->mapping points to its anon_vma, not to a struct address_space;
  * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
  *
- * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_PAGE_RONLY is enabled,
  * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
  * bit; and then page->mapping points, not to an anon_vma, but to a private
- * structure which KSM associates with that merged page.  See ksm.h.
+ * structure which the page read-only code associates with that merged page.  See page-ronly.h.
  *
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
+ * PAGE_MAPPING_RONLY without PAGE_MAPPING_ANON is used for non-lru movable
  * page and then page->mapping points a struct address_space.
  *
  * Please note that, confusingly, "page_mapping" refers to the inode
@@ -388,7 +388,7 @@ PAGEFLAG(Idle, idle, PF_ANY)
  */
 #define PAGE_MAPPING_ANON	0x1
 #define PAGE_MAPPING_MOVABLE	0x2
-#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+#define PAGE_MAPPING_RONLY	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
 #define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
 
 static __always_inline int PageMappingFlags(struct page *page)
@@ -408,21 +408,25 @@ static __always_inline int __PageMovable(struct page *page)
 				PAGE_MAPPING_MOVABLE;
 }
 
-#ifdef CONFIG_KSM
-/*
- * A KSM page is one of those write-protected "shared pages" or "merged pages"
- * which KSM maps into multiple mms, wherever identical anonymous page content
- * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
- * anon_vma, but to that page's node of the stable tree.
+#ifdef CONFIG_PAGE_RONLY
+/* PageReadOnly() - Returns true if the page is read-only, false otherwise.
+ *
+ * @page: Page under test.
+ *
+ * A read-only page is a write-protected page.  Currently only KSM
+ * write-protects pages, as "shared pages" or "merged pages" which KSM maps
+ * into multiple mms, wherever identical anonymous page content is found in
+ * VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any anon_vma,
+ * but to that page's node of the stable tree.
  */
-static __always_inline int PageKsm(struct page *page)
+static __always_inline int PageReadOnly(struct page *page)
 {
 	page = compound_head(page);
 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
-				PAGE_MAPPING_KSM;
+				PAGE_MAPPING_RONLY;
 }
 #else
-TESTPAGEFLAG_FALSE(Ksm)
+TESTPAGEFLAG_FALSE(ReadOnly)
 #endif
 
 u64 stable_page_flags(struct page *page);
diff --git a/mm/ksm.c b/mm/ksm.c
index f9bd1251c288..6085068fb8b3 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -318,13 +318,13 @@ static void __init ksm_slab_free(void)
 
 static inline struct stable_node *page_stable_node(struct page *page)
 {
-	return PageKsm(page) ? page_rmapping(page) : NULL;
+	return PageReadOnly(page) ? page_rmapping(page) : NULL;
 }
 
 static inline void set_page_stable_node(struct page *page,
 					struct stable_node *stable_node)
 {
-	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
+	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_RONLY);
 }
 
 static __always_inline bool is_stable_node_chain(struct stable_node *chain)
@@ -470,7 +470,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 				FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
 		if (IS_ERR_OR_NULL(page))
 			break;
-		if (PageKsm(page))
+		if (PageReadOnly(page))
 			ret = handle_mm_fault(vma, addr,
 					FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
 		else
@@ -684,7 +684,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
 	unsigned long kpfn;
 
 	expected_mapping = (void *)((unsigned long)stable_node |
-					PAGE_MAPPING_KSM);
+					PAGE_MAPPING_RONLY);
 again:
 	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
 	page = pfn_to_page(kpfn);
@@ -2490,7 +2490,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 	struct anon_vma *anon_vma = page_anon_vma(page);
 	struct page *new_page;
 
-	if (PageKsm(page)) {
+	if (PageReadOnly(page)) {
 		if (page_stable_node(page) &&
 		    !(ksm_run & KSM_RUN_UNMERGE))
 			return page;	/* no need to copy it */
@@ -2521,7 +2521,7 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 	struct rmap_item *rmap_item;
 	int search_new_forks = 0;
 
-	VM_BUG_ON_PAGE(!PageKsm(page), page);
+	VM_BUG_ON_PAGE(!PageReadOnly(page), page);
 
 	/*
 	 * Rely on the page lock to protect against concurrent modifications
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 8291b75f42c8..18efefc20e67 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -947,7 +947,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (!page_mapped(hpage))
 		return true;
 
-	if (PageKsm(p)) {
+	if (PageReadOnly(p)) {
 		pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
 		return false;
 	}
diff --git a/mm/memory.c b/mm/memory.c
index fbd80bb7a50a..b565db41400f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2733,7 +2733,7 @@ static int do_wp_page(struct vm_fault *vmf)
 	 * Take out anonymous pages first, anonymous shared vmas are
 	 * not dirty accountable.
 	 */
-	if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
+	if (PageAnon(vmf->page) && !PageReadOnly(vmf->page)) {
 		int total_map_swapcount;
 		if (!trylock_page(vmf->page)) {
 			get_page(vmf->page);
diff --git a/mm/migrate.c b/mm/migrate.c
index e4b20ac6cf36..b73b31f6d2fd 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -214,7 +214,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	while (page_vma_mapped_walk(&pvmw)) {
-		if (PageKsm(page))
+		if (PageReadOnly(page))
 			new = page;
 		else
 			new = page - pvmw.page->index +
@@ -1038,7 +1038,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	 * because that implies that the anon page is no longer mapped
 	 * (and cannot be remapped so long as we hold the page lock).
 	 */
-	if (PageAnon(page) && !PageKsm(page))
+	if (PageAnon(page) && !PageReadOnly(page))
 		anon_vma = page_get_anon_vma(page);
 
 	/*
@@ -1077,7 +1077,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		}
 	} else if (page_mapped(page)) {
 		/* Establish migration ptes */
-		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
+		VM_BUG_ON_PAGE(PageAnon(page) && !PageReadOnly(page) && !anon_vma,
 				page);
 		try_to_unmap(page,
 			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e3309fcf586b..ab2f2e4961d8 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -81,7 +81,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				struct page *page;
 
 				page = vm_normal_page(vma, addr, oldpte);
-				if (!page || PageKsm(page))
+				if (!page || PageReadOnly(page))
 					continue;
 
 				/* Also skip shared copy-on-write pages */
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 0a49374e6931..7e5258e4d2ad 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -104,7 +104,7 @@ static void page_idle_clear_pte_refs(struct page *page)
 	    !page_rmapping(page))
 		return;
 
-	need_lock = !PageAnon(page) || PageKsm(page);
+	need_lock = !PageAnon(page) || PageReadOnly(page);
 	if (need_lock && !trylock_page(page))
 		return;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 822a3a0cd51c..70d37f77e7a4 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -855,7 +855,7 @@ int page_referenced(struct page *page,
 	if (!page_rmapping(page))
 		return 0;
 
-	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
+	if (!is_locked && (!PageAnon(page) || PageReadOnly(page))) {
 		we_locked = trylock_page(page);
 		if (!we_locked)
 			return 1;
@@ -1122,7 +1122,7 @@ void do_page_add_anon_rmap(struct page *page,
 			__inc_node_page_state(page, NR_ANON_THPS);
 		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
 	}
-	if (unlikely(PageKsm(page)))
+	if (unlikely(PageReadOnly(page)))
 		return;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -1660,7 +1660,7 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
 	 * temporary VMAs until after exec() completes.
 	 */
 	if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
-	    && !PageKsm(page) && PageAnon(page))
+	    && !PageReadOnly(page) && PageAnon(page))
 		rwc.invalid_vma = invalid_migration_vma;
 
 	if (flags & TTU_RMAP_LOCKED)
@@ -1842,7 +1842,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 
 void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 {
-	if (unlikely(PageKsm(page)))
+	if (unlikely(PageReadOnly(page)))
 		rmap_walk_ksm(page, rwc);
 	else if (PageAnon(page))
 		rmap_walk_anon(page, rwc, false);
@@ -1854,7 +1854,7 @@ void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
 {
 	/* no ksm support for now */
-	VM_BUG_ON_PAGE(PageKsm(page), page);
+	VM_BUG_ON_PAGE(PageReadOnly(page), page);
 	if (PageAnon(page))
 		rmap_walk_anon(page, rwc, true);
 	else
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c429c19e5d5d..83c73cca9e21 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1552,7 +1552,7 @@ bool reuse_swap_page(struct page *page, int *total_map_swapcount)
 	int count, total_mapcount, total_swapcount;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	if (unlikely(PageKsm(page)))
+	if (unlikely(PageReadOnly(page)))
 		return false;
 	count = page_trans_huge_map_swapcount(page, &total_mapcount,
 					      &total_swapcount);
-- 
2.14.3

Thread overview: 104+ messages

2018-04-04 19:17 [RFC PATCH 00/79] Generic page write protection and a solution to page waitqueue jglisse
2018-04-04 19:17 ` [RFC PATCH 04/79] pipe: add inode field to struct pipe_inode_info jglisse
2018-04-04 19:17 ` [RFC PATCH 05/79] mm/swap: add an helper to get address_space from swap_entry_t jglisse
2018-04-04 19:17 ` [RFC PATCH 06/79] mm/page: add helpers to dereference struct page index field jglisse
2018-04-04 19:17 ` [RFC PATCH 07/79] mm/page: add helpers to find mapping give a page and buffer head jglisse
2018-04-04 19:17 ` [RFC PATCH 08/79] mm/page: add helpers to find page mapping and private given a bio jglisse
2018-04-04 19:17 ` [RFC PATCH 09/79] fs: add struct address_space to read_cache_page() callback argument jglisse
2018-04-04 19:17 ` [RFC PATCH 20/79] fs: add struct address_space to write_cache_pages() " jglisse
2018-04-04 19:17 ` [RFC PATCH 22/79] fs: add struct inode to block_read_full_page() arguments jglisse
2018-04-04 19:17 ` [RFC PATCH 24/79] fs: add struct inode to nobh_writepage() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 26/79] fs: add struct address_space to mpage_readpage() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 27/79] fs: add struct address_space to fscache_read*() callback arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 28/79] fs: introduce page_is_truncated() helper jglisse
2018-04-04 19:18 ` [RFC PATCH 29/79] fs/block: add struct address_space to bdev_write_page() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 30/79] fs/block: add struct address_space to __block_write_begin() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 31/79] fs/block: add struct address_space to __block_write_begin_int() args jglisse
2018-04-04 19:18 ` [RFC PATCH 32/79] fs/block: do not rely on page->mapping get it from the context jglisse
2018-04-04 19:18 ` [RFC PATCH 33/79] fs/journal: add struct super_block to jbd2_journal_forget() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 34/79] fs/journal: add struct inode to jbd2_journal_revoke() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 35/79] fs/buffer: add struct address_space and struct page to end_io callback jglisse
2018-04-04 19:18 ` [RFC PATCH 36/79] fs/buffer: add struct super_block to bforget() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 37/79] fs/buffer: add struct super_block to __bforget() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 38/79] fs/buffer: add first buffer flag for first buffer_head in a page jglisse
2018-04-04 19:18 ` [RFC PATCH 39/79] fs/buffer: add struct address_space to clean_page_buffers() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 50/79] fs: stop relying on mapping field of struct page, get it from context jglisse
2018-04-04 19:18 ` [RFC PATCH 51/79] " jglisse
2018-04-04 19:18 ` [RFC PATCH 52/79] fs/buffer: use _page_has_buffers() instead of page_has_buffers() jglisse
2018-04-04 19:18 ` [RFC PATCH 63/79] mm/page: convert page's index lookup to be against specific mapping jglisse
2018-04-04 19:18 ` [RFC PATCH 64/79] mm/buffer: use _page_has_buffers() instead of page_has_buffers() jglisse
2018-04-04 19:18 ` [RFC PATCH 65/79] mm/swap: add struct swap_info_struct swap_readpage() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 68/79] mm/vma_address: convert page's index lookup to be against specific mapping jglisse
2018-04-04 19:18 ` [RFC PATCH 69/79] fs/journal: add struct address_space to jbd2_journal_try_to_free_buffers() arguments jglisse
2018-04-04 19:18 ` [RFC PATCH 70/79] mm: add struct address_space to mark_buffer_dirty() jglisse
2018-04-04 19:18 ` [RFC PATCH 71/79] mm: add struct address_space to set_page_dirty() jglisse
2018-04-04 19:18 ` [RFC PATCH 72/79] mm: add struct address_space to set_page_dirty_lock() jglisse
2018-04-04 19:18 ` [RFC PATCH 73/79] mm: pass down struct address_space to set_page_dirty() jglisse
2018-04-04 19:18 ` [RFC PATCH 74/79] mm/page_ronly: add config option for generic read only page framework jglisse
2018-04-04 19:18 ` [RFC PATCH 75/79] mm/page_ronly: add page read only core structure and helpers jglisse
2018-04-04 19:18 ` [RFC PATCH 76/79] mm/ksm: have ksm select PAGE_RONLY config jglisse
2018-04-04 19:18 ` [RFC PATCH 77/79] mm/ksm: hide set_page_stable_node() and page_stable_node() jglisse
2018-04-04 19:18 ` [RFC PATCH 78/79] mm/ksm: rename PAGE_MAPPING_KSM to PAGE_MAPPING_RONLY jglisse [this message]
2018-04-04 19:18 ` [RFC PATCH 79/79] mm/ksm: set page->mapping to page_ronly struct instead of stable_node jglisse
2018-04-18 14:13 ` [RFC PATCH 00/79] Generic page write protection and a solution to page waitqueue Jan Kara
2018-04-18 15:54   ` Jerome Glisse
2018-04-18 16:20     ` Darrick J. Wong
2018-04-19 10:32     ` Jan Kara
2018-04-19 14:52       ` Jerome Glisse
2018-04-20 19:57 ` Tim Chen
2018-04-20 22:19   ` Jerome Glisse
2018-04-20 23:48     ` Tim Chen