* [PATCH 0/5] Improve page poisoning implementation
@ 2020-04-08 15:01 Matthew Wilcox
  2020-04-08 15:01 ` [PATCH 1/5] mm: Constify a lot of struct page arguments Matthew Wilcox
                   ` (5 more replies)
  0 siblings, 6 replies; 25+ messages in thread
From: Matthew Wilcox @ 2020-04-08 15:01 UTC (permalink / raw)
  To: linux-mm; +Cc: Matthew Wilcox (Oracle), kirill.shutemov, pasha.tatashin

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

I really don't like that this feature is called page poisoning.
We already had something called page poisoning and it's when you detect
a memory error in a page.  This is just uninitialised pages.  I don't
have the time right now to do that renaming, so this series just fixes
some of the other problems I found with the implementation.

This hasn't been checked out by the build-bots yet, so there may be some
missing conversions.

Matthew Wilcox (Oracle) (5):
  mm: Constify a lot of struct page arguments
  mm: Rename PF_POISONED_CHECK to page_poison_check
  mm: Remove casting away of constness
  mm: Check for page poison in both page_to_nid implementations
  mm: Check page poison before finding a head page

 include/linux/mm.h              | 32 +++++++-------
 include/linux/mm_types.h        | 11 +----
 include/linux/mmdebug.h         |  4 +-
 include/linux/page-flags.h      | 74 ++++++++++++++++-----------------
 include/linux/page_owner.h      |  6 +--
 include/linux/page_ref.h        |  4 +-
 include/linux/pageblock-flags.h |  2 +-
 include/linux/pagemap.h         |  4 +-
 include/linux/swap.h            |  2 +-
 mm/debug.c                      |  6 +--
 mm/hugetlb.c                    |  6 +--
 mm/page_alloc.c                 | 12 +++---
 mm/page_owner.c                 |  2 +-
 mm/sparse.c                     |  1 +
 mm/swapfile.c                   |  6 +--
 mm/util.c                       |  6 +--
 16 files changed, 85 insertions(+), 93 deletions(-)

-- 
2.25.1



^ permalink raw reply	[flat|nested] 25+ messages in thread

* [PATCH 1/5] mm: Constify a lot of struct page arguments
  2020-04-08 15:01 [PATCH 0/5] Improve page poisoning implementation Matthew Wilcox
@ 2020-04-08 15:01 ` Matthew Wilcox
  2020-04-08 15:14   ` Pavel Tatashin
  2020-04-09 14:09   ` Kirill A. Shutemov
  2020-04-08 15:01 ` [PATCH 2/5] mm: Rename PF_POISONED_CHECK to page_poison_check Matthew Wilcox
                   ` (4 subsequent siblings)
  5 siblings, 2 replies; 25+ messages in thread
From: Matthew Wilcox @ 2020-04-08 15:01 UTC (permalink / raw)
  To: linux-mm; +Cc: Matthew Wilcox (Oracle), kirill.shutemov, pasha.tatashin

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

For an upcoming patch, we want to be able to pass a const struct page
to dump_page().  That means some inline functions have to become macros
so they can return a struct page with the same const-ness as their
argument.
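
As a sketch of what that buys us (illustrative only, not part of the
patch): with the statement-expression form the result type follows the
argument, so both of these work without any casts:

	/* Throwaway caller, just to show the types line up: */
	void example(struct page *p, const struct page *cp)
	{
		struct page *head = compound_head(p);
		const struct page *chead = compound_head(cp);
	}

A single static inline prototype would have had to pick one of those
two return types.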

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/mm.h              | 28 +++++++++++-----------
 include/linux/mm_types.h        | 11 ++-------
 include/linux/mmdebug.h         |  4 ++--
 include/linux/page-flags.h      | 41 +++++++++++++++------------------
 include/linux/page_owner.h      |  6 ++---
 include/linux/page_ref.h        |  4 ++--
 include/linux/pageblock-flags.h |  2 +-
 include/linux/pagemap.h         |  4 ++--
 include/linux/swap.h            |  2 +-
 mm/debug.c                      |  6 ++---
 mm/hugetlb.c                    |  6 ++---
 mm/page_alloc.c                 | 12 +++++-----
 mm/page_owner.c                 |  2 +-
 mm/swapfile.c                   |  6 ++---
 mm/util.c                       |  6 ++---
 15 files changed, 65 insertions(+), 75 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index e2f938c5a9d8..61aa63449e7e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -764,7 +764,7 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
 
 extern void kvfree(const void *addr);
 
-static inline int compound_mapcount(struct page *page)
+static inline int compound_mapcount(const struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 	page = compound_head(page);
@@ -781,9 +781,9 @@ static inline void page_mapcount_reset(struct page *page)
 	atomic_set(&(page)->_mapcount, -1);
 }
 
-int __page_mapcount(struct page *page);
+int __page_mapcount(const struct page *page);
 
-static inline int page_mapcount(struct page *page)
+static inline int page_mapcount(const struct page *page)
 {
 	VM_BUG_ON_PAGE(PageSlab(page), page);
 
@@ -857,14 +857,14 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
 	return compound_page_dtors[page[1].compound_dtor];
 }
 
-static inline unsigned int compound_order(struct page *page)
+static inline unsigned int compound_order(const struct page *page)
 {
 	if (!PageHead(page))
 		return 0;
 	return page[1].compound_order;
 }
 
-static inline bool hpage_pincount_available(struct page *page)
+static inline bool hpage_pincount_available(const struct page *page)
 {
 	/*
 	 * Can the page->hpage_pinned_refcount field be used? That field is in
@@ -875,7 +875,7 @@ static inline bool hpage_pincount_available(struct page *page)
 	return PageCompound(page) && compound_order(page) > 1;
 }
 
-static inline int compound_pincount(struct page *page)
+static inline int compound_pincount(const struct page *page)
 {
 	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
 	page = compound_head(page);
@@ -1495,12 +1495,12 @@ void page_address_init(void);
 
 extern void *page_rmapping(struct page *page);
 extern struct anon_vma *page_anon_vma(struct page *page);
-extern struct address_space *page_mapping(struct page *page);
+extern struct address_space *page_mapping(const struct page *page);
 
-extern struct address_space *__page_file_mapping(struct page *);
+extern struct address_space *__page_file_mapping(const struct page *);
 
 static inline
-struct address_space *page_file_mapping(struct page *page)
+struct address_space *page_file_mapping(const struct page *page)
 {
 	if (unlikely(PageSwapCache(page)))
 		return __page_file_mapping(page);
@@ -1508,13 +1508,13 @@ struct address_space *page_file_mapping(struct page *page)
 	return page->mapping;
 }
 
-extern pgoff_t __page_file_index(struct page *page);
+extern pgoff_t __page_file_index(const struct page *page);
 
 /*
  * Return the pagecache index of the passed page.  Regular pagecache pages
  * use ->index whereas swapcache pages use swp_offset(->private)
  */
-static inline pgoff_t page_index(struct page *page)
+static inline pgoff_t page_index(const struct page *page)
 {
 	if (unlikely(PageSwapCache(page)))
 		return __page_file_index(page);
@@ -1522,15 +1522,15 @@ static inline pgoff_t page_index(struct page *page)
 }
 
 bool page_mapped(struct page *page);
-struct address_space *page_mapping(struct page *page);
-struct address_space *page_mapping_file(struct page *page);
+struct address_space *page_mapping(const struct page *page);
+struct address_space *page_mapping_file(const struct page *page);
 
 /*
  * Return true only if the page has been allocated with
  * ALLOC_NO_WATERMARKS and the low watermark was not
  * met implying that the system is under some pressure.
  */
-static inline bool page_is_pfmemalloc(struct page *page)
+static inline bool page_is_pfmemalloc(const struct page *page)
 {
 	/*
 	 * Page index cannot be this large so this must be
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4aba6c0c2ba8..a8c3fa076f43 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -221,15 +221,8 @@ struct page {
 #endif
 } _struct_page_alignment;
 
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
-{
-	return &page[1].compound_mapcount;
-}
-
-static inline atomic_t *compound_pincount_ptr(struct page *page)
-{
-	return &page[2].hpage_pinned_refcount;
-}
+#define compound_mapcount_ptr(page)	(&(page)[1].compound_mapcount)
+#define compound_pincount_ptr(page)	(&(page)[2].hpage_pinned_refcount)
 
 /*
  * Used for sizing the vmemmap region on some architectures
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 2ad72d2c8cc5..71246df469a0 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -9,8 +9,8 @@ struct page;
 struct vm_area_struct;
 struct mm_struct;
 
-extern void dump_page(struct page *page, const char *reason);
-extern void __dump_page(struct page *page, const char *reason);
+extern void dump_page(const struct page *page, const char *reason);
+extern void __dump_page(const struct page *page, const char *reason);
 void dump_vma(const struct vm_area_struct *vma);
 void dump_mm(const struct mm_struct *mm);
 
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 222f6f7b2bb3..f1ab1f2e6aba 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -175,23 +175,20 @@ enum pageflags {
 
 #ifndef __GENERATING_BOUNDS_H
 
-struct page;	/* forward declaration */
+#define compound_head(page) ({						\
+	__typeof__(page) _page = page;					\
+	unsigned long head = READ_ONCE(_page->compound_head);		\
+	if (unlikely(head & 1))						\
+		_page = (void *)(head - 1);				\
+	_page;								\
+})
 
-static inline struct page *compound_head(struct page *page)
-{
-	unsigned long head = READ_ONCE(page->compound_head);
-
-	if (unlikely(head & 1))
-		return (struct page *) (head - 1);
-	return page;
-}
-
-static __always_inline int PageTail(struct page *page)
+static __always_inline int PageTail(const struct page *page)
 {
 	return READ_ONCE(page->compound_head) & 1;
 }
 
-static __always_inline int PageCompound(struct page *page)
+static __always_inline int PageCompound(const struct page *page)
 {
 	return test_bit(PG_head, &page->flags) || PageTail(page);
 }
@@ -252,7 +249,7 @@ static inline void page_init_poison(struct page *page, size_t size)
  * Macros to create function definitions for page flags
  */
 #define TESTPAGEFLAG(uname, lname, policy)				\
-static __always_inline int Page##uname(struct page *page)		\
+static __always_inline int Page##uname(const struct page *page)		\
 	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
 
 #define SETPAGEFLAG(uname, lname, policy)				\
@@ -385,7 +382,7 @@ PAGEFLAG_FALSE(HighMem)
 #endif
 
 #ifdef CONFIG_SWAP
-static __always_inline int PageSwapCache(struct page *page)
+static __always_inline int PageSwapCache(const struct page *page)
 {
 #ifdef CONFIG_THP_SWAP
 	page = compound_head(page);
@@ -474,7 +471,7 @@ static __always_inline int PageMappingFlags(struct page *page)
 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
 }
 
-static __always_inline int PageAnon(struct page *page)
+static __always_inline int PageAnon(const struct page *page)
 {
 	page = compound_head(page);
 	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -493,7 +490,7 @@ static __always_inline int __PageMovable(struct page *page)
  * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
  * anon_vma, but to that page's node of the stable tree.
  */
-static __always_inline int PageKsm(struct page *page)
+static __always_inline int PageKsm(const struct page *page)
 {
 	page = compound_head(page);
 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
@@ -586,14 +583,14 @@ static inline void ClearPageCompound(struct page *page)
 #define PG_head_mask ((1UL << PG_head))
 
 #ifdef CONFIG_HUGETLB_PAGE
-int PageHuge(struct page *page);
-int PageHeadHuge(struct page *page);
-bool page_huge_active(struct page *page);
+int PageHuge(const struct page *page);
+int PageHeadHuge(const struct page *page);
+bool page_huge_active(const struct page *page);
 #else
 TESTPAGEFLAG_FALSE(Huge)
 TESTPAGEFLAG_FALSE(HeadHuge)
 
-static inline bool page_huge_active(struct page *page)
+static inline bool page_huge_active(const struct page *page)
 {
 	return 0;
 }
@@ -667,7 +664,7 @@ static inline int PageTransCompoundMap(struct page *page)
  * and hugetlbfs pages, so it should only be called when it's known
  * that hugetlbfs pages aren't involved.
  */
-static inline int PageTransTail(struct page *page)
+static inline int PageTransTail(const struct page *page)
 {
 	return PageTail(page);
 }
@@ -685,7 +682,7 @@ static inline int PageTransTail(struct page *page)
  *
  * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
  */
-static inline int PageDoubleMap(struct page *page)
+static inline int PageDoubleMap(const struct page *page)
 {
 	return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
 }
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 8679ccd722e8..16d4885e2f7a 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -14,7 +14,7 @@ extern void __set_page_owner(struct page *page,
 extern void __split_page_owner(struct page *page, unsigned int order);
 extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
-extern void __dump_page_owner(struct page *page);
+extern void __dump_page_owner(const struct page *page);
 extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 					pg_data_t *pgdat, struct zone *zone);
 
@@ -46,7 +46,7 @@ static inline void set_page_owner_migrate_reason(struct page *page, int reason)
 	if (static_branch_unlikely(&page_owner_inited))
 		__set_page_owner_migrate_reason(page, reason);
 }
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
 {
 	if (static_branch_unlikely(&page_owner_inited))
 		__dump_page_owner(page);
@@ -69,7 +69,7 @@ static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
 static inline void set_page_owner_migrate_reason(struct page *page, int reason)
 {
 }
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
 {
 }
 #endif /* CONFIG_PAGE_OWNER */
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index d27701199a4d..f2c4872f50f0 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -62,12 +62,12 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
 
 #endif
 
-static inline int page_ref_count(struct page *page)
+static inline int page_ref_count(const struct page *page)
 {
 	return atomic_read(&page->_refcount);
 }
 
-static inline int page_count(struct page *page)
+static inline int page_count(const struct page *page)
 {
 	return atomic_read(&compound_head(page)->_refcount);
 }
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index c066fec5b74b..aa13dca7bf04 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -54,7 +54,7 @@ extern unsigned int pageblock_order;
 /* Forward declaration */
 struct page;
 
-unsigned long get_pfnblock_flags_mask(struct page *page,
+unsigned long get_pfnblock_flags_mask(const struct page *page,
 				unsigned long pfn,
 				unsigned long end_bitidx,
 				unsigned long mask);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a8f7bd8ea1c6..ca1aaa8ce813 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -401,7 +401,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
  * Get index of the page with in radix-tree
  * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
  */
-static inline pgoff_t page_to_index(struct page *page)
+static inline pgoff_t page_to_index(const struct page *page)
 {
 	pgoff_t pgoff;
 
@@ -421,7 +421,7 @@ static inline pgoff_t page_to_index(struct page *page)
  * Get the offset in PAGE_SIZE.
  * (TODO: hugepage should have ->index in PAGE_SIZE)
  */
-static inline pgoff_t page_to_pgoff(struct page *page)
+static inline pgoff_t page_to_pgoff(const struct page *page)
 {
 	if (unlikely(PageHeadHuge(page)))
 		return page->index << compound_order(page);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b835d8dbea0e..6e602ce0eb0c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -464,7 +464,7 @@ extern int page_swapcount(struct page *);
 extern int __swap_count(swp_entry_t entry);
 extern int __swp_swapcount(swp_entry_t entry);
 extern int swp_swapcount(swp_entry_t entry);
-extern struct swap_info_struct *page_swap_info(struct page *);
+extern struct swap_info_struct *page_swap_info(const struct page *);
 extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
 extern bool reuse_swap_page(struct page *, int *);
 extern int try_to_free_swap(struct page *);
diff --git a/mm/debug.c b/mm/debug.c
index 2189357f0987..69862d3b04e5 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -42,9 +42,9 @@ const struct trace_print_flags vmaflag_names[] = {
 	{0, NULL}
 };
 
-void __dump_page(struct page *page, const char *reason)
+void __dump_page(const struct page *page, const char *reason)
 {
-	struct page *head = compound_head(page);
+	const struct page *head = compound_head(page);
 	struct address_space *mapping;
 	bool page_poisoned = PagePoisoned(page);
 	bool compound = PageCompound(page);
@@ -140,7 +140,7 @@ void __dump_page(struct page *page, const char *reason)
 #endif
 }
 
-void dump_page(struct page *page, const char *reason)
+void dump_page(const struct page *page, const char *reason)
 {
 	__dump_page(page, reason);
 	dump_page_owner(page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f5fb53fdfa02..0131974369cb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1305,7 +1305,7 @@ struct hstate *size_to_hstate(unsigned long size)
  *
  * This function can be called for tail pages, but never returns true for them.
  */
-bool page_huge_active(struct page *page)
+bool page_huge_active(const struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageHuge(page), page);
 	return PageHead(page) && PagePrivate(&page[1]);
@@ -1509,7 +1509,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
  * transparent huge pages.  See the PageTransHuge() documentation for more
  * details.
  */
-int PageHuge(struct page *page)
+int PageHuge(const struct page *page)
 {
 	if (!PageCompound(page))
 		return 0;
@@ -1523,7 +1523,7 @@ EXPORT_SYMBOL_GPL(PageHuge);
  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
  * normal or transparent huge pages.
  */
-int PageHeadHuge(struct page *page_head)
+int PageHeadHuge(const struct page *page_head)
 {
 	if (!PageHead(page_head))
 		return 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 114c56c3685d..9634c6e44197 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -446,7 +446,7 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 #endif
 
 /* Return a pointer to the bitmap storing bits affecting a block of pages */
-static inline unsigned long *get_pageblock_bitmap(struct page *page,
+static inline unsigned long *get_pageblock_bitmap(const struct page *page,
 							unsigned long pfn)
 {
 #ifdef CONFIG_SPARSEMEM
@@ -456,7 +456,7 @@ static inline unsigned long *get_pageblock_bitmap(struct page *page,
 #endif /* CONFIG_SPARSEMEM */
 }
 
-static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
+static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
 {
 #ifdef CONFIG_SPARSEMEM
 	pfn &= (PAGES_PER_SECTION-1);
@@ -476,7 +476,8 @@ static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
  *
  * Return: pageblock_bits flags
  */
-static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
+static __always_inline
+unsigned long __get_pfnblock_flags_mask(const struct page *page,
 					unsigned long pfn,
 					unsigned long end_bitidx,
 					unsigned long mask)
@@ -495,9 +496,8 @@ static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page
 	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
 }
 
-unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
-					unsigned long end_bitidx,
-					unsigned long mask)
+unsigned long get_pfnblock_flags_mask(const struct page *page,
+		unsigned long pfn, unsigned long end_bitidx, unsigned long mask)
 {
 	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
 }
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 18ecde9f45b2..a22afbb95c46 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -399,7 +399,7 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
 	return -ENOMEM;
 }
 
-void __dump_page_owner(struct page *page)
+void __dump_page_owner(const struct page *page)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
 	struct page_owner *page_owner;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 5871a2aa86a5..9d9e01f1716c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3489,7 +3489,7 @@ struct swap_info_struct *swp_swap_info(swp_entry_t entry)
 	return swap_type_to_swap_info(swp_type(entry));
 }
 
-struct swap_info_struct *page_swap_info(struct page *page)
+struct swap_info_struct *page_swap_info(const struct page *page)
 {
 	swp_entry_t entry = { .val = page_private(page) };
 	return swp_swap_info(entry);
@@ -3498,13 +3498,13 @@ struct swap_info_struct *page_swap_info(struct page *page)
 /*
  * out-of-line __page_file_ methods to avoid include hell.
  */
-struct address_space *__page_file_mapping(struct page *page)
+struct address_space *__page_file_mapping(const struct page *page)
 {
 	return page_swap_info(page)->swap_file->f_mapping;
 }
 EXPORT_SYMBOL_GPL(__page_file_mapping);
 
-pgoff_t __page_file_index(struct page *page)
+pgoff_t __page_file_index(const struct page *page)
 {
 	swp_entry_t swap = { .val = page_private(page) };
 	return swp_offset(swap);
diff --git a/mm/util.c b/mm/util.c
index 988d11e6c17c..05f5b36f81f9 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -655,7 +655,7 @@ struct anon_vma *page_anon_vma(struct page *page)
 	return __page_rmapping(page);
 }
 
-struct address_space *page_mapping(struct page *page)
+struct address_space *page_mapping(const struct page *page)
 {
 	struct address_space *mapping;
 
@@ -683,7 +683,7 @@ EXPORT_SYMBOL(page_mapping);
 /*
  * For file cache pages, return the address_space, otherwise return NULL
  */
-struct address_space *page_mapping_file(struct page *page)
+struct address_space *page_mapping_file(const struct page *page)
 {
 	if (unlikely(PageSwapCache(page)))
 		return NULL;
@@ -691,7 +691,7 @@ struct address_space *page_mapping_file(struct page *page)
 }
 
 /* Slow path of page_mapcount() for compound pages */
-int __page_mapcount(struct page *page)
+int __page_mapcount(const struct page *page)
 {
 	int ret;
 
-- 
2.25.1



^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [PATCH 2/5] mm: Rename PF_POISONED_CHECK to page_poison_check
  2020-04-08 15:01 [PATCH 0/5] Improve page poisoning implementation Matthew Wilcox
  2020-04-08 15:01 ` [PATCH 1/5] mm: Constify a lot of struct page arguments Matthew Wilcox
@ 2020-04-08 15:01 ` Matthew Wilcox
  2020-04-08 15:21   ` Pavel Tatashin
  2020-04-09 14:14   ` Kirill A. Shutemov
  2020-04-08 15:01 ` [PATCH 3/5] mm: Remove casting away of constness Matthew Wilcox
                   ` (3 subsequent siblings)
  5 siblings, 2 replies; 25+ messages in thread
From: Matthew Wilcox @ 2020-04-08 15:01 UTC (permalink / raw)
  To: linux-mm; +Cc: Matthew Wilcox (Oracle), kirill.shutemov, pasha.tatashin

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

The PF_POISONED_CHECK name is misleading: despite the PF_ prefix, it is
not a page flag policy, just a helper the policies use.  Switch from
VM_BUG_ON_PGFLAGS to VM_BUG_ON_PAGE.  Move the implementation further up
in the file for the benefit of future patches.
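
For context (hand-expanded, so only illustrative), a PF_HEAD user such
as PageDirty() ends up as roughly:

	static __always_inline int PageDirty(const struct page *page)
	{
		return test_bit(PG_dirty,
				&page_poison_check(compound_head(page))->flags);
	}

The poison check still runs on every flag access; it just no longer
pretends to be one of the PF_ policies.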

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/mm.h         |  2 +-
 include/linux/page-flags.h | 34 +++++++++++++++++-----------------
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 61aa63449e7e..933450bdcfd4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1248,7 +1248,7 @@ static inline int page_to_nid(const struct page *page)
 {
 	struct page *p = (struct page *)page;
 
-	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
+	return (page_poison_check(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
 }
 #endif
 
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f1ab1f2e6aba..331aef35f3e0 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -175,6 +175,18 @@ enum pageflags {
 
 #ifndef __GENERATING_BOUNDS_H
 
+#define	PAGE_POISON_PATTERN	-1l
+static inline int PagePoisoned(const struct page *page)
+{
+	return page->flags == PAGE_POISON_PATTERN;
+}
+
+#define page_poison_check(page) ({					\
+	__typeof__(page) ___page = page;				\
+	VM_BUG_ON_PAGE(PagePoisoned(___page), ___page);			\
+	___page;							\
+})
+
 #define compound_head(page) ({						\
 	__typeof__(page) _page = page;					\
 	unsigned long head = READ_ONCE(_page->compound_head);		\
@@ -193,12 +205,6 @@ static __always_inline int PageCompound(const struct page *page)
 	return test_bit(PG_head, &page->flags) || PageTail(page);
 }
 
-#define	PAGE_POISON_PATTERN	-1l
-static inline int PagePoisoned(const struct page *page)
-{
-	return page->flags == PAGE_POISON_PATTERN;
-}
-
 #ifdef CONFIG_DEBUG_VM
 void page_init_poison(struct page *page, size_t size);
 #else
@@ -210,9 +216,6 @@ static inline void page_init_poison(struct page *page, size_t size)
 /*
  * Page flags policies wrt compound pages
  *
- * PF_POISONED_CHECK
- *     check if this struct page poisoned/uninitialized
- *
  * PF_ANY:
  *     the page flag is relevant for small, head and tail pages.
  *
@@ -230,20 +233,17 @@ static inline void page_init_poison(struct page *page, size_t size)
  * PF_NO_COMPOUND:
  *     the page flag is not relevant for compound pages.
  */
-#define PF_POISONED_CHECK(page) ({					\
-		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
-		page; })
-#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
-#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
+#define PF_ANY(page, enforce)	page_poison_check(page)
+#define PF_HEAD(page, enforce)	page_poison_check(compound_head(page))
 #define PF_ONLY_HEAD(page, enforce) ({					\
 		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
-		PF_POISONED_CHECK(page); })
+		page_poison_check(page); })
 #define PF_NO_TAIL(page, enforce) ({					\
 		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
-		PF_POISONED_CHECK(compound_head(page)); })
+		page_poison_check(compound_head(page)); })
 #define PF_NO_COMPOUND(page, enforce) ({				\
 		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
-		PF_POISONED_CHECK(page); })
+		page_poison_check(page); })
 
 /*
  * Macros to create function definitions for page flags
-- 
2.25.1



^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [PATCH 3/5] mm: Remove casting away of constness
  2020-04-08 15:01 [PATCH 0/5] Improve page poisoning implementation Matthew Wilcox
  2020-04-08 15:01 ` [PATCH 1/5] mm: Constify a lot of struct page arguments Matthew Wilcox
  2020-04-08 15:01 ` [PATCH 2/5] mm: Rename PF_POISONED_CHECK to page_poison_check Matthew Wilcox
@ 2020-04-08 15:01 ` Matthew Wilcox
  2020-04-08 15:23   ` Pavel Tatashin
  2020-04-09 14:19   ` Kirill A. Shutemov
  2020-04-08 15:01 ` [PATCH 4/5] mm: Check for page poison in both page_to_nid implementations Matthew Wilcox
                   ` (2 subsequent siblings)
  5 siblings, 2 replies; 25+ messages in thread
From: Matthew Wilcox @ 2020-04-08 15:01 UTC (permalink / raw)
  To: linux-mm; +Cc: Matthew Wilcox (Oracle), kirill.shutemov, pasha.tatashin

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Now that dump_page can take a const struct page pointer, we can get rid
of the cast in page_to_nid().
---
 include/linux/mm.h | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 933450bdcfd4..047144b894bd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1246,9 +1246,7 @@ extern int page_to_nid(const struct page *page);
 #else
 static inline int page_to_nid(const struct page *page)
 {
-	struct page *p = (struct page *)page;
-
-	return (page_poison_check(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
+	return (page_poison_check(page)->flags >> NODES_PGSHIFT) & NODES_MASK;
 }
 #endif
 
-- 
2.25.1



^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [PATCH 4/5] mm: Check for page poison in both page_to_nid implementations
  2020-04-08 15:01 [PATCH 0/5] Improve page poisoning implementation Matthew Wilcox
                   ` (2 preceding siblings ...)
  2020-04-08 15:01 ` [PATCH 3/5] mm: Remove casting away of constness Matthew Wilcox
@ 2020-04-08 15:01 ` Matthew Wilcox
  2020-04-08 15:24   ` Pavel Tatashin
  2020-04-09 14:21   ` Kirill A. Shutemov
  2020-04-08 15:01 ` [PATCH 5/5] mm: Check page poison before finding a head page Matthew Wilcox
  2020-04-08 15:11 ` [PATCH 0/5] Improve page poisoning implementation Pavel Tatashin
  5 siblings, 2 replies; 25+ messages in thread
From: Matthew Wilcox @ 2020-04-08 15:01 UTC (permalink / raw)
  To: linux-mm; +Cc: Matthew Wilcox (Oracle), kirill.shutemov, pasha.tatashin

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

The earlier patch that added page poison checking in page_to_nid()
only modified one implementation; both configuration options should
have this check.
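
For reference, the flag-based implementation in mm.h (as of the
previous patch in this series) already reads:

	static inline int page_to_nid(const struct page *page)
	{
		return (page_poison_check(page)->flags >> NODES_PGSHIFT) & NODES_MASK;
	}

This patch adds the equivalent check to the section-table variant in
sparse.c, used when the node number does not fit in page->flags.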

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/sparse.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mm/sparse.c b/mm/sparse.c
index 1aee5a481571..39114451408a 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -46,6 +46,7 @@ static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
 
 int page_to_nid(const struct page *page)
 {
+	page_poison_check(page);
 	return section_to_node_table[page_to_section(page)];
 }
 EXPORT_SYMBOL(page_to_nid);
-- 
2.25.1



^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [PATCH 5/5] mm: Check page poison before finding a head page
  2020-04-08 15:01 [PATCH 0/5] Improve page poisoning implementation Matthew Wilcox
                   ` (3 preceding siblings ...)
  2020-04-08 15:01 ` [PATCH 4/5] mm: Check for page poison in both page_to_nid implementations Matthew Wilcox
@ 2020-04-08 15:01 ` Matthew Wilcox
  2020-04-08 15:32   ` Pavel Tatashin
  2020-04-09 14:25   ` Kirill A. Shutemov
  2020-04-08 15:11 ` [PATCH 0/5] Improve page poisoning implementation Pavel Tatashin
  5 siblings, 2 replies; 25+ messages in thread
From: Matthew Wilcox @ 2020-04-08 15:01 UTC (permalink / raw)
  To: linux-mm; +Cc: Matthew Wilcox (Oracle), kirill.shutemov, pasha.tatashin

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

If a page is poisoned, the page->compound_head will be set to -1.
Since it has bit zero set, we will think it is a tail page, and the head
page is at 0xff..fe.  Checking said head page for being poisoned will
not have good results.  Therefore we need to check for poison in each
of compound_head(), PageTail() and PageCompound() (and can remove the
checks which are now redundant from the PF_ macros).
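
To spell out the failure mode (illustrative 64-bit values, not code
from this patch):

	unsigned long head = READ_ONCE(page->compound_head);	/* 0xffffffffffffffff */

	if (head & 1)		/* bit zero is set, so it looks like a tail page */
		page = (struct page *)(head - 1);	/* 0xfffffffffffffffe */

The old PF_ macros then ran PagePoisoned() on that bogus pointer
instead of on the page we were actually given.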

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/page-flags.h | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 331aef35f3e0..340ceeeda8ed 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -190,6 +190,7 @@ static inline int PagePoisoned(const struct page *page)
 #define compound_head(page) ({						\
 	__typeof__(page) _page = page;					\
 	unsigned long head = READ_ONCE(_page->compound_head);		\
+	VM_BUG_ON_PAGE(head == PAGE_POISON_PATTERN, page);		\
 	if (unlikely(head & 1))						\
 		_page = (void *)(head - 1);				\
 	_page;								\
@@ -197,11 +198,13 @@ static inline int PagePoisoned(const struct page *page)
 
 static __always_inline int PageTail(const struct page *page)
 {
+	page_poison_check(page);
 	return READ_ONCE(page->compound_head) & 1;
 }
 
 static __always_inline int PageCompound(const struct page *page)
 {
+	page_poison_check(page);
 	return test_bit(PG_head, &page->flags) || PageTail(page);
 }
 
@@ -234,13 +237,13 @@ static inline void page_init_poison(struct page *page, size_t size)
  *     the page flag is not relevant for compound pages.
  */
 #define PF_ANY(page, enforce)	page_poison_check(page)
-#define PF_HEAD(page, enforce)	page_poison_check(compound_head(page))
+#define PF_HEAD(page, enforce)	compound_head(page)
 #define PF_ONLY_HEAD(page, enforce) ({					\
 		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
-		page_poison_check(page); })
+		page; })
 #define PF_NO_TAIL(page, enforce) ({					\
 		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
-		page_poison_check(compound_head(page)); })
+		compound_head(page); })
 #define PF_NO_COMPOUND(page, enforce) ({				\
 		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
 		page_poison_check(page); })
-- 
2.25.1



^ permalink raw reply related	[flat|nested] 25+ messages in thread

* Re: [PATCH 0/5] Improve page poisoning implementation
  2020-04-08 15:01 [PATCH 0/5] Improve page poisoning implementation Matthew Wilcox
                   ` (4 preceding siblings ...)
  2020-04-08 15:01 ` [PATCH 5/5] mm: Check page poison before finding a head page Matthew Wilcox
@ 2020-04-08 15:11 ` Pavel Tatashin
  2020-04-09 20:55   ` John Hubbard
  5 siblings, 1 reply; 25+ messages in thread
From: Pavel Tatashin @ 2020-04-08 15:11 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, Kirill A. Shutemov

On Wed, Apr 8, 2020 at 11:01 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>
> I really don't like that this feature is called page poisoning.
> We already had something called page poisoning and it's when you detect
> a memory error in a page.  This is just uninitialised pages.  I don't

Hi Matthew,

Thank you for working on this. Uninitialized struct pages are often
zeroed by firmware, and there were a number of implicit assumptions
about that memory when I worked on deferred page initialization.
That is why it is important to also test with struct pages
deliberately set to a pattern that is not all zeroes, something that
can happen during kexec, or when memory is allocated and freed by the
kernel during boot. We have caught a good number of bugs using this
mechanism. So this is poisoning, but I agree the "page poisoning"
name is misleading, as we already use that term elsewhere. Let's
agree on a better term: how about memmap poisoning
(s/page_poisoning/memmap_poisoning/)?

Pasha


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 1/5] mm: Constify a lot of struct page arguments
  2020-04-08 15:01 ` [PATCH 1/5] mm: Constify a lot of struct page arguments Matthew Wilcox
@ 2020-04-08 15:14   ` Pavel Tatashin
  2020-04-09 14:09   ` Kirill A. Shutemov
  1 sibling, 0 replies; 25+ messages in thread
From: Pavel Tatashin @ 2020-04-08 15:14 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, Kirill A. Shutemov

On Wed, Apr 8, 2020 at 11:01 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>
> For an upcoming patch, we want to be able to pass a const struct page
> to dump_page().  That means some inline functions have to become macros
> so they can return a struct page with the same const-ness as their
> argument.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 2/5] mm: Rename PF_POISONED_CHECK to page_poison_check
  2020-04-08 15:01 ` [PATCH 2/5] mm: Rename PF_POISONED_CHECK to page_poison_check Matthew Wilcox
@ 2020-04-08 15:21   ` Pavel Tatashin
  2020-04-09 14:14   ` Kirill A. Shutemov
  1 sibling, 0 replies; 25+ messages in thread
From: Pavel Tatashin @ 2020-04-08 15:21 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, Kirill A. Shutemov

On Wed, Apr 8, 2020 at 11:01 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>
> The PF_POISONED_CHECK name is misleading: despite the PF_ prefix, it is
> not a page flag policy, just a helper the policies use.  Switch from
> VM_BUG_ON_PGFLAGS to VM_BUG_ON_PAGE.  Move the implementation further up
> in the file for the benefit of future patches.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 3/5] mm: Remove casting away of constness
  2020-04-08 15:01 ` [PATCH 3/5] mm: Remove casting away of constness Matthew Wilcox
@ 2020-04-08 15:23   ` Pavel Tatashin
  2020-04-09 14:19   ` Kirill A. Shutemov
  1 sibling, 0 replies; 25+ messages in thread
From: Pavel Tatashin @ 2020-04-08 15:23 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, Kirill A. Shutemov

On Wed, Apr 8, 2020 at 11:01 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>
> Now that dump_page can take a const struct page pointer, we can get rid
> of the cast in page_to_nid().

Missing Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> ?

Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 4/5] mm: Check for page poison in both page_to_nid implementations
  2020-04-08 15:01 ` [PATCH 4/5] mm: Check for page poison in both page_to_nid implementations Matthew Wilcox
@ 2020-04-08 15:24   ` Pavel Tatashin
  2020-04-09 14:21   ` Kirill A. Shutemov
  1 sibling, 0 replies; 25+ messages in thread
From: Pavel Tatashin @ 2020-04-08 15:24 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, Kirill A. Shutemov

On Wed, Apr 8, 2020 at 11:01 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>
> The earlier patch that added page poison checking in page_to_nid()
> only modified one implementation; both configuration options should
> have this check.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 5/5] mm: Check page poison before finding a head page
  2020-04-08 15:01 ` [PATCH 5/5] mm: Check page poison before finding a head page Matthew Wilcox
@ 2020-04-08 15:32   ` Pavel Tatashin
  2020-04-09 14:25   ` Kirill A. Shutemov
  1 sibling, 0 replies; 25+ messages in thread
From: Pavel Tatashin @ 2020-04-08 15:32 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, Kirill A. Shutemov

On Wed, Apr 8, 2020 at 11:01 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>
> If a page is poisoned, the page->compound_head will be set to -1.
> Since it has bit zero set, we will think it is a tail page, and the head
> page is at 0xff..fe.  Checking said head page for being poisoned will
> not have good results.  Therefore we need to check for poison in each
> of compound_head(), PageTail() and PageCompound() (and can remove the
> checks which are now redundant from the PF_ macros).
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 1/5] mm: Constify a lot of struct page arguments
  2020-04-08 15:01 ` [PATCH 1/5] mm: Constify a lot of struct page arguments Matthew Wilcox
  2020-04-08 15:14   ` Pavel Tatashin
@ 2020-04-09 14:09   ` Kirill A. Shutemov
  2020-04-09 14:15     ` Matthew Wilcox
  1 sibling, 1 reply; 25+ messages in thread
From: Kirill A. Shutemov @ 2020-04-09 14:09 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, kirill.shutemov, pasha.tatashin

On Wed, Apr 08, 2020 at 08:01:44AM -0700, Matthew Wilcox wrote:
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 4aba6c0c2ba8..a8c3fa076f43 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -221,15 +221,8 @@ struct page {
>  #endif
>  } _struct_page_alignment;
>  
> -static inline atomic_t *compound_mapcount_ptr(struct page *page)
> -{
> -	return &page[1].compound_mapcount;
> -}
> -
> -static inline atomic_t *compound_pincount_ptr(struct page *page)
> -{
> -	return &page[2].hpage_pinned_refcount;
> -}
> +#define compound_mapcount_ptr(page)	(&(page)[1].compound_mapcount)
> +#define compound_pincount_ptr(page)	(&(page)[2].hpage_pinned_refcount)

Looks like this conversion is not covered by the reason given in the
commit message. Hm?

> +#define compound_head(page) ({						\
> +	__typeof__(page) _page = page;					\
> +	unsigned long head = READ_ONCE(_page->compound_head);		\
> +	if (unlikely(head & 1))						\
> +		_page = (void *)(head - 1);				\
> +	_page;								\
> +})

Ugh..

Other option would be to make compound_head() take 'const struct page *'
and return 'void *'. It arguably would provide better type safety, no?

-- 
 Kirill A. Shutemov


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 2/5] mm: Rename PF_POISONED_CHECK to page_poison_check
  2020-04-08 15:01 ` [PATCH 2/5] mm: Rename PF_POISONED_CHECK to page_poison_check Matthew Wilcox
  2020-04-08 15:21   ` Pavel Tatashin
@ 2020-04-09 14:14   ` Kirill A. Shutemov
  1 sibling, 0 replies; 25+ messages in thread
From: Kirill A. Shutemov @ 2020-04-09 14:14 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, kirill.shutemov, pasha.tatashin

On Wed, Apr 08, 2020 at 08:01:45AM -0700, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> The PF_POISONED_CHECK name is misleading: despite the PF_ prefix, it is
> not a page flag policy, just a helper the policies use.  Switch from
> VM_BUG_ON_PGFLAGS to VM_BUG_ON_PAGE.  Move the implementation further up
> in the file for the benefit of future patches.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

-- 
 Kirill A. Shutemov


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 1/5] mm: Constify a lot of struct page arguments
  2020-04-09 14:09   ` Kirill A. Shutemov
@ 2020-04-09 14:15     ` Matthew Wilcox
  2020-04-09 14:28       ` Kirill A. Shutemov
  0 siblings, 1 reply; 25+ messages in thread
From: Matthew Wilcox @ 2020-04-09 14:15 UTC (permalink / raw)
  To: Kirill A. Shutemov; +Cc: linux-mm, kirill.shutemov, pasha.tatashin

On Thu, Apr 09, 2020 at 05:09:14PM +0300, Kirill A. Shutemov wrote:
> On Wed, Apr 08, 2020 at 08:01:44AM -0700, Matthew Wilcox wrote:
> > diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> > index 4aba6c0c2ba8..a8c3fa076f43 100644
> > --- a/include/linux/mm_types.h
> > +++ b/include/linux/mm_types.h
> > @@ -221,15 +221,8 @@ struct page {
> >  #endif
> >  } _struct_page_alignment;
> >  
> > -static inline atomic_t *compound_mapcount_ptr(struct page *page)
> > -{
> > -	return &page[1].compound_mapcount;
> > -}
> > -
> > -static inline atomic_t *compound_pincount_ptr(struct page *page)
> > -{
> > -	return &page[2].hpage_pinned_refcount;
> > -}
> > +#define compound_mapcount_ptr(page)	(&(page)[1].compound_mapcount)
> > +#define compound_pincount_ptr(page)	(&(page)[2].hpage_pinned_refcount)
> 
> Looks like this conversion is not covered by the reason given in the
> commit message. Hm?

They were supposed to be covered by "That means some inline functions
have to become macros so they can return a struct page with the same
const-ness as their argument."

They're not quite covered since they need to return an atomic_t that
is the same constness as their argument.
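
i.e. as an inline function we would be stuck with something like this
(sketch):

	static inline atomic_t *compound_mapcount_ptr(const struct page *page)
	{
		/* warning: return discards 'const' qualifier */
		return &page[1].compound_mapcount;
	}

whereas the macro just evaluates to &(page)[1].compound_mapcount with
whatever qualifiers the caller's pointer had.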

> > +#define compound_head(page) ({						\
> > +	__typeof__(page) _page = page;					\
> > +	unsigned long head = READ_ONCE(_page->compound_head);		\
> > +	if (unlikely(head & 1))						\
> > +		_page = (void *)(head - 1);				\
> > +	_page;								\
> > +})
> 
> Ugh..
> 
> Other option would be to make compound_head() take 'const struct page *'
> and return 'void *'. It arguably would provide better type safety, no?

We have a few places that do things like

mm/filemap.c:           if (unlikely(compound_head(page)->mapping != mapping)) {



^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 3/5] mm: Remove casting away of constness
  2020-04-08 15:01 ` [PATCH 3/5] mm: Remove casting away of constness Matthew Wilcox
  2020-04-08 15:23   ` Pavel Tatashin
@ 2020-04-09 14:19   ` Kirill A. Shutemov
  1 sibling, 0 replies; 25+ messages in thread
From: Kirill A. Shutemov @ 2020-04-09 14:19 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, kirill.shutemov, pasha.tatashin

On Wed, Apr 08, 2020 at 08:01:46AM -0700, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> Now that dump_page can take a const struct page pointer, we can get rid
		    ^ ()
> of the cast in page_to_nid().

It took me a minute to understand how page_to_nid() related to
dump_page(). Please elaborate in the commit message.

Otherwise, looks good.

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

-- 
 Kirill A. Shutemov


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 4/5] mm: Check for page poison in both page_to_nid implementations
  2020-04-08 15:01 ` [PATCH 4/5] mm: Check for page poison in both page_to_nid implementations Matthew Wilcox
  2020-04-08 15:24   ` Pavel Tatashin
@ 2020-04-09 14:21   ` Kirill A. Shutemov
  1 sibling, 0 replies; 25+ messages in thread
From: Kirill A. Shutemov @ 2020-04-09 14:21 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, kirill.shutemov, pasha.tatashin

On Wed, Apr 08, 2020 at 08:01:47AM -0700, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> The earlier patch that added page poison checking in page_to_nid()
> only modified one implementation; both configuration options should
> have this check.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

-- 
 Kirill A. Shutemov


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 5/5] mm: Check page poison before finding a head page
  2020-04-08 15:01 ` [PATCH 5/5] mm: Check page poison before finding a head page Matthew Wilcox
  2020-04-08 15:32   ` Pavel Tatashin
@ 2020-04-09 14:25   ` Kirill A. Shutemov
  1 sibling, 0 replies; 25+ messages in thread
From: Kirill A. Shutemov @ 2020-04-09 14:25 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, kirill.shutemov, pasha.tatashin

On Wed, Apr 08, 2020 at 08:01:48AM -0700, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> If a page is poisoned, the page->compound_head will be set to -1.
> Since it has bit zero set, we will think it is a tail page, and the head
> page is at 0xff..fe.  Checking said head page for being poisoned will
> not have good results.  Therefore we need to check for poison in each
> of compound_head(), PageTail() and PageCompound() (and can remove the
> checks which are now redundant from the PF_ macros).
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

-- 
 Kirill A. Shutemov


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 1/5] mm: Constify a lot of struct page arguments
  2020-04-09 14:15     ` Matthew Wilcox
@ 2020-04-09 14:28       ` Kirill A. Shutemov
  2020-04-09 14:32         ` Matthew Wilcox
  0 siblings, 1 reply; 25+ messages in thread
From: Kirill A. Shutemov @ 2020-04-09 14:28 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, kirill.shutemov, pasha.tatashin

On Thu, Apr 09, 2020 at 07:15:50AM -0700, Matthew Wilcox wrote:
> On Thu, Apr 09, 2020 at 05:09:14PM +0300, Kirill A. Shutemov wrote:
> > On Wed, Apr 08, 2020 at 08:01:44AM -0700, Matthew Wilcox wrote:
> > > diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> > > index 4aba6c0c2ba8..a8c3fa076f43 100644
> > > --- a/include/linux/mm_types.h
> > > +++ b/include/linux/mm_types.h
> > > @@ -221,15 +221,8 @@ struct page {
> > >  #endif
> > >  } _struct_page_alignment;
> > >  
> > > -static inline atomic_t *compound_mapcount_ptr(struct page *page)
> > > -{
> > > -	return &page[1].compound_mapcount;
> > > -}
> > > -
> > > -static inline atomic_t *compound_pincount_ptr(struct page *page)
> > > -{
> > > -	return &page[2].hpage_pinned_refcount;
> > > -}
> > > +#define compound_mapcount_ptr(page)	(&(page)[1].compound_mapcount)
> > > +#define compound_pincount_ptr(page)	(&(page)[2].hpage_pinned_refcount)
> > 
> > Looks like this conversion is not covered by the reason given in the
> > commit message. Hm?
> 
> They were supposed to be covered by "That means some inline functions
> have to become macros so they can return a struct page which is the same
> const-ness as their argument."
> 
> They're not quite covered since they need to return an atomic_t that
> is the same constness as their argument.

Ah, right.

> > > +#define compound_head(page) ({						\
> > > +	__typeof__(page) _page = page;					\
> > > +	unsigned long head = READ_ONCE(_page->compound_head);		\
> > > +	if (unlikely(head & 1))						\
> > > +		_page = (void *)(head - 1);				\
> > > +	_page;								\
> > > +})
> > 
> > Ugh..
> > 
> > Other option would be to make compound_head() take 'const struct page *'
> > and return 'void *'. It arguably would provide better type safety, no?
> 
> We have a few places that do things like
> 
> mm/filemap.c:           if (unlikely(compound_head(page)->mapping != mapping)) {

Good point.

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

-- 
 Kirill A. Shutemov


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 1/5] mm: Constify a lot of struct page arguments
  2020-04-09 14:28       ` Kirill A. Shutemov
@ 2020-04-09 14:32         ` Matthew Wilcox
  2020-04-09 14:47           ` Kirill A. Shutemov
  0 siblings, 1 reply; 25+ messages in thread
From: Matthew Wilcox @ 2020-04-09 14:32 UTC (permalink / raw)
  To: Kirill A. Shutemov; +Cc: linux-mm, kirill.shutemov, pasha.tatashin

On Thu, Apr 09, 2020 at 05:28:51PM +0300, Kirill A. Shutemov wrote:
> > We have a few places that do things like
> > 
> > mm/filemap.c:           if (unlikely(compound_head(page)->mapping != mapping)) {
> 
> Good point.
> 
> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

Darn, I hoped you'd have a better idea.  I feel quite ashamed of this patch.


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 1/5] mm: Constify a lot of struct page arguments
  2020-04-09 14:32         ` Matthew Wilcox
@ 2020-04-09 14:47           ` Kirill A. Shutemov
  2020-04-09 15:00             ` Kirill A. Shutemov
  2020-04-09 17:12             ` Jason Gunthorpe
  0 siblings, 2 replies; 25+ messages in thread
From: Kirill A. Shutemov @ 2020-04-09 14:47 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, kirill.shutemov, pasha.tatashin

On Thu, Apr 09, 2020 at 07:32:38AM -0700, Matthew Wilcox wrote:
> On Thu, Apr 09, 2020 at 05:28:51PM +0300, Kirill A. Shutemov wrote:
> > > We have a few places that do things like
> > > 
> > > mm/filemap.c:           if (unlikely(compound_head(page)->mapping != mapping)) {
> > 
> > Good point.
> > 
> > Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> 
> Darn, I hoped you'd have a better idea.  I feel quite ashamed of this patch.

I had two ideas. Both awful.

 - Rename compound_head() to __compound_head() or something and make it
   return void *. Then wrap it into a macro that would cast return type to
   type of the argument. It would allow to regain *some* type safety.

 - Provide two implementations and use C11 _Generic() :P
   (bump GCC version requirements first)
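
   For the second one, roughly (untested, and __compound_head() is made
   up here just for the sketch):

	static __always_inline const struct page *__compound_head(const struct page *page)
	{
		unsigned long head = READ_ONCE(page->compound_head);

		if (unlikely(head & 1))
			return (const struct page *)(head - 1);
		return page;
	}

	#define compound_head(page)					\
		_Generic((page),					\
			const struct page *: __compound_head(page),	\
			struct page *: (struct page *)__compound_head(page))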

-- 
 Kirill A. Shutemov


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 1/5] mm: Constify a lot of struct page arguments
  2020-04-09 14:47           ` Kirill A. Shutemov
@ 2020-04-09 15:00             ` Kirill A. Shutemov
  2020-04-09 17:12             ` Jason Gunthorpe
  1 sibling, 0 replies; 25+ messages in thread
From: Kirill A. Shutemov @ 2020-04-09 15:00 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm, kirill.shutemov, pasha.tatashin

On Thu, Apr 09, 2020 at 05:47:44PM +0300, Kirill A. Shutemov wrote:
> On Thu, Apr 09, 2020 at 07:32:38AM -0700, Matthew Wilcox wrote:
> > On Thu, Apr 09, 2020 at 05:28:51PM +0300, Kirill A. Shutemov wrote:
> > > > We have a few places that do things like
> > > > 
> > > > mm/filemap.c:           if (unlikely(compound_head(page)->mapping != mapping)) {
> > > 
> > > Good point.
> > > 
> > > Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> > 
> > Darn, I hoped you'd have a better idea.  I feel quite ashamed of this patch.
> 
> I had two ideas. Both awful.
> 
>  - Rename compound_head() to __compound_head() or something and make it
>    return void *. Then wrap it into a macro that would cast return type to
>    type of the argument. It would allow to regain *some* type safety.
> 
>  - Provide two implementations and use C11 _Generic() :P
>    (bump GCC version requirements first)

I guess a reasonable modification would be to add typechecking to the
macros:

	typecheck(const struct page *, page)

Not sure though if it treats const vs non-const as compatible.
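
(For reference, typecheck() in include/linux/typecheck.h is just the
address-comparison trick; quoting it from memory, so double-check:)

	#define typecheck(type,x) \
	({	type __dummy; \
		typeof(x) __dummy2; \
		(void)(&__dummy == &__dummy2); \
		1; \
	})

A const vs non-const mismatch should at least produce the usual
"comparison of distinct pointer types" warning rather than being
silently accepted.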

-- 
 Kirill A. Shutemov


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 1/5] mm: Constify a lot of struct page arguments
  2020-04-09 14:47           ` Kirill A. Shutemov
  2020-04-09 15:00             ` Kirill A. Shutemov
@ 2020-04-09 17:12             ` Jason Gunthorpe
  2020-04-09 20:47               ` John Hubbard
  1 sibling, 1 reply; 25+ messages in thread
From: Jason Gunthorpe @ 2020-04-09 17:12 UTC (permalink / raw)
  To: Kirill A. Shutemov
  Cc: Matthew Wilcox, linux-mm, kirill.shutemov, pasha.tatashin

On Thu, Apr 09, 2020 at 05:47:44PM +0300, Kirill A. Shutemov wrote:
> On Thu, Apr 09, 2020 at 07:32:38AM -0700, Matthew Wilcox wrote:
> > On Thu, Apr 09, 2020 at 05:28:51PM +0300, Kirill A. Shutemov wrote:
> > > > We have a few places that do things like
> > > > 
> > > > mm/filemap.c:           if (unlikely(compound_head(page)->mapping != mapping)) {
> > > 
> > > Good point.
> > > 
> > > Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> > 
> > Darn, I hoped you'd have a better idea.  I feel quite ashamed of this patch.
> 
> I had two ideas. Both awful.
> 
>  - Rename compound_head() to __compound_head() or something and make it
>    return void *. Then wrap it into a macro that would cast return type to
>    type of the argument. It would allow to regain *some* type safety.
> 
>  - Provide two implementations and use C11 _Generic() :P
>    (bump GCC version requirements first)

I saw people using static_assert stuff in the kernel, maybe C11 is
feasible now? It is 9 years old..

Jason


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 1/5] mm: Constify a lot of struct page arguments
  2020-04-09 17:12             ` Jason Gunthorpe
@ 2020-04-09 20:47               ` John Hubbard
  0 siblings, 0 replies; 25+ messages in thread
From: John Hubbard @ 2020-04-09 20:47 UTC (permalink / raw)
  To: Jason Gunthorpe, Kirill A. Shutemov
  Cc: Matthew Wilcox, linux-mm, kirill.shutemov, pasha.tatashin

On 4/9/20 10:12 AM, Jason Gunthorpe wrote:
> On Thu, Apr 09, 2020 at 05:47:44PM +0300, Kirill A. Shutemov wrote:
>> On Thu, Apr 09, 2020 at 07:32:38AM -0700, Matthew Wilcox wrote:
>>> On Thu, Apr 09, 2020 at 05:28:51PM +0300, Kirill A. Shutemov wrote:
>>>>> We have a few places that do things like
>>>>>
>>>>> mm/filemap.c:           if (unlikely(compound_head(page)->mapping != mapping)) {
>>>>
>>>> Good point.
>>>>
>>>> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
>>>
>>> Darn, I hoped you'd have a better idea.  I feel quite ashamed of this patch.
>>
>> I had two ideas. Both awful.
>>
>>   - Rename compound_head() to __compound_head() or something and make it
>>     return void *. Then wrap it into a macro that would cast return type to
>>     type of the argument. It would allow to regain *some* type safety.
>>
>>   - Provide two implementations and use C11 _Generic() :P
>>     (bump GCC version requirements first)
> 
> I saw people using static_assert stuff in the kernel, maybe C11 is
> feasible now? It is 9 years old..


ohhh, if this patch is a reliable indicator of what const correctness looks
like in the kernel, then we're going to end up with a lot of movement in that
direction, too. With that in mind, let's provide some counter-force, by:

1) Poking around at the C11 idea, just in case It Is Time. Because clearly
the C language dialect that we're using now is not *quite* up to doing const
correctness, so moving the language ahead is likely to give us the cleanest
solution. Better than writing our own language in macros, which yes, I
realize is a way to compensate. But, uggghhh. :)

2) Perhaps providing two routines with different names might be the answer
in this case? This avoids macros, and still provides a const cast (which
breaks the chain of const correctness, but on the other hand, we still get
most of the const goodness--the rest comes when the language supports it):

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 222f6f7b2bb3..71e2d07ddcf4 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -186,6 +186,11 @@ static inline struct page *compound_head(struct page *page)
         return page;
  }
  
+static inline const struct page *compound_head_const(struct page *page)
+{
+       return (const struct page*)compound_head(page);
+}
+
  static __always_inline int PageTail(struct page *page)
  {
         return READ_ONCE(page->compound_head) & 1;
diff --git a/mm/debug.c b/mm/debug.c
index 2189357f0987..9a1f37a80ed7 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -44,7 +44,7 @@ const struct trace_print_flags vmaflag_names[] = {
  
  void __dump_page(struct page *page, const char *reason)
  {
-       struct page *head = compound_head(page);
+       const struct page *head = compound_head_const(page);
         struct address_space *mapping;
         bool page_poisoned = PagePoisoned(page);
         bool compound = PageCompound(page);

...etc

thanks,
-- 
John Hubbard
NVIDIA


^ permalink raw reply related	[flat|nested] 25+ messages in thread

* Re: [PATCH 0/5] Improve page poisoning implementation
  2020-04-08 15:11 ` [PATCH 0/5] Improve page poisoning implementation Pavel Tatashin
@ 2020-04-09 20:55   ` John Hubbard
  0 siblings, 0 replies; 25+ messages in thread
From: John Hubbard @ 2020-04-09 20:55 UTC (permalink / raw)
  To: Pavel Tatashin, Matthew Wilcox; +Cc: linux-mm, Kirill A. Shutemov

On 4/8/20 8:11 AM, Pavel Tatashin wrote:
> On Wed, Apr 8, 2020 at 11:01 AM Matthew Wilcox <willy@infradead.org> wrote:
>>
>> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>>
>> I really don't like that this feature is called page poisoning.
>> We already had something called page poisoning and it's when you detect
>> a memory error in a page.  This is just uninitialised pages.  I don't
> 
> Hi Matthew,
> 
> Thank you for working on this. Uninitialized struct pages are often
> zeroed by firmware, and there were a number of implicit assumptions
> about that memory when I worked on deferred page initialization.
> That is why it is important to also test with struct pages
> deliberately set to a pattern that is not all zeroes, something that
> can happen during kexec, or when memory is allocated and freed by the
> kernel during boot. We have caught a good number of bugs using this
> mechanism. So this is poisoning, but I agree the "page poisoning"
> name is misleading, as we already use that term elsewhere. Let's
> agree on a better term: how about memmap poisoning
> (s/page_poisoning/memmap_poisoning/)?
> 

Better to avoid the "poison" word entirely. It's just too well-established
as a hardware memory error case. Early ideas for other names, just to get
started: use "uninitialize" or "deinitialize" instead of "poison".
So approximately:

	page_deinit
	page_deinitialize
	page_uninit
	page_uninitialize
	mem_deinit
	...other variations...	

thanks,
-- 
John Hubbard
NVIDIA


^ permalink raw reply	[flat|nested] 25+ messages in thread
