linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [RFC][PATCH v2 0/3] mm/page_owner: track page free call chain
@ 2016-07-08 12:11 Sergey Senozhatsky
  2016-07-08 12:11 ` [RFC][PATCH v2 1/3] mm/page_owner: rename page_owner functions Sergey Senozhatsky
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Sergey Senozhatsky @ 2016-07-08 12:11 UTC (permalink / raw)
  To: Joonsoo Kim
  Cc: Andrew Morton, Vlastimil Babka, linux-mm, linux-kernel,
	Sergey Senozhatsky, Sergey Senozhatsky

Hello,

Page owner tracks a call chain that has allocated a page, this
patch set extends it with a free_pages call chain tracking
functionality.

Page dump, thus, now has the following format:

        a) page allocated backtrace
        b) page free backtrace
        c) backtrace of the path that has triggered the bad page

For a quick example of a case when this new b) part can make a
difference please see 0003.

v2:
-- do not add PAGE_OWNER_TRACK_FREE .config -- Joonsoo
-- minor improvements

Sergey Senozhatsky (3):
  mm/page_owner: rename page_owner functions
  mm/page_owner: rename PAGE_EXT_OWNER flag
  mm/page_owner: track page free call chain

 include/linux/page_ext.h   | 13 +++++--
 include/linux/page_owner.h | 16 ++++-----
 mm/page_alloc.c            |  4 +--
 mm/page_owner.c            | 86 ++++++++++++++++++++++++++++------------------
 mm/vmstat.c                |  5 ++-
 5 files changed, 77 insertions(+), 47 deletions(-)

-- 
2.9.0.37.g6d523a3

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [RFC][PATCH v2 1/3] mm/page_owner: rename page_owner functions
  2016-07-08 12:11 [RFC][PATCH v2 0/3] mm/page_owner: track page free call chain Sergey Senozhatsky
@ 2016-07-08 12:11 ` Sergey Senozhatsky
  2016-07-08 12:11 ` [RFC][PATCH v2 2/3] mm/page_owner: rename PAGE_EXT_OWNER flag Sergey Senozhatsky
  2016-07-08 12:11 ` [RFC][PATCH v2 3/3] mm/page_owner: track page free call chain Sergey Senozhatsky
  2 siblings, 0 replies; 7+ messages in thread
From: Sergey Senozhatsky @ 2016-07-08 12:11 UTC (permalink / raw)
  To: Joonsoo Kim
  Cc: Andrew Morton, Vlastimil Babka, linux-mm, linux-kernel,
	Sergey Senozhatsky, Sergey Senozhatsky

A cosmetic change:

rename set_page_owner() and reset_page_owner() functions to
page_owner_alloc_pages()/page_owner_free_pages(). This is
sort of a preparation step for PAGE_OWNER_TRACK_FREE patch.

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
---
 include/linux/page_owner.h | 16 ++++++++--------
 mm/page_alloc.c            |  4 ++--
 mm/page_owner.c            |  6 +++---
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 30583ab..7b25953 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -7,25 +7,25 @@
 extern struct static_key_false page_owner_inited;
 extern struct page_ext_operations page_owner_ops;
 
-extern void __reset_page_owner(struct page *page, unsigned int order);
-extern void __set_page_owner(struct page *page,
+extern void __page_owner_free_pages(struct page *page, unsigned int order);
+extern void __page_owner_alloc_pages(struct page *page,
 			unsigned int order, gfp_t gfp_mask);
 extern void __split_page_owner(struct page *page, unsigned int order);
 extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
 extern void __dump_page_owner(struct page *page);
 
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void page_owner_free_pages(struct page *page, unsigned int order)
 {
 	if (static_branch_unlikely(&page_owner_inited))
-		__reset_page_owner(page, order);
+		__page_owner_free_pages(page, order);
 }
 
-static inline void set_page_owner(struct page *page,
+static inline void page_owner_alloc_pages(struct page *page,
 			unsigned int order, gfp_t gfp_mask)
 {
 	if (static_branch_unlikely(&page_owner_inited))
-		__set_page_owner(page, order, gfp_mask);
+		__page_owner_alloc_pages(page, order, gfp_mask);
 }
 
 static inline void split_page_owner(struct page *page, unsigned int order)
@@ -49,10 +49,10 @@ static inline void dump_page_owner(struct page *page)
 		__dump_page_owner(page);
 }
 #else
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void page_owner_free_pages(struct page *page, unsigned int order)
 {
 }
-static inline void set_page_owner(struct page *page,
+static inline void page_owner_alloc_pages(struct page *page,
 			unsigned int order, gfp_t gfp_mask)
 {
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 63801c7..5e3cba4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1031,7 +1031,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 
 	page_cpupid_reset_last(page);
 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
-	reset_page_owner(page, order);
+	page_owner_free_pages(page, order);
 
 	if (!PageHighMem(page)) {
 		debug_check_no_locks_freed(page_address(page),
@@ -1770,7 +1770,7 @@ void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags)
 	kernel_map_pages(page, 1 << order, 1);
 	kernel_poison_pages(page, 1 << order, 1);
 	kasan_alloc_pages(page, order);
-	set_page_owner(page, order, gfp_flags);
+	page_owner_alloc_pages(page, order, gfp_flags);
 }
 
 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 8fa5083..fde443a 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -85,7 +85,7 @@ struct page_ext_operations page_owner_ops = {
 	.init = init_page_owner,
 };
 
-void __reset_page_owner(struct page *page, unsigned int order)
+void __page_owner_free_pages(struct page *page, unsigned int order)
 {
 	int i;
 	struct page_ext *page_ext;
@@ -147,7 +147,7 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
 	return handle;
 }
 
-noinline void __set_page_owner(struct page *page, unsigned int order,
+noinline void __page_owner_alloc_pages(struct page *page, unsigned int order,
 					gfp_t gfp_mask)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
@@ -452,7 +452,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 				continue;
 
 			/* Found early allocated page */
-			set_page_owner(page, 0, 0);
+			__page_owner_alloc_pages(page, 0, 0);
 			count++;
 		}
 	}
-- 
2.9.0.37.g6d523a3

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [RFC][PATCH v2 2/3] mm/page_owner: rename PAGE_EXT_OWNER flag
  2016-07-08 12:11 [RFC][PATCH v2 0/3] mm/page_owner: track page free call chain Sergey Senozhatsky
  2016-07-08 12:11 ` [RFC][PATCH v2 1/3] mm/page_owner: rename page_owner functions Sergey Senozhatsky
@ 2016-07-08 12:11 ` Sergey Senozhatsky
  2016-07-08 12:11 ` [RFC][PATCH v2 3/3] mm/page_owner: track page free call chain Sergey Senozhatsky
  2 siblings, 0 replies; 7+ messages in thread
From: Sergey Senozhatsky @ 2016-07-08 12:11 UTC (permalink / raw)
  To: Joonsoo Kim
  Cc: Andrew Morton, Vlastimil Babka, linux-mm, linux-kernel,
	Sergey Senozhatsky, Sergey Senozhatsky

A cosmetic change:

PAGE_OWNER_TRACK_FREE will introduce one more page_owner
flag: PAGE_EXT_OWNER_FREE. To make names symmetrical, rename
PAGE_EXT_OWNER to PAGE_EXT_OWNER_ALLOC.

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
---
 include/linux/page_ext.h |  2 +-
 mm/page_owner.c          | 12 ++++++------
 mm/vmstat.c              |  2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 03f2a3e..66ba2bb 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -26,7 +26,7 @@ struct page_ext_operations {
 enum page_ext_flags {
 	PAGE_EXT_DEBUG_POISON,		/* Page is poisoned */
 	PAGE_EXT_DEBUG_GUARD,
-	PAGE_EXT_OWNER,
+	PAGE_EXT_OWNER_ALLOC,
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
 	PAGE_EXT_YOUNG,
 	PAGE_EXT_IDLE,
diff --git a/mm/page_owner.c b/mm/page_owner.c
index fde443a..4acccb7 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -94,7 +94,7 @@ void __page_owner_free_pages(struct page *page, unsigned int order)
 		page_ext = lookup_page_ext(page + i);
 		if (unlikely(!page_ext))
 			continue;
-		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
+		__clear_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags);
 	}
 }
 
@@ -160,7 +160,7 @@ noinline void __page_owner_alloc_pages(struct page *page, unsigned int order,
 	page_ext->gfp_mask = gfp_mask;
 	page_ext->last_migrate_reason = -1;
 
-	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+	__set_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags);
 }
 
 void __set_page_owner_migrate_reason(struct page *page, int reason)
@@ -207,7 +207,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 	 * in that case we also don't need to explicitly clear the info from
 	 * the new page, which will be freed.
 	 */
-	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
+	__set_bit(PAGE_EXT_OWNER_ALLOC, &new_ext->flags);
 }
 
 static ssize_t
@@ -301,7 +301,7 @@ void __dump_page_owner(struct page *page)
 	gfp_mask = page_ext->gfp_mask;
 	mt = gfpflags_to_migratetype(gfp_mask);
 
-	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
+	if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags)) {
 		pr_alert("page_owner info is not active (free page?)\n");
 		return;
 	}
@@ -374,7 +374,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		 * Some pages could be missed by concurrent allocation or free,
 		 * because we don't hold the zone lock.
 		 */
-		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+		if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags))
 			continue;
 
 		/*
@@ -448,7 +448,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 				continue;
 
 			/* Maybe overraping zone */
-			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+			if (test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags))
 				continue;
 
 			/* Found early allocated page */
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7997f529..63ef65f 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1070,7 +1070,7 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 			if (unlikely(!page_ext))
 				continue;
 
-			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+			if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags))
 				continue;
 
 			page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
-- 
2.9.0.37.g6d523a3

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [RFC][PATCH v2 3/3] mm/page_owner: track page free call chain
  2016-07-08 12:11 [RFC][PATCH v2 0/3] mm/page_owner: track page free call chain Sergey Senozhatsky
  2016-07-08 12:11 ` [RFC][PATCH v2 1/3] mm/page_owner: rename page_owner functions Sergey Senozhatsky
  2016-07-08 12:11 ` [RFC][PATCH v2 2/3] mm/page_owner: rename PAGE_EXT_OWNER flag Sergey Senozhatsky
@ 2016-07-08 12:11 ` Sergey Senozhatsky
  2016-07-11  6:21   ` Joonsoo Kim
  2016-07-11 14:27   ` [PATCH " Sergey Senozhatsky
  2 siblings, 2 replies; 7+ messages in thread
From: Sergey Senozhatsky @ 2016-07-08 12:11 UTC (permalink / raw)
  To: Joonsoo Kim
  Cc: Andrew Morton, Vlastimil Babka, linux-mm, linux-kernel,
	Sergey Senozhatsky, Sergey Senozhatsky

Extend page_owner with free_pages() tracking functionality. This adds to the
dump_page_owner() output an additional backtrace, that tells us what path has
freed the page.

As a trivial example, let's assume that do_some_foo() has an error - an extra
put_page() on the error return path, and the function is also getting preempted,
letting some other task allocate the same page, which is then 'mistakenly'
getting freed once again by do_some_foo().

CPUA					CPUB

void do_some_foo(void)
{
	page = alloc_page();
	if (error) {
		put_page(page);
		goto out;
	}
	...
out:
	<<preempted>>
					void do_some_bar()
					{
						page = alloc_page();
						...
						<<preempted>>
	...
	put_page(page);
}
						<<use freed page>>
						put_page(page);
					}

With the existing implementation we would see only CPUB's backtrace
from bad_page. The extended page_owner would also report CPUA's
put_page(), which can be a helpful hint.

Backtrace:

 BUG: Bad page state in process cc1  pfn:bae1d
 page:ffffea0002eb8740 count:-1 mapcount:0 mapping:          (null) index:0x0
 flags: 0x4000000000000000()
 page dumped because: nonzero _count
 page allocated via order 0, migratetype Unmovable, gfp_mask 0x2000200(GFP_NOWAIT|__GFP_NOWARN)
  [<ffffffff8101bc9c>] save_stack_trace+0x26/0x41
  [<ffffffff81110fe4>] save_stack+0x46/0xc3
  [<ffffffff81111481>] __page_owner_alloc_pages+0x24/0x41
  [<ffffffff810c9867>] post_alloc_hook+0x1e/0x20
  [<ffffffff810ca63d>] get_page_from_freelist+0x4fd/0x756
  [<ffffffff810cadea>] __alloc_pages_nodemask+0xe7/0xbcf
  [<ffffffff810cb8e4>] __get_free_pages+0x12/0x40
  [<ffffffff810e6b64>] __tlb_remove_page_size.part.12+0x37/0x78
  [<ffffffff810e6d9b>] __tlb_remove_page_size+0x21/0x23
  [<ffffffff810e7ff2>] unmap_page_range+0x63a/0x75b
  [<ffffffff810e81cf>] unmap_single_vma+0xbc/0xc6
  [<ffffffff810e82d2>] unmap_vmas+0x35/0x44
  [<ffffffff810ee6f4>] exit_mmap+0x5a/0xec
  [<ffffffff810385b4>] mmput+0x4a/0xdc
  [<ffffffff8103dff7>] do_exit+0x398/0x8de
  [<ffffffff8103e5ae>] do_group_exit+0x45/0xb0
 page freed, was allocated via order 0, migratetype Unmovable, gfp_mask 0x2000200(GFP_NOWAIT|__GFP_NOWARN)
  [<ffffffff8101bc9c>] save_stack_trace+0x26/0x41
  [<ffffffff81110fe4>] save_stack+0x46/0xc3
  [<ffffffff81111411>] __page_owner_free_pages+0x25/0x71
  [<ffffffff810c9f0a>] free_hot_cold_page+0x1d6/0x1ea
  [<ffffffff810d03e1>] __put_page+0x37/0x3a
  [<ffffffff8115b8da>] do_some_foo()+0x8a/0x8e
	...
 Modules linked in: ....
 CPU: 3 PID: 1274 Comm: cc1 Not tainted 4.7.0-rc5-next-20160701-dbg-00009-ge01494f-dirty #535
  0000000000000000 ffff8800aeea3c18 ffffffff811e67ca ffffea0002eb8740
  ffffffff8175675e ffff8800aeea3c40 ffffffff810c87f5 0000000000000000
  ffffffff81880b40 ffff880137d98438 ffff8800aeea3c50 ffffffff810c88d5
 Call Trace:
  [<ffffffff811e67ca>] dump_stack+0x68/0x92
  [<ffffffff810c87f5>] bad_page+0xf8/0x11e
  [<ffffffff810c88d5>] check_new_page_bad+0x63/0x65
  [<ffffffff810ca36a>] get_page_from_freelist+0x22a/0x756
  [<ffffffff810cadea>] __alloc_pages_nodemask+0xe7/0xbcf
  [<ffffffff81073a43>] ? trace_hardirqs_on_caller+0x16d/0x189
  [<ffffffff810ede8d>] ? vma_merge+0x159/0x249
  [<ffffffff81074aa0>] ? __lock_acquire+0x2ac/0x15c7
  [<ffffffff81034ace>] pte_alloc_one+0x1b/0x67
  [<ffffffff810e922b>] __pte_alloc+0x19/0xa6
  [<ffffffff810eb09f>] handle_mm_fault+0x409/0xc59
  [<ffffffff810309f6>] __do_page_fault+0x1d8/0x3ac
  [<ffffffff81030bf7>] do_page_fault+0xc/0xe
  [<ffffffff814a84af>] page_fault+0x1f/0x30

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
---
 include/linux/page_ext.h | 11 ++++++-
 mm/page_owner.c          | 74 ++++++++++++++++++++++++++++++------------------
 mm/vmstat.c              |  3 ++
 3 files changed, 59 insertions(+), 29 deletions(-)

diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 66ba2bb..0cccc94 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -27,12 +27,21 @@ enum page_ext_flags {
 	PAGE_EXT_DEBUG_POISON,		/* Page is poisoned */
 	PAGE_EXT_DEBUG_GUARD,
 	PAGE_EXT_OWNER_ALLOC,
+	PAGE_EXT_OWNER_FREE,
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
 	PAGE_EXT_YOUNG,
 	PAGE_EXT_IDLE,
 #endif
 };
 
+#ifdef CONFIG_PAGE_OWNER
+enum page_owner_handles {
+	PAGE_OWNER_HANDLE_ALLOC,
+	PAGE_OWNER_HANDLE_FREE,
+	PAGE_OWNER_HANDLE_MAX
+};
+#endif
+
 /*
  * Page Extension can be considered as an extended mem_map.
  * A page_ext page is associated with every page descriptor. The
@@ -46,7 +55,7 @@ struct page_ext {
 	unsigned int order;
 	gfp_t gfp_mask;
 	int last_migrate_reason;
-	depot_stack_handle_t handle;
+	depot_stack_handle_t handles[PAGE_OWNER_HANDLE_MAX];
 #endif
 };
 
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 4acccb7..c431ac4 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -13,6 +13,11 @@
 
 #define PAGE_OWNER_STACK_DEPTH (16)
 
+static const char *page_owner_handles_names[PAGE_OWNER_HANDLE_MAX] = {
+	"page allocated",
+	"page freed, was allocated",
+};
+
 static bool page_owner_disabled = true;
 DEFINE_STATIC_KEY_FALSE(page_owner_inited);
 
@@ -85,19 +90,6 @@ struct page_ext_operations page_owner_ops = {
 	.init = init_page_owner,
 };
 
-void __page_owner_free_pages(struct page *page, unsigned int order)
-{
-	int i;
-	struct page_ext *page_ext;
-
-	for (i = 0; i < (1 << order); i++) {
-		page_ext = lookup_page_ext(page + i);
-		if (unlikely(!page_ext))
-			continue;
-		__clear_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags);
-	}
-}
-
 static inline bool check_recursive_alloc(struct stack_trace *trace,
 					unsigned long ip)
 {
@@ -147,6 +139,23 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
 	return handle;
 }
 
+void __page_owner_free_pages(struct page *page, unsigned int order)
+{
+	int i;
+	depot_stack_handle_t handle = save_stack(0);
+
+	for (i = 0; i < (1 << order); i++) {
+		struct page_ext *page_ext = lookup_page_ext(page + i);
+
+		if (unlikely(!page_ext))
+			continue;
+
+		page_ext->handles[PAGE_OWNER_HANDLE_FREE] = handle;
+		__set_bit(PAGE_EXT_OWNER_FREE, &page_ext->flags);
+		__clear_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags);
+	}
+}
+
 noinline void __page_owner_alloc_pages(struct page *page, unsigned int order,
 					gfp_t gfp_mask)
 {
@@ -155,7 +164,7 @@ noinline void __page_owner_alloc_pages(struct page *page, unsigned int order,
 	if (unlikely(!page_ext))
 		return;
 
-	page_ext->handle = save_stack(gfp_mask);
+	page_ext->handles[PAGE_OWNER_HANDLE_ALLOC] = save_stack(gfp_mask);
 	page_ext->order = order;
 	page_ext->gfp_mask = gfp_mask;
 	page_ext->last_migrate_reason = -1;
@@ -189,6 +198,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 {
 	struct page_ext *old_ext = lookup_page_ext(oldpage);
 	struct page_ext *new_ext = lookup_page_ext(newpage);
+	int i;
 
 	if (unlikely(!old_ext || !new_ext))
 		return;
@@ -196,7 +206,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 	new_ext->order = old_ext->order;
 	new_ext->gfp_mask = old_ext->gfp_mask;
 	new_ext->last_migrate_reason = old_ext->last_migrate_reason;
-	new_ext->handle = old_ext->handle;
+
+	for (i = 0; i < PAGE_OWNER_HANDLE_MAX; i++)
+		new_ext->handles[i] = old_ext->handles[i];
 
 	/*
 	 * We don't clear the bit on the oldpage as it's going to be freed
@@ -292,7 +304,7 @@ void __dump_page_owner(struct page *page)
 	};
 	depot_stack_handle_t handle;
 	gfp_t gfp_mask;
-	int mt;
+	int mt, i;
 
 	if (unlikely(!page_ext)) {
 		pr_alert("There is not page extension available.\n");
@@ -301,25 +313,31 @@ void __dump_page_owner(struct page *page)
 	gfp_mask = page_ext->gfp_mask;
 	mt = gfpflags_to_migratetype(gfp_mask);
 
-	if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags)) {
+	if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags) &&
+			!test_bit(PAGE_EXT_OWNER_FREE, &page_ext->flags)) {
 		pr_alert("page_owner info is not active (free page?)\n");
 		return;
 	}
 
-	handle = READ_ONCE(page_ext->handle);
-	if (!handle) {
-		pr_alert("page_owner info is not active (free page?)\n");
-		return;
-	}
+	for (i = 0; i < PAGE_OWNER_HANDLE_MAX; i++) {
+		handle = READ_ONCE(page_ext->handles[i]);
+		if (!handle) {
+			pr_alert("page_owner info is not active for `%s'\n",
+					page_owner_handles_names[i]);
+			continue;
+		}
 
-	depot_fetch_stack(handle, &trace);
-	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
-		 page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
-	print_stack_trace(&trace, 0);
+		depot_fetch_stack(handle, &trace);
+		pr_alert("%s via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
+				page_owner_handles_names[i], page_ext->order,
+				migratetype_names[mt], gfp_mask, &gfp_mask);
+		print_stack_trace(&trace, 0);
 
-	if (page_ext->last_migrate_reason != -1)
+		if (page_ext->last_migrate_reason == -1)
+			continue;
 		pr_alert("page has been migrated, last migrate reason: %s\n",
 			migrate_reason_names[page_ext->last_migrate_reason]);
+	}
 }
 
 static ssize_t
@@ -381,7 +399,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		 * Access to page_ext->handle isn't synchronous so we should
 		 * be careful to access it.
 		 */
-		handle = READ_ONCE(page_ext->handle);
+		handle = READ_ONCE(page_ext->handles[PAGE_OWNER_HANDLE_ALLOC]);
 		if (!handle)
 			continue;
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 63ef65f..4ff0135 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1073,6 +1073,9 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 			if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags))
 				continue;
 
+			if (!test_bit(PAGE_EXT_OWNER_FREE, &page_ext->flags))
+				continue;
+
 			page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
 			if (pageblock_mt != page_mt) {
 				if (is_migrate_cma(pageblock_mt))
-- 
2.9.0.37.g6d523a3

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [RFC][PATCH v2 3/3] mm/page_owner: track page free call chain
  2016-07-08 12:11 ` [RFC][PATCH v2 3/3] mm/page_owner: track page free call chain Sergey Senozhatsky
@ 2016-07-11  6:21   ` Joonsoo Kim
  2016-07-11  7:24     ` Sergey Senozhatsky
  2016-07-11 14:27   ` [PATCH " Sergey Senozhatsky
  1 sibling, 1 reply; 7+ messages in thread
From: Joonsoo Kim @ 2016-07-11  6:21 UTC (permalink / raw)
  To: Sergey Senozhatsky
  Cc: Andrew Morton, Vlastimil Babka, linux-mm, linux-kernel,
	Sergey Senozhatsky

On Fri, Jul 08, 2016 at 09:11:32PM +0900, Sergey Senozhatsky wrote:
> Extend page_owner with free_pages() tracking functionality. This adds to the
> dump_page_owner() output an additional backtrace, that tells us what path has
> freed the page.
> 
> As a trivial example, let's assume that do_some_foo() has an error - an extra
> put_page() on the error return path, and the function is also getting preempted,
> letting some other task allocate the same page, which is then 'mistakenly'
> getting freed once again by do_some_foo().
> 
> CPUA					CPUB
> 
> void do_some_foo(void)
> {
> 	page = alloc_page();
> 	if (error) {
> 		put_page(page);
> 		goto out;
> 	}
> 	...
> out:
> 	<<preempted>>
> 					void do_some_bar()
> 					{
> 						page = alloc_page();
> 						...
> 						<<preempted>>
> 	...
> 	put_page(page);
> }
> 						<<use freed page>>
> 						put_page(page);
> 					}
> 
> With the existing implementation we would see only CPUB's backtrace
> from bad_page. The extended page_owner would also report CPUA's
> put_page(), which can be a helpful hint.
> 
> Backtrace:
> 
>  BUG: Bad page state in process cc1  pfn:bae1d
>  page:ffffea0002eb8740 count:-1 mapcount:0 mapping:          (null) index:0x0
>  flags: 0x4000000000000000()
>  page dumped because: nonzero _count
>  page allocated via order 0, migratetype Unmovable, gfp_mask 0x2000200(GFP_NOWAIT|__GFP_NOWARN)
>   [<ffffffff8101bc9c>] save_stack_trace+0x26/0x41
>   [<ffffffff81110fe4>] save_stack+0x46/0xc3
>   [<ffffffff81111481>] __page_owner_alloc_pages+0x24/0x41
>   [<ffffffff810c9867>] post_alloc_hook+0x1e/0x20
>   [<ffffffff810ca63d>] get_page_from_freelist+0x4fd/0x756
>   [<ffffffff810cadea>] __alloc_pages_nodemask+0xe7/0xbcf
>   [<ffffffff810cb8e4>] __get_free_pages+0x12/0x40
>   [<ffffffff810e6b64>] __tlb_remove_page_size.part.12+0x37/0x78
>   [<ffffffff810e6d9b>] __tlb_remove_page_size+0x21/0x23
>   [<ffffffff810e7ff2>] unmap_page_range+0x63a/0x75b
>   [<ffffffff810e81cf>] unmap_single_vma+0xbc/0xc6
>   [<ffffffff810e82d2>] unmap_vmas+0x35/0x44
>   [<ffffffff810ee6f4>] exit_mmap+0x5a/0xec
>   [<ffffffff810385b4>] mmput+0x4a/0xdc
>   [<ffffffff8103dff7>] do_exit+0x398/0x8de
>   [<ffffffff8103e5ae>] do_group_exit+0x45/0xb0
>  page freed, was allocated via order 0, migratetype Unmovable, gfp_mask 0x2000200(GFP_NOWAIT|__GFP_NOWARN)
>   [<ffffffff8101bc9c>] save_stack_trace+0x26/0x41
>   [<ffffffff81110fe4>] save_stack+0x46/0xc3
>   [<ffffffff81111411>] __page_owner_free_pages+0x25/0x71
>   [<ffffffff810c9f0a>] free_hot_cold_page+0x1d6/0x1ea
>   [<ffffffff810d03e1>] __put_page+0x37/0x3a
>   [<ffffffff8115b8da>] do_some_foo()+0x8a/0x8e
> 	...
>  Modules linked in: ....
>  CPU: 3 PID: 1274 Comm: cc1 Not tainted 4.7.0-rc5-next-20160701-dbg-00009-ge01494f-dirty #535
>   0000000000000000 ffff8800aeea3c18 ffffffff811e67ca ffffea0002eb8740
>   ffffffff8175675e ffff8800aeea3c40 ffffffff810c87f5 0000000000000000
>   ffffffff81880b40 ffff880137d98438 ffff8800aeea3c50 ffffffff810c88d5
>  Call Trace:
>   [<ffffffff811e67ca>] dump_stack+0x68/0x92
>   [<ffffffff810c87f5>] bad_page+0xf8/0x11e
>   [<ffffffff810c88d5>] check_new_page_bad+0x63/0x65
>   [<ffffffff810ca36a>] get_page_from_freelist+0x22a/0x756
>   [<ffffffff810cadea>] __alloc_pages_nodemask+0xe7/0xbcf
>   [<ffffffff81073a43>] ? trace_hardirqs_on_caller+0x16d/0x189
>   [<ffffffff810ede8d>] ? vma_merge+0x159/0x249
>   [<ffffffff81074aa0>] ? __lock_acquire+0x2ac/0x15c7
>   [<ffffffff81034ace>] pte_alloc_one+0x1b/0x67
>   [<ffffffff810e922b>] __pte_alloc+0x19/0xa6
>   [<ffffffff810eb09f>] handle_mm_fault+0x409/0xc59
>   [<ffffffff810309f6>] __do_page_fault+0x1d8/0x3ac
>   [<ffffffff81030bf7>] do_page_fault+0xc/0xe
>   [<ffffffff814a84af>] page_fault+0x1f/0x30
> 
> Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
> ---
>  include/linux/page_ext.h | 11 ++++++-
>  mm/page_owner.c          | 74 ++++++++++++++++++++++++++++++------------------
>  mm/vmstat.c              |  3 ++
>  3 files changed, 59 insertions(+), 29 deletions(-)
> 
> diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
> index 66ba2bb..0cccc94 100644
> --- a/include/linux/page_ext.h
> +++ b/include/linux/page_ext.h
> @@ -27,12 +27,21 @@ enum page_ext_flags {
>  	PAGE_EXT_DEBUG_POISON,		/* Page is poisoned */
>  	PAGE_EXT_DEBUG_GUARD,
>  	PAGE_EXT_OWNER_ALLOC,
> +	PAGE_EXT_OWNER_FREE,
>  #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
>  	PAGE_EXT_YOUNG,
>  	PAGE_EXT_IDLE,
>  #endif
>  };
>  
> +#ifdef CONFIG_PAGE_OWNER
> +enum page_owner_handles {
> +	PAGE_OWNER_HANDLE_ALLOC,
> +	PAGE_OWNER_HANDLE_FREE,
> +	PAGE_OWNER_HANDLE_MAX
> +};
> +#endif
> +
>  /*
>   * Page Extension can be considered as an extended mem_map.
>   * A page_ext page is associated with every page descriptor. The
> @@ -46,7 +55,7 @@ struct page_ext {
>  	unsigned int order;
>  	gfp_t gfp_mask;
>  	int last_migrate_reason;
> -	depot_stack_handle_t handle;
> +	depot_stack_handle_t handles[PAGE_OWNER_HANDLE_MAX];
>  #endif
>  };
>  
> diff --git a/mm/page_owner.c b/mm/page_owner.c
> index 4acccb7..c431ac4 100644
> --- a/mm/page_owner.c
> +++ b/mm/page_owner.c
> @@ -13,6 +13,11 @@
>  
>  #define PAGE_OWNER_STACK_DEPTH (16)
>  
> +static const char *page_owner_handles_names[PAGE_OWNER_HANDLE_MAX] = {
> +	"page allocated",
> +	"page freed, was allocated",
> +};
> +
>  static bool page_owner_disabled = true;
>  DEFINE_STATIC_KEY_FALSE(page_owner_inited);
>  
> @@ -85,19 +90,6 @@ struct page_ext_operations page_owner_ops = {
>  	.init = init_page_owner,
>  };
>  
> -void __page_owner_free_pages(struct page *page, unsigned int order)
> -{
> -	int i;
> -	struct page_ext *page_ext;
> -
> -	for (i = 0; i < (1 << order); i++) {
> -		page_ext = lookup_page_ext(page + i);
> -		if (unlikely(!page_ext))
> -			continue;
> -		__clear_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags);
> -	}
> -}
> -
>  static inline bool check_recursive_alloc(struct stack_trace *trace,
>  					unsigned long ip)
>  {
> @@ -147,6 +139,23 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
>  	return handle;
>  }
>  
> +void __page_owner_free_pages(struct page *page, unsigned int order)
> +{
> +	int i;
> +	depot_stack_handle_t handle = save_stack(0);
> +
> +	for (i = 0; i < (1 << order); i++) {
> +		struct page_ext *page_ext = lookup_page_ext(page + i);
> +
> +		if (unlikely(!page_ext))
> +			continue;
> +
> +		page_ext->handles[PAGE_OWNER_HANDLE_FREE] = handle;
> +		__set_bit(PAGE_EXT_OWNER_FREE, &page_ext->flags);
> +		__clear_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags);
> +	}
> +}

I can't find any place that clears PAGE_EXT_OWNER_FREE. Is that
intended? If so, why?

> +
>  noinline void __page_owner_alloc_pages(struct page *page, unsigned int order,
>  					gfp_t gfp_mask)
>  {
> @@ -155,7 +164,7 @@ noinline void __page_owner_alloc_pages(struct page *page, unsigned int order,
>  	if (unlikely(!page_ext))
>  		return;
>  
> -	page_ext->handle = save_stack(gfp_mask);
> +	page_ext->handles[PAGE_OWNER_HANDLE_ALLOC] = save_stack(gfp_mask);
>  	page_ext->order = order;
>  	page_ext->gfp_mask = gfp_mask;
>  	page_ext->last_migrate_reason = -1;
> @@ -189,6 +198,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
>  {
>  	struct page_ext *old_ext = lookup_page_ext(oldpage);
>  	struct page_ext *new_ext = lookup_page_ext(newpage);
> +	int i;
>  
>  	if (unlikely(!old_ext || !new_ext))
>  		return;
> @@ -196,7 +206,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
>  	new_ext->order = old_ext->order;
>  	new_ext->gfp_mask = old_ext->gfp_mask;
>  	new_ext->last_migrate_reason = old_ext->last_migrate_reason;
> -	new_ext->handle = old_ext->handle;
> +
> +	for (i = 0; i < PAGE_OWNER_HANDLE_MAX; i++)
> +		new_ext->handles[i] = old_ext->handles[i];
>  
>  	/*
>  	 * We don't clear the bit on the oldpage as it's going to be freed
> @@ -292,7 +304,7 @@ void __dump_page_owner(struct page *page)
>  	};
>  	depot_stack_handle_t handle;
>  	gfp_t gfp_mask;
> -	int mt;
> +	int mt, i;
>  
>  	if (unlikely(!page_ext)) {
>  		pr_alert("There is not page extension available.\n");
> @@ -301,25 +313,31 @@ void __dump_page_owner(struct page *page)
>  	gfp_mask = page_ext->gfp_mask;
>  	mt = gfpflags_to_migratetype(gfp_mask);
>  
> -	if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags)) {
> +	if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags) &&
> +			!test_bit(PAGE_EXT_OWNER_FREE, &page_ext->flags)) {
>  		pr_alert("page_owner info is not active (free page?)\n");
>  		return;
>  	}
>  
> -	handle = READ_ONCE(page_ext->handle);
> -	if (!handle) {
> -		pr_alert("page_owner info is not active (free page?)\n");
> -		return;
> -	}
> +	for (i = 0; i < PAGE_OWNER_HANDLE_MAX; i++) {
> +		handle = READ_ONCE(page_ext->handles[i]);
> +		if (!handle) {
> +			pr_alert("page_owner info is not active for `%s'\n",
> +					page_owner_handles_names[i]);
> +			continue;
> +		}
>  
> -	depot_fetch_stack(handle, &trace);
> -	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
> -		 page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
> -	print_stack_trace(&trace, 0);
> +		depot_fetch_stack(handle, &trace);
> +		pr_alert("%s via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
> +				page_owner_handles_names[i], page_ext->order,
> +				migratetype_names[mt], gfp_mask, &gfp_mask);
> +		print_stack_trace(&trace, 0);
>  
> -	if (page_ext->last_migrate_reason != -1)
> +		if (page_ext->last_migrate_reason == -1)
> +			continue;
>  		pr_alert("page has been migrated, last migrate reason: %s\n",
>  			migrate_reason_names[page_ext->last_migrate_reason]);
> +	}
>  }
>  
>  static ssize_t
> @@ -381,7 +399,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
>  		 * Access to page_ext->handle isn't synchronous so we should
>  		 * be careful to access it.
>  		 */
> -		handle = READ_ONCE(page_ext->handle);
> +		handle = READ_ONCE(page_ext->handles[PAGE_OWNER_HANDLE_ALLOC]);
>  		if (!handle)
>  			continue;
>  
> diff --git a/mm/vmstat.c b/mm/vmstat.c
> index 63ef65f..4ff0135 100644
> --- a/mm/vmstat.c
> +++ b/mm/vmstat.c
> @@ -1073,6 +1073,9 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
>  			if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags))
>  				continue;
>  
> +			if (!test_bit(PAGE_EXT_OWNER_FREE, &page_ext->flags))
> +				continue;
> +

I don't think this line is correct. The PAGE_EXT_OWNER_ALLOC check
above is there to find an allocated page. What is the purpose of the
PAGE_EXT_OWNER_FREE check?

Thanks.

>  			page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
>  			if (pageblock_mt != page_mt) {
>  				if (is_migrate_cma(pageblock_mt))
> -- 
> 2.9.0.37.g6d523a3
> 
> --
> To unsubscribe, send a message with 'unsubscribe linux-mm' in
> the body to majordomo@kvack.org.  For more info on Linux MM,
> see: http://www.linux-mm.org/ .
> Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [RFC][PATCH v2 3/3] mm/page_owner: track page free call chain
  2016-07-11  6:21   ` Joonsoo Kim
@ 2016-07-11  7:24     ` Sergey Senozhatsky
  0 siblings, 0 replies; 7+ messages in thread
From: Sergey Senozhatsky @ 2016-07-11  7:24 UTC (permalink / raw)
  To: Joonsoo Kim
  Cc: Sergey Senozhatsky, Andrew Morton, Vlastimil Babka, linux-mm,
	linux-kernel, Sergey Senozhatsky

On (07/11/16 15:21), Joonsoo Kim wrote:
[..]
> > +void __page_owner_free_pages(struct page *page, unsigned int order)
> > +{
> > +	int i;
> > +	depot_stack_handle_t handle = save_stack(0);
> > +
> > +	for (i = 0; i < (1 << order); i++) {
> > +		struct page_ext *page_ext = lookup_page_ext(page + i);
> > +
> > +		if (unlikely(!page_ext))
> > +			continue;
> > +
> > +		page_ext->handles[PAGE_OWNER_HANDLE_FREE] = handle;
> > +		__set_bit(PAGE_EXT_OWNER_FREE, &page_ext->flags);
> > +		__clear_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags);
> > +	}
> > +}
> 
> I can't find any clear function to PAGE_EXT_OWNER_FREE. Isn't it
> intended? If so, why?

the PAGE_EXT_OWNER_FREE bit is not heavily used now. the
only place is this test in __dump_page_owner()

	if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags) &&
			!test_bit(PAGE_EXT_OWNER_FREE, &page_ext->flags)) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

other than that it's for symmetry/future use.

[..]
> > @@ -1073,6 +1073,9 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
> >  			if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags))
> >  				continue;
> >  
> > +			if (!test_bit(PAGE_EXT_OWNER_FREE, &page_ext->flags))
> > +				continue;
> > +
> 
> I don't think this line is correct. Above PAGE_EXT_OWNER_ALLOC
> check is to find allocated page.

you are right. that PAGE_EXT_OWNER_FREE test is wrong, indeed.
thanks for spotting.

	-ss

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 3/3] mm/page_owner: track page free call chain
  2016-07-08 12:11 ` [RFC][PATCH v2 3/3] mm/page_owner: track page free call chain Sergey Senozhatsky
  2016-07-11  6:21   ` Joonsoo Kim
@ 2016-07-11 14:27   ` Sergey Senozhatsky
  1 sibling, 0 replies; 7+ messages in thread
From: Sergey Senozhatsky @ 2016-07-11 14:27 UTC (permalink / raw)
  To: Joonsoo Kim
  Cc: Andrew Morton, Vlastimil Babka, linux-mm, linux-kernel,
	Sergey Senozhatsky, Sergey Senozhatsky

Extend page_owner with free_pages() tracking functionality. This adds an
additional backtrace to the dump_page_owner() output, telling us which path
has freed the page.

As a trivial example, let's assume that do_some_foo() has an error - an extra
put_page() on the error return path - and that the function also gets preempted,
letting some other task to allocate the same page, which is then 'mistakenly'
getting freed once again by do_some_foo().

CPUA					CPUB

void do_some_foo(void)
{
	page = alloc_page();
	if (error) {
		put_page(page);
		goto out;
	}
	...
out:
	<<preempted>>
					void do_some_bar()
					{
						page = alloc_page();
						...
						<<preempted>>
	...
	put_page(page);
}
						<<use freed page>>
						put_page(page);
					}

With the existing implementation we would see only CPUB's backtrace
from bad_page. The extended page_owner would also report CPUA's
put_page(), which can be a helpful hint.

Backtrace:

 BUG: Bad page state in process cc1  pfn:bae1d
 page:ffffea0002eb8740 count:-1 mapcount:0 mapping:          (null) index:0x0
 flags: 0x4000000000000000()
 page dumped because: nonzero _count
 page allocated via order 0, migratetype Unmovable, gfp_mask 0x2000200(GFP_NOWAIT|__GFP_NOWARN)
  [<ffffffff8101bc9c>] save_stack_trace+0x26/0x41
  [<ffffffff81110fe4>] save_stack+0x46/0xc3
  [<ffffffff81111481>] __page_owner_alloc_pages+0x24/0x41
  [<ffffffff810c9867>] post_alloc_hook+0x1e/0x20
  [<ffffffff810ca63d>] get_page_from_freelist+0x4fd/0x756
  [<ffffffff810cadea>] __alloc_pages_nodemask+0xe7/0xbcf
  [<ffffffff810cb8e4>] __get_free_pages+0x12/0x40
  [<ffffffff810e6b64>] __tlb_remove_page_size.part.12+0x37/0x78
  [<ffffffff810e6d9b>] __tlb_remove_page_size+0x21/0x23
  [<ffffffff810e7ff2>] unmap_page_range+0x63a/0x75b
  [<ffffffff810e81cf>] unmap_single_vma+0xbc/0xc6
  [<ffffffff810e82d2>] unmap_vmas+0x35/0x44
  [<ffffffff810ee6f4>] exit_mmap+0x5a/0xec
  [<ffffffff810385b4>] mmput+0x4a/0xdc
  [<ffffffff8103dff7>] do_exit+0x398/0x8de
  [<ffffffff8103e5ae>] do_group_exit+0x45/0xb0
 page freed, was allocated via order 0, migratetype Unmovable, gfp_mask 0x2000200(GFP_NOWAIT|__GFP_NOWARN)
  [<ffffffff8101bc9c>] save_stack_trace+0x26/0x41
  [<ffffffff81110fe4>] save_stack+0x46/0xc3
  [<ffffffff81111411>] __page_owner_free_pages+0x25/0x71
  [<ffffffff810c9f0a>] free_hot_cold_page+0x1d6/0x1ea
  [<ffffffff810d03e1>] __put_page+0x37/0x3a
  [<ffffffff8115b8da>] do_some_foo()+0x8a/0x8e
	...
 Modules linked in: ....
 CPU: 3 PID: 1274 Comm: cc1 Not tainted 4.7.0-rc5-next-20160701-dbg-00009-ge01494f-dirty #535
  0000000000000000 ffff8800aeea3c18 ffffffff811e67ca ffffea0002eb8740
  ffffffff8175675e ffff8800aeea3c40 ffffffff810c87f5 0000000000000000
  ffffffff81880b40 ffff880137d98438 ffff8800aeea3c50 ffffffff810c88d5
 Call Trace:
  [<ffffffff811e67ca>] dump_stack+0x68/0x92
  [<ffffffff810c87f5>] bad_page+0xf8/0x11e
  [<ffffffff810c88d5>] check_new_page_bad+0x63/0x65
  [<ffffffff810ca36a>] get_page_from_freelist+0x22a/0x756
  [<ffffffff810cadea>] __alloc_pages_nodemask+0xe7/0xbcf
  [<ffffffff81073a43>] ? trace_hardirqs_on_caller+0x16d/0x189
  [<ffffffff810ede8d>] ? vma_merge+0x159/0x249
  [<ffffffff81074aa0>] ? __lock_acquire+0x2ac/0x15c7
  [<ffffffff81034ace>] pte_alloc_one+0x1b/0x67
  [<ffffffff810e922b>] __pte_alloc+0x19/0xa6
  [<ffffffff810eb09f>] handle_mm_fault+0x409/0xc59
  [<ffffffff810309f6>] __do_page_fault+0x1d8/0x3ac
  [<ffffffff81030bf7>] do_page_fault+0xc/0xe
  [<ffffffff814a84af>] page_fault+0x1f/0x30

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
---
 include/linux/page_ext.h | 11 ++++++-
 mm/page_owner.c          | 74 ++++++++++++++++++++++++++++++------------------
 2 files changed, 56 insertions(+), 29 deletions(-)

diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 66ba2bb..0cccc94 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -27,12 +27,21 @@ enum page_ext_flags {
 	PAGE_EXT_DEBUG_POISON,		/* Page is poisoned */
 	PAGE_EXT_DEBUG_GUARD,
 	PAGE_EXT_OWNER_ALLOC,
+	PAGE_EXT_OWNER_FREE,
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
 	PAGE_EXT_YOUNG,
 	PAGE_EXT_IDLE,
 #endif
 };
 
+#ifdef CONFIG_PAGE_OWNER
+enum page_owner_handles {
+	PAGE_OWNER_HANDLE_ALLOC,
+	PAGE_OWNER_HANDLE_FREE,
+	PAGE_OWNER_HANDLE_MAX
+};
+#endif
+
 /*
  * Page Extension can be considered as an extended mem_map.
  * A page_ext page is associated with every page descriptor. The
@@ -46,7 +55,7 @@ struct page_ext {
 	unsigned int order;
 	gfp_t gfp_mask;
 	int last_migrate_reason;
-	depot_stack_handle_t handle;
+	depot_stack_handle_t handles[PAGE_OWNER_HANDLE_MAX];
 #endif
 };
 
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 4acccb7..c431ac4 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -13,6 +13,11 @@
 
 #define PAGE_OWNER_STACK_DEPTH (16)
 
+static const char *page_owner_handles_names[PAGE_OWNER_HANDLE_MAX] = {
+	"page allocated",
+	"page freed, was allocated",
+};
+
 static bool page_owner_disabled = true;
 DEFINE_STATIC_KEY_FALSE(page_owner_inited);
 
@@ -85,19 +90,6 @@ struct page_ext_operations page_owner_ops = {
 	.init = init_page_owner,
 };
 
-void __page_owner_free_pages(struct page *page, unsigned int order)
-{
-	int i;
-	struct page_ext *page_ext;
-
-	for (i = 0; i < (1 << order); i++) {
-		page_ext = lookup_page_ext(page + i);
-		if (unlikely(!page_ext))
-			continue;
-		__clear_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags);
-	}
-}
-
 static inline bool check_recursive_alloc(struct stack_trace *trace,
 					unsigned long ip)
 {
@@ -147,6 +139,23 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
 	return handle;
 }
 
+void __page_owner_free_pages(struct page *page, unsigned int order)
+{
+	int i;
+	depot_stack_handle_t handle = save_stack(0);
+
+	for (i = 0; i < (1 << order); i++) {
+		struct page_ext *page_ext = lookup_page_ext(page + i);
+
+		if (unlikely(!page_ext))
+			continue;
+
+		page_ext->handles[PAGE_OWNER_HANDLE_FREE] = handle;
+		__set_bit(PAGE_EXT_OWNER_FREE, &page_ext->flags);
+		__clear_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags);
+	}
+}
+
 noinline void __page_owner_alloc_pages(struct page *page, unsigned int order,
 					gfp_t gfp_mask)
 {
@@ -155,7 +164,7 @@ noinline void __page_owner_alloc_pages(struct page *page, unsigned int order,
 	if (unlikely(!page_ext))
 		return;
 
-	page_ext->handle = save_stack(gfp_mask);
+	page_ext->handles[PAGE_OWNER_HANDLE_ALLOC] = save_stack(gfp_mask);
 	page_ext->order = order;
 	page_ext->gfp_mask = gfp_mask;
 	page_ext->last_migrate_reason = -1;
@@ -189,6 +198,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 {
 	struct page_ext *old_ext = lookup_page_ext(oldpage);
 	struct page_ext *new_ext = lookup_page_ext(newpage);
+	int i;
 
 	if (unlikely(!old_ext || !new_ext))
 		return;
@@ -196,7 +206,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 	new_ext->order = old_ext->order;
 	new_ext->gfp_mask = old_ext->gfp_mask;
 	new_ext->last_migrate_reason = old_ext->last_migrate_reason;
-	new_ext->handle = old_ext->handle;
+
+	for (i = 0; i < PAGE_OWNER_HANDLE_MAX; i++)
+		new_ext->handles[i] = old_ext->handles[i];
 
 	/*
 	 * We don't clear the bit on the oldpage as it's going to be freed
@@ -292,7 +304,7 @@ void __dump_page_owner(struct page *page)
 	};
 	depot_stack_handle_t handle;
 	gfp_t gfp_mask;
-	int mt;
+	int mt, i;
 
 	if (unlikely(!page_ext)) {
 		pr_alert("There is not page extension available.\n");
@@ -301,25 +313,31 @@ void __dump_page_owner(struct page *page)
 	gfp_mask = page_ext->gfp_mask;
 	mt = gfpflags_to_migratetype(gfp_mask);
 
-	if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags)) {
+	if (!test_bit(PAGE_EXT_OWNER_ALLOC, &page_ext->flags) &&
+			!test_bit(PAGE_EXT_OWNER_FREE, &page_ext->flags)) {
 		pr_alert("page_owner info is not active (free page?)\n");
 		return;
 	}
 
-	handle = READ_ONCE(page_ext->handle);
-	if (!handle) {
-		pr_alert("page_owner info is not active (free page?)\n");
-		return;
-	}
+	for (i = 0; i < PAGE_OWNER_HANDLE_MAX; i++) {
+		handle = READ_ONCE(page_ext->handles[i]);
+		if (!handle) {
+			pr_alert("page_owner info is not active for `%s'\n",
+					page_owner_handles_names[i]);
+			continue;
+		}
 
-	depot_fetch_stack(handle, &trace);
-	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
-		 page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
-	print_stack_trace(&trace, 0);
+		depot_fetch_stack(handle, &trace);
+		pr_alert("%s via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
+				page_owner_handles_names[i], page_ext->order,
+				migratetype_names[mt], gfp_mask, &gfp_mask);
+		print_stack_trace(&trace, 0);
 
-	if (page_ext->last_migrate_reason != -1)
+		if (page_ext->last_migrate_reason == -1)
+			continue;
 		pr_alert("page has been migrated, last migrate reason: %s\n",
 			migrate_reason_names[page_ext->last_migrate_reason]);
+	}
 }
 
 static ssize_t
@@ -381,7 +399,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		 * Access to page_ext->handle isn't synchronous so we should
 		 * be careful to access it.
 		 */
-		handle = READ_ONCE(page_ext->handle);
+		handle = READ_ONCE(page_ext->handles[PAGE_OWNER_HANDLE_ALLOC]);
 		if (!handle)
 			continue;
 
-- 
2.9.0.243.g5c589a7

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2016-07-11 14:28 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-07-08 12:11 [RFC][PATCH v2 0/3] mm/page:_owner: track page free call chain Sergey Senozhatsky
2016-07-08 12:11 ` [RFC][PATCH v2 1/3] mm/page_owner: rename page_owner functions Sergey Senozhatsky
2016-07-08 12:11 ` [RFC][PATCH v2 2/3] mm/page_owner: rename PAGE_EXT_OWNER flag Sergey Senozhatsky
2016-07-08 12:11 ` [RFC][PATCH v2 3/3] mm/page_owner: track page free call chain Sergey Senozhatsky
2016-07-11  6:21   ` Joonsoo Kim
2016-07-11  7:24     ` Sergey Senozhatsky
2016-07-11 14:27   ` [PATCH " Sergey Senozhatsky

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).