* [PATCH 1/2] mm/page_owner.c: introduce vmalloc allocator for page_owner
@ 2022-03-22  3:22 Yinan Zhang
  2022-03-22  3:22 ` [PATCH 2/2] mm/vmalloc.c: record the allocator in page_owner when __vmalloc_area_node complete mapping pages to virtual address Yinan Zhang
  2022-03-22  3:28 ` [PATCH 1/2] mm/page_owner.c: introduce vmalloc allocator for page_owner Matthew Wilcox
  0 siblings, 2 replies; 4+ messages in thread
From: Yinan Zhang @ 2022-03-22  3:22 UTC (permalink / raw)
  To: akpm
  Cc: willy, vbabka, william.kucharski, linux-kernel, linux-mm,
	yejiajian2018, hanshenghong2019, caoyixuan2019, zhaochongxi2019,
	yuhongf, Yinan Zhang

When an application's memory consumption is high and keeps increasing,
it is suspected of having a memory leak. There are several commonly
used memory allocators: slab, cma, vmalloc, etc. Identifying a memory
leak can be sped up if the page information recorded for each
allocator is analyzed separately. This patch introduces the vmalloc
allocator for page_owner.

The following adjustments are made:
1) Add a member variable "allocator" to the page_owner struct. Its
possible values are named by the newly added string array
"allocator_name".
2) Add a function __set_page_owner_allocator() to record the allocator
in the "allocator" field.
3) Add the allocator name to the output of print_page_owner(); a short
usage sketch follows below.
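
For context, a minimal usage sketch of the new interface (declared in
<linux/page_owner.h> by this patch). "page" and "order" stand for
whatever the calling allocator already has, the call is a no-op unless
page_owner is enabled, and the printed line uses illustrative values:

	/* Tag the pages once the allocation has succeeded. */
	set_page_owner_allocator(page, order, PAGE_OWNER_ALLOCATOR_VMALLOC);

	/*
	 * print_page_owner() then begins each record with a line such as
	 * (illustrative values):
	 *   Page allocated via order 0, mask ..., pid 1234, tgid 1234 (foo),
	 *   ts 123456789 ns, free_ts 0 ns, allocator vmalloc
	 */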

This work is coauthored by
	Shenghong Han
	Yixuan Cao
	Chongxi Zhao
	Jiajian Ye
	Yuhong Feng
	Yongqiang Liu

Signed-off-by: Yinan Zhang <zhangyinan2019@email.szu.edu.cn>
---
 include/linux/page_owner.h | 18 ++++++++++++++++++
 mm/page_owner.c            | 29 +++++++++++++++++++++++++++--
 2 files changed, 45 insertions(+), 2 deletions(-)

diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 119a0c9d2a8b..d559781dde67 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -11,6 +11,8 @@ extern struct page_ext_operations page_owner_ops;
 extern void __reset_page_owner(struct page *page, unsigned short order);
 extern void __set_page_owner(struct page *page,
 			unsigned short order, gfp_t gfp_mask);
+extern void __set_page_owner_allocator(struct page *page, unsigned short order,
+			unsigned short allocator);
 extern void __split_page_owner(struct page *page, unsigned int nr);
 extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
@@ -18,6 +20,11 @@ extern void __dump_page_owner(const struct page *page);
 extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 					pg_data_t *pgdat, struct zone *zone);
 
+enum page_owner_allocator {
+	PAGE_OWNER_ALLOCATOR_UNKNOWN = 0,
+	PAGE_OWNER_ALLOCATOR_VMALLOC
+};
+
 static inline void reset_page_owner(struct page *page, unsigned short order)
 {
 	if (static_branch_unlikely(&page_owner_inited))
@@ -31,6 +38,13 @@ static inline void set_page_owner(struct page *page,
 		__set_page_owner(page, order, gfp_mask);
 }
 
+static inline void set_page_owner_allocator(struct page *page, unsigned short order,
+			unsigned short allocator)
+{
+	if (static_branch_unlikely(&page_owner_inited))
+		__set_page_owner_allocator(page, order, allocator);
+}
+
 static inline void split_page_owner(struct page *page, unsigned int nr)
 {
 	if (static_branch_unlikely(&page_owner_inited))
@@ -59,6 +73,10 @@ static inline void set_page_owner(struct page *page,
 			unsigned int order, gfp_t gfp_mask)
 {
 }
+static inline void set_page_owner_allocator(struct page *page, unsigned short order,
+			unsigned short allocator)
+{
+}
 static inline void split_page_owner(struct page *page,
 			unsigned short order)
 {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 0a9588506571..11bb805c61fd 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -32,6 +32,12 @@ struct page_owner {
 	char comm[TASK_COMM_LEN];
 	pid_t pid;
 	pid_t tgid;
+	unsigned short allocator;
+};
+
+const char * const allocator_name[] = {
+	"unknown",
+	"vmalloc",
 };
 
 static bool page_owner_enabled = false;
@@ -148,6 +154,7 @@ void __reset_page_owner(struct page *page, unsigned short order)
 		page_owner = get_page_owner(page_ext);
 		page_owner->free_handle = handle;
 		page_owner->free_ts_nsec = free_ts_nsec;
+		page_owner->allocator = PAGE_OWNER_ALLOCATOR_UNKNOWN;
 		page_ext = page_ext_next(page_ext);
 	}
 }
@@ -190,6 +197,22 @@ noinline void __set_page_owner(struct page *page, unsigned short order,
 	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
 }
 
+void __set_page_owner_allocator(struct page *page, unsigned short order, unsigned short allocator)
+{
+	int i;
+	struct page_ext *page_ext;
+	struct page_owner *page_owner;
+
+	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+	for (i = 0; i < (1 << order); i++) {
+		page_owner = get_page_owner(page_ext);
+		page_owner->allocator = allocator;
+		page_ext = page_ext_next(page_ext);
+	}
+}
+
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
@@ -238,6 +261,7 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old)
 	new_page_owner->tgid = old_page_owner->tgid;
 	new_page_owner->ts_nsec = old_page_owner->ts_nsec;
 	new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
+	new_page_owner->allocator = old_page_owner->allocator;
 	strcpy(new_page_owner->comm, old_page_owner->comm);
 
 	/*
@@ -386,11 +410,12 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
 		return -ENOMEM;
 
 	ret = scnprintf(kbuf, count,
-			"Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns, free_ts %llu ns\n",
+			"Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns, free_ts %llu ns, allocator %s\n",
 			page_owner->order, page_owner->gfp_mask,
 			&page_owner->gfp_mask, page_owner->pid,
 			page_owner->tgid, page_owner->comm,
-			page_owner->ts_nsec, page_owner->free_ts_nsec);
+			page_owner->ts_nsec, page_owner->free_ts_nsec,
+			allocator_name[page_owner->allocator]);
 
 	/* Print information relevant to grouping pages by mobility */
 	pageblock_mt = get_pageblock_migratetype(page);
-- 
2.25.1





* [PATCH 2/2] mm/vmalloc.c: record the allocator in page_owner when __vmalloc_area_node complete mapping pages to virtual address
  2022-03-22  3:22 [PATCH 1/2] mm/page_owner.c: introduce vmalloc allocator for page_owner Yinan Zhang
@ 2022-03-22  3:22 ` Yinan Zhang
  2022-03-22  3:28 ` [PATCH 1/2] mm/page_owner.c: introduce vmalloc allocator for page_owner Matthew Wilcox
  1 sibling, 0 replies; 4+ messages in thread
From: Yinan Zhang @ 2022-03-22  3:22 UTC (permalink / raw)
  To: akpm
  Cc: willy, vbabka, william.kucharski, linux-kernel, linux-mm,
	yejiajian2018, hanshenghong2019, caoyixuan2019, zhaochongxi2019,
	yuhongf, Yinan Zhang

Page owner tracks who allocated each page. Recording the allocator in
page_owner requires support from vmalloc.

When __vmalloc_area_node() has finished mapping pages to their virtual
addresses, the memory allocation has succeeded. At this point, call
set_page_owner_allocator() to record the allocator (in this patch,
vmalloc) in page_owner.
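
With both patches applied, the per-allocator analysis mentioned in
patch 1 becomes a simple scan of the page_owner dump. The sketch below
is an illustration only, not part of this series; it assumes
CONFIG_PAGE_OWNER=y, booting with page_owner=on, debugfs mounted at
/sys/kernel/debug, and root privileges:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/page_owner", "r");
		char line[1024];
		unsigned long vmalloc_recs = 0, other_recs = 0;

		if (!f) {
			perror("page_owner");
			return 1;
		}
		while (fgets(line, sizeof(line), f)) {
			/* The first line of each record now carries the allocator. */
			if (!strstr(line, "Page allocated via order"))
				continue;
			if (strstr(line, "allocator vmalloc"))
				vmalloc_recs++;
			else
				other_recs++;
		}
		fclose(f);
		printf("vmalloc records: %lu, other records: %lu\n",
		       vmalloc_recs, other_recs);
		return 0;
	}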

This work is coauthored by
	Shenghong Han
	Yixuan Cao
	Chongxi Zhao
	Jiajian Ye
	Yuhong Feng
	Yongqiang Liu

Signed-off-by: Yinan Zhang <zhangyinan2019@email.szu.edu.cn>
---
 mm/vmalloc.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b454cf1a261f..c561d40a3f95 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -40,6 +40,7 @@
 #include <linux/uaccess.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/mm.h>
+#include <linux/page_owner.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
@@ -3051,6 +3052,13 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		goto fail;
 	}
 
+#ifdef CONFIG_PAGE_OWNER
+	int i;
+
+	for (i = 0; i < area->nr_pages; i++)
+		set_page_owner_allocator(area->pages[i], 0, PAGE_OWNER_ALLOCATOR_VMALLOC);
+#endif
+
 	return area->addr;
 
 fail:
-- 
2.25.1





* Re: [PATCH 1/2] mm/page_owner.c: introduce vmalloc allocator for page_owner
  2022-03-22  3:22 [PATCH 1/2] mm/page_owner.c: introduce vmalloc allocator for page_owner Yinan Zhang
  2022-03-22  3:22 ` [PATCH 2/2] mm/vmalloc.c: record the allocator in page_owner when __vmalloc_area_node complete mapping pages to virtual address Yinan Zhang
@ 2022-03-22  3:28 ` Matthew Wilcox
       [not found]   ` <299aef9a-dd57-2197-f3cf-6b08c441f755@email.szu.edu.cn>
  1 sibling, 1 reply; 4+ messages in thread
From: Matthew Wilcox @ 2022-03-22  3:28 UTC (permalink / raw)
  To: Yinan Zhang
  Cc: akpm, vbabka, william.kucharski, linux-kernel, linux-mm,
	yejiajian2018, hanshenghong2019, caoyixuan2019, zhaochongxi2019,
	yuhongf

On Tue, Mar 22, 2022 at 11:22:24AM +0800, Yinan Zhang wrote:
> When an application's memory consumption is high and keeps increasing,
> it is suspected of having a memory leak. There are several commonly
> used memory allocators: slab, cma, vmalloc, etc. Identifying a memory
> leak can be sped up if the page information recorded for each
> allocator is analyzed separately. This patch introduces the vmalloc
> allocator for page_owner.

Why is /proc/vmallocinfo not enough?


* Re: [PATCH 1/2] mm/page_owner.c: introduce vmalloc allocator for page_owner
       [not found]   ` <299aef9a-dd57-2197-f3cf-6b08c441f755@email.szu.edu.cn>
@ 2022-03-22 15:10     ` Vlastimil Babka
  0 siblings, 0 replies; 4+ messages in thread
From: Vlastimil Babka @ 2022-03-22 15:10 UTC (permalink / raw)
  To: Yinan Zhang, Matthew Wilcox
  Cc: akpm, william.kucharski, linux-kernel, linux-mm, yejiajian2018,
	hanshenghong2019, caoyixuan2019, zhaochongxi2019, yuhongf

On 3/22/22 15:48, Yinan Zhang wrote:
> on 2022/3/22 11:28, Matthew Wilcox wrote:
> 
>> On Tue, Mar 22, 2022 at 11:22:24AM +0800, Yinan Zhang wrote:
>>> When an application's memory consumption is high and keeps increasing,
>>> it is suspected of having a memory leak. There are several commonly
>>> used memory allocators: slab, cma, vmalloc, etc. Identifying a memory
>>> leak can be sped up if the page information recorded for each
>>> allocator is analyzed separately. This patch introduces the vmalloc
>>> allocator for page_owner.
>> Why is /proc/vmallocinfo not enough?
> 
> There is only one piece of owner information in /proc/vmallocinfo:
> the caller. Page owner records much more: besides the caller, it
> includes the task id (pid), the process id (tgid), and other useful
> information, which enables more comprehensive statistics and analysis
> of page allocation and release.
> 
> Adding allocator information augments page owner's tracking of who
> allocates each page, and this patch adds the vmalloc allocator
> information.

Hm but page_owner has the whole stack trace of allocation so it should be
easy to determine vmalloc from slab etc? How exactly would this extra field
help?



