From: "zhaoyang.huang" <zhaoyang.huang@unisoc.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Vlastimil Babka <vbabka@suse.cz>,
	Zhaoyang Huang <huangzhaoyang@gmail.com>, <linux-mm@kvack.org>,
	<linux-kernel@vger.kernel.org>, <ke.wang@unisoc.com>,
	<steve.kang@unisoc.com>
Subject: [RFC PATCH] mm: introduce accounting of page_owner via backtrace
Date: Wed, 2 Nov 2022 20:06:21 +0800
Message-ID: <1667390781-17515-1-git-send-email-zhaoyang.huang@unisoc.com>

From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>

Account page_owner records by their allocation backtrace: __set_page_owner()
adds 1 << order to the count kept for the allocation's stack handle and
__reset_page_owner() subtracts it again, so each stack tracked in the rb tree
carries the number of pages it currently owns. A new debugfs file,
page_owner_simple, reports one entry per unique stack, which simplifies the
output compared to dumping every allocated page.
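
Reading the new file could, for illustration, produce output along these
lines (a hypothetical example following print_owner_stack()'s format; the
counts, addresses and symbols below are made up):

  $ cat /sys/kernel/debug/page_owner_simple
  count 512
      [<000000001a2b3c4d>] __alloc_pages+0x1c8/0x3a0
      [<00000000998877aa>] alloc_pages+0x10c/0x238
      [<0000000055667788>] __page_cache_alloc+0x88/0xc0
  count 64
      [<00000000deadbeef>] __alloc_pages+0x1c8/0x3a0
      [<00000000cafef00d>] kmalloc_order+0x3c/0xa0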

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
---
 include/linux/page_owner.h |   7 +++
 mm/page_owner.c            | 113 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 120 insertions(+)

diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 119a0c9..c86a342 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -3,6 +3,13 @@
 #define __LINUX_PAGE_OWNER_H
 
 #include <linux/jump_label.h>
+#include <linux/rbtree.h>
+#include <linux/stackdepot.h>
+struct hash_object {
+	int count;
+	depot_stack_handle_t trace_hash;
+	struct rb_node rb_node;
+};
 
 #ifdef CONFIG_PAGE_OWNER
 extern struct static_key_false page_owner_inited;
diff --git a/mm/page_owner.c b/mm/page_owner.c
index e4c6f3f..3d014fd 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -42,6 +42,12 @@ struct page_owner {
 static depot_stack_handle_t early_handle;
 
 static void init_early_allocated_pages(void);
+static void store_user_hash(depot_stack_handle_t trace_hash, int count);
+static DEFINE_SPINLOCK(stack_hash_lock);
+static struct hash_object ho_pool[8192];
+static int ho_pool_free_count = ARRAY_SIZE(ho_pool);
+static struct rb_root user_hash_root = RB_ROOT;
+
 
 static int __init early_page_owner_param(char *buf)
 {
@@ -152,6 +158,8 @@ void __reset_page_owner(struct page *page, unsigned short order)
 		page_owner->free_handle = handle;
 		page_owner->free_ts_nsec = free_ts_nsec;
 		page_ext = page_ext_next(page_ext);
+		if (!i)
+			store_user_hash(page_owner->handle, -(1 << order));
 	}
 }
 
@@ -190,6 +198,7 @@ noinline void __set_page_owner(struct page *page, unsigned short order,
 		return;
 
 	handle = save_stack(gfp_mask);
+	store_user_hash(handle, 1 << order);
 	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
 }
 
@@ -570,6 +579,105 @@ void __dump_page_owner(const struct page *page)
 	return 0;
 }
 
+static void store_user_hash(depot_stack_handle_t trace_hash, int count)
+{
+	struct rb_node *rb_parent = NULL;
+	unsigned int trace_hash_parent;
+	struct rb_node **link = &user_hash_root.rb_node;
+	struct hash_object *hash_parent = NULL;
+	struct hash_object *hash_object = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&stack_hash_lock, flags);
+	while (*link) {
+		rb_parent = *link;
+		hash_parent = rb_entry(rb_parent, struct hash_object, rb_node);
+		trace_hash_parent = hash_parent->trace_hash;
+		if (trace_hash < trace_hash_parent)
+			link = &hash_parent->rb_node.rb_left;
+		else if (trace_hash_parent < trace_hash)
+			link = &hash_parent->rb_node.rb_right;
+		else {
+			hash_object = rb_entry(*link, struct hash_object, rb_node);
+			hash_object->count += count;
+			if (!RB_EMPTY_ROOT(&user_hash_root)
+					&& !hash_object->count) {
+				rb_erase(&hash_object->rb_node, &user_hash_root);
+			}
+			spin_unlock_irqrestore(&stack_hash_lock, flags);
+			return;
+		}
+	}
+	/*
+	 * No object exists for this trace_hash yet: take one from the
+	 * pool and insert it into user_hash_root.
+	 */
+	hash_object = ho_pool_free_count ? &ho_pool[--ho_pool_free_count] : NULL;
+	if (!hash_object) {
+		spin_unlock_irqrestore(&stack_hash_lock, flags);
+		return;
+	}
+	hash_object->trace_hash = trace_hash;
+	hash_object->count = count;
+	rb_link_node(&hash_object->rb_node, rb_parent, link);
+	rb_insert_color(&hash_object->rb_node, &user_hash_root);
+	spin_unlock_irqrestore(&stack_hash_lock, flags);
+	return;
+}
+static ssize_t print_owner_stack(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct hash_object *object;
+	struct rb_node *rb;
+	unsigned long *entries;
+	unsigned int nr_entries;
+	char *kbuf;
+	int i, ret = 0;
+	unsigned long flags;
+
+	rb = file->private_data ? (struct rb_node *)file->private_data : rb_first(&user_hash_root);
+	if (!rb)
+		return 0;
+
+	count = min_t(size_t, count, PAGE_SIZE);
+	kbuf = kmalloc(count, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+	spin_lock_irqsave(&stack_hash_lock, flags);
+	object = rb_entry(rb, struct hash_object, rb_node);
+	if (object) {
+		while (!object->trace_hash || object->count <= 0) {
+			rb = rb_next(rb);
+			object = rb ? rb_entry(rb, struct hash_object, rb_node) : NULL;
+			if (!object) {
+				spin_unlock_irqrestore(&stack_hash_lock, flags);
+				kfree(kbuf);
+				return 0;
+			}
+		}
+		nr_entries = stack_depot_fetch(object->trace_hash, &entries);
+		ret += snprintf(kbuf + ret, count - ret, "count %d\n", object->count);
+		if (ret >= count)
+			goto err;
+		for (i = 0; i < nr_entries; i++) {
+			void *ptr = (void *)entries[i];
+			ret += snprintf(kbuf + ret, count - ret, "    [<%p>] %pS\n", ptr, ptr);
+			if (ret >= count)
+				goto err;
+		}
+	}
+	file->private_data = (void *)rb_next(rb);
+	spin_unlock_irqrestore(&stack_hash_lock, flags);
+	if (copy_to_user(buf, kbuf, ret))
+		ret = -EFAULT;
+
+	kfree(kbuf);
+	return ret;
+err:
+	spin_unlock_irqrestore(&stack_hash_lock, flags);
+	kfree(kbuf);
+	return -ENOMEM;
+}
+
 static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 {
 	unsigned long pfn = zone->zone_start_pfn;
@@ -661,6 +769,9 @@ static void init_early_allocated_pages(void)
 static const struct file_operations proc_page_owner_operations = {
 	.read		= read_page_owner,
 };
+static const struct file_operations proc_page_owner_simple_operations = {
+	.read		= print_owner_stack,
+};
 
 static int __init pageowner_init(void)
 {
@@ -671,6 +782,8 @@ static int __init pageowner_init(void)
 
 	debugfs_create_file("page_owner", 0400, NULL, NULL,
 			    &proc_page_owner_operations);
+	debugfs_create_file("page_owner_simple", 0400, NULL, NULL,
+			    &proc_page_owner_simple_operations);
 
 	return 0;
 }
-- 
1.9.1


Thread overview: 2+ messages
2022-11-02 12:06 zhaoyang.huang [this message]
2022-11-02 12:35 ` [RFC PATCH] mm: introduce accounting of page_owner via backtrace Vlastimil Babka
