linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] mm: split thp synchronously on MADV_DONTNEED
@ 2021-11-20 20:12 Shakeel Butt
  2021-11-21  4:35 ` Matthew Wilcox
                   ` (3 more replies)
  0 siblings, 4 replies; 32+ messages in thread
From: Shakeel Butt @ 2021-11-20 20:12 UTC (permalink / raw)
  To: David Hildenbrand, Kirill A . Shutemov, Yang Shi, Zi Yan, Matthew Wilcox
  Cc: Andrew Morton, linux-mm, linux-kernel, Shakeel Butt

Many applications do sophisticated management of their heap memory for
better performance but with low cost. We have a bunch of such
applications running on our production and examples include caching and
data storage services. These applications keep their hot data on the
THPs for better performance and release the cold data through
MADV_DONTNEED to keep the memory cost low.

The kernel defers the split and release of THPs until there is memory
pressure. This complicates the memory management of these
sophisticated applications, which then need to look into low level
kernel handling of THPs to better gauge their headroom for expansion. In
addition, these applications are very latency sensitive and would prefer
not to face memory reclaim due to the non-deterministic nature of reclaim.

This patch lets such applications not worry about the low level handling
of THPs in the kernel and splits the THPs synchronously on
MADV_DONTNEED.

Signed-off-by: Shakeel Butt <shakeelb@google.com>
---
 include/linux/mmzone.h   |  5 ++++
 include/linux/sched.h    |  4 ++++
 include/linux/sched/mm.h | 11 +++++++++
 kernel/fork.c            |  3 +++
 mm/huge_memory.c         | 50 ++++++++++++++++++++++++++++++++++++++++
 mm/madvise.c             |  8 +++++++
 6 files changed, 81 insertions(+)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 58e744b78c2c..7fa0035128b9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -795,6 +795,11 @@ struct deferred_split {
 	struct list_head split_queue;
 	unsigned long split_queue_len;
 };
+void split_local_deferred_list(struct list_head *defer_list);
+#else
+static inline void split_local_deferred_list(struct list_head *defer_list)
+{
+}
 #endif
 
 /*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9d27fd0ce5df..a984bb6509d9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1412,6 +1412,10 @@ struct task_struct {
 	struct mem_cgroup		*active_memcg;
 #endif
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	struct list_head		*deferred_split_list;
+#endif
+
 #ifdef CONFIG_BLK_CGROUP
 	struct request_queue		*throttle_queue;
 #endif
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index fdf742e15e27..9b438c7e811e 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -374,6 +374,17 @@ set_active_memcg(struct mem_cgroup *memcg)
 }
 #endif
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void set_local_deferred_list(struct list_head *list)
+{
+	current->deferred_split_list = list;
+}
+#else
+static inline void set_local_deferred_list(struct list_head *list)
+{
+}
+#endif
+
 #ifdef CONFIG_MEMBARRIER
 enum {
 	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
diff --git a/kernel/fork.c b/kernel/fork.c
index 01af6129aa38..8197b8ed4b7a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1019,6 +1019,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 
 #ifdef CONFIG_MEMCG
 	tsk->active_memcg = NULL;
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	tsk->deferred_split_list = NULL;
 #endif
 	return tsk;
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e5483347291c..2f73eeecb857 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2754,6 +2754,7 @@ void free_transhuge_page(struct page *page)
 void deferred_split_huge_page(struct page *page)
 {
 	struct deferred_split *ds_queue = get_deferred_split_queue(page);
+	struct list_head *list = current->deferred_split_list;
 #ifdef CONFIG_MEMCG
 	struct mem_cgroup *memcg = page_memcg(compound_head(page));
 #endif
@@ -2774,7 +2775,14 @@ void deferred_split_huge_page(struct page *page)
 	if (PageSwapCache(page))
 		return;
 
+	if (list && list_empty(page_deferred_list(page))) {
+		/* corresponding put in split_local_deferred_list. */
+		get_page(page);
+		list_add(page_deferred_list(page), list);
+	}
+
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+
 	if (list_empty(page_deferred_list(page))) {
 		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
 		list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
@@ -2801,6 +2809,48 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
 	return READ_ONCE(ds_queue->split_queue_len);
 }
 
+void split_local_deferred_list(struct list_head *defer_list)
+{
+	struct list_head *pos, *next;
+	struct page *page;
+
+	/* First iteration for split. */
+	list_for_each_safe(pos, next, defer_list) {
+		page = list_entry((void *)pos, struct page, deferred_list);
+		page = compound_head(page);
+
+		if (!trylock_page(page))
+			continue;
+
+		if (split_huge_page(page)) {
+			unlock_page(page);
+			continue;
+		}
+		/* split_huge_page() removes page from list on success */
+		unlock_page(page);
+
+		/* corresponding get in deferred_split_huge_page. */
+		put_page(page);
+	}
+
+	/* Second iteration to putback failed pages. */
+	list_for_each_safe(pos, next, defer_list) {
+		struct deferred_split *ds_queue;
+		unsigned long flags;
+
+		page = list_entry((void *)pos, struct page, deferred_list);
+		page = compound_head(page);
+		ds_queue = get_deferred_split_queue(page);
+
+		spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+		list_move(page_deferred_list(page), &ds_queue->split_queue);
+		spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+
+		/* corresponding get in deferred_split_huge_page. */
+		put_page(page);
+	}
+}
+
 static unsigned long deferred_split_scan(struct shrinker *shrink,
 		struct shrink_control *sc)
 {
diff --git a/mm/madvise.c b/mm/madvise.c
index 8c927202bbe6..15614115e359 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -762,7 +762,15 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
 					unsigned long start, unsigned long end)
 {
+	LIST_HEAD(list);
+
+	set_local_deferred_list(&list);
+
 	zap_page_range(vma, start, end - start);
+
+	set_local_deferred_list(NULL);
+	split_local_deferred_list(&list);
+
 	return 0;
 }
 
-- 
2.34.0.rc2.393.gf8c9666880-goog


^ permalink raw reply related	[flat|nested] 32+ messages in thread

end of thread, other threads:[~2022-01-24 18:49 UTC | newest]

Thread overview: 32+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-11-20 20:12 [PATCH] mm: split thp synchronously on MADV_DONTNEED Shakeel Butt
2021-11-21  4:35 ` Matthew Wilcox
2021-11-21  5:25   ` Shakeel Butt
2021-11-22  0:50 ` Kirill A. Shutemov
2021-11-22  3:42   ` Shakeel Butt
2021-11-22  4:56 ` Matthew Wilcox
2021-11-22  9:19   ` David Hildenbrand
2021-12-08 13:23     ` Pankaj Gupta
2021-11-22  8:32 ` David Hildenbrand
2021-11-22 18:40   ` Shakeel Butt
2021-11-22 18:59     ` David Hildenbrand
2021-11-23  1:20       ` Shakeel Butt
2021-11-23 16:56         ` David Hildenbrand
2021-11-23 17:17           ` Shakeel Butt
2021-11-23 17:20             ` David Hildenbrand
2021-11-23 17:24               ` Shakeel Butt
2021-11-23 17:26                 ` David Hildenbrand
2021-11-23 17:28                   ` Shakeel Butt
2021-11-25 10:09                     ` Peter Xu
2021-11-25 17:14                       ` Shakeel Butt
2021-11-26  0:00                         ` Peter Xu
2021-11-25 10:24     ` Peter Xu
2021-11-25 10:32       ` David Hildenbrand
2021-11-26  2:52         ` Peter Xu
2021-11-26  9:04           ` David Hildenbrand
2021-11-29 22:00             ` Yang Shi
2021-11-26  3:21       ` Shakeel Butt
2021-11-26  4:12         ` Peter Xu
2021-11-26  9:16           ` David Hildenbrand
2021-11-26  9:39             ` Peter Xu
2021-11-29 21:32             ` Yang Shi
2022-01-24 18:48           ` David Rientjes

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).