All of lore.kernel.org
 help / color / mirror / Atom feed
* [RFC] mm/vmscan: add periodic slab shrinker
@ 2022-04-02  7:21 Hillf Danton
  2022-04-02 17:54 ` Roman Gushchin
  0 siblings, 1 reply; 22+ messages in thread
From: Hillf Danton @ 2022-04-02  7:21 UTC (permalink / raw)
  To: MM
  Cc: Matthew Wilcox, Dave Chinner, Mel Gorman, Stephen Brennan,
	Yu Zhao, David Hildenbrand, LKML

To mitigate the pain of having "several millions" of negative dentries in
a single directory [1] for example, add the periodic slab shrinker that
runs independently of direct and background reclaimers in a bid to recycle the
slab objects that have been cold for more than 30 seconds.

Q, Why is it needed?
A, Kswapd may take a nap as long as 30 minutes.

Add periodic flag to shrink control to let cache owners know this is the
periodic shrinker that is equivalent to the regular one running at the lowest
reclaim priority, and feel free to take no action without one-off objects
piling up.

Only for thoughts now.

Hillf

[1] https://lore.kernel.org/linux-fsdevel/20220209231406.187668-1-stephen.s.brennan@oracle.com/

--- x/include/linux/shrinker.h
+++ y/include/linux/shrinker.h
@@ -14,6 +14,7 @@ struct shrink_control {
 
 	/* current node being shrunk (for NUMA aware shrinkers) */
 	int nid;
+	int periodic;
 
 	/*
 	 * How many objects scan_objects should scan and try to reclaim.
--- x/mm/vmscan.c
+++ y/mm/vmscan.c
@@ -781,6 +781,8 @@ static unsigned long do_shrink_slab(stru
 		scanned += shrinkctl->nr_scanned;
 
 		cond_resched();
+		if (shrinkctl->periodic)
+			break;
 	}
 
 	/*
@@ -906,7 +908,8 @@ static unsigned long shrink_slab_memcg(g
  */
 static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 				 struct mem_cgroup *memcg,
-				 int priority)
+				 int priority,
+				 int periodic)
 {
 	unsigned long ret, freed = 0;
 	struct shrinker *shrinker;
@@ -929,6 +932,7 @@ static unsigned long shrink_slab(gfp_t g
 			.gfp_mask = gfp_mask,
 			.nid = nid,
 			.memcg = memcg,
+			.periodic = periodic,
 		};
 
 		ret = do_shrink_slab(&sc, shrinker, priority);
@@ -952,7 +956,7 @@ out:
 	return freed;
 }
 
-static void drop_slab_node(int nid)
+static void drop_slab_node(int nid, int periodic)
 {
 	unsigned long freed;
 	int shift = 0;
@@ -966,19 +970,31 @@ static void drop_slab_node(int nid)
 		freed = 0;
 		memcg = mem_cgroup_iter(NULL, NULL, NULL);
 		do {
-			freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
+			freed += shrink_slab(GFP_KERNEL, nid, memcg, 0, periodic);
 		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
 	} while ((freed >> shift++) > 1);
 }
 
-void drop_slab(void)
+static void __drop_slab(int periodic)
 {
 	int nid;
 
 	for_each_online_node(nid)
-		drop_slab_node(nid);
+		drop_slab_node(nid, periodic);
+}
+
+void drop_slab(void)
+{
+	__drop_slab(0);
 }
 
+static void periodic_slab_shrinker_workfn(struct work_struct *work)
+{
+	__drop_slab(1);
+	queue_delayed_work(system_unbound_wq, to_delayed_work(work), 30*HZ);
+}
+static DECLARE_DELAYED_WORK(periodic_slab_shrinker, periodic_slab_shrinker_workfn);
+
 static inline int is_page_cache_freeable(struct folio *folio)
 {
 	/*
@@ -3098,7 +3114,7 @@ static void shrink_node_memcgs(pg_data_t
 		shrink_lruvec(lruvec, sc);
 
 		shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
-			    sc->priority);
+			    sc->priority, 0);
 
 		/* Record the group's reclaim efficiency */
 		vmpressure(sc->gfp_mask, memcg, false,
@@ -4354,8 +4370,11 @@ static void kswapd_try_to_sleep(pg_data_
 		 */
 		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 
-		if (!kthread_should_stop())
+		if (!kthread_should_stop()) {
+			queue_delayed_work(system_unbound_wq,
+						&periodic_slab_shrinker, 60*HZ);
 			schedule();
+		}
 
 		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 	} else {
--


^ permalink raw reply	[flat|nested] 22+ messages in thread

end of thread, other threads:[~2022-04-21 23:55 UTC | newest]

Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-04-02  7:21 [RFC] mm/vmscan: add periodic slab shrinker Hillf Danton
2022-04-02 17:54 ` Roman Gushchin
2022-04-03  0:56   ` Hillf Danton
2022-04-04  1:09     ` Dave Chinner
2022-04-04  5:14       ` Hillf Danton
2022-04-04 18:32         ` Roman Gushchin
2022-04-04 19:08       ` Roman Gushchin
2022-04-05  5:17         ` Dave Chinner
2022-04-05 16:35           ` Roman Gushchin
2022-04-05 20:58             ` Yang Shi
2022-04-05 21:21               ` Matthew Wilcox
2022-04-06  0:01                 ` Dave Chinner
2022-04-06  4:14                   ` Hillf Danton
2022-04-21 19:03                   ` Kent Overstreet
2022-04-21 23:55                     ` Dave Chinner
2022-04-05 21:31               ` Roman Gushchin
2022-04-06  0:11                 ` Dave Chinner
2022-04-05 17:22       ` Stephen Brennan
2022-04-05 21:18         ` Matthew Wilcox
2022-04-05 23:54           ` Dave Chinner
2022-04-06  1:06             ` Stephen Brennan
2022-04-06  3:52               ` Dave Chinner

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.