linux-mm.kvack.org archive mirror
* [PATCH 1/4] vmscan: push reclaim_state down to shrink_node()
@ 2017-07-04 12:33 josef
  2017-07-04 12:33 ` [PATCH 2/4][v2] vmscan: bailout of slab reclaim once we reach our target josef
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: josef @ 2017-07-04 12:33 UTC
  To: minchan, akpm, kernel-team, linux-mm, hannes, riel; +Cc: Josef Bacik

From: Josef Bacik <jbacik@fb.com>

We need reclaim_state for slab reclaim accounting, but only some of the
reclaim paths set it, and they do so far higher up the call chain than
where it is actually used.  Fix this by setting it up in shrink_node()
so we always have the slab reclaim information, regardless of how we
entered reclaim.
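
For reference, a minimal standalone model of the resulting flow (this is
illustrative C, not the patched kernel function; the structures are
stand-ins): shrink_node() installs an on-stack reclaim_state, folds
reclaimed_slab into its totals, and clears the pointer when it is done, so
the callers no longer have to.

#include <stdio.h>

struct reclaim_state { unsigned long reclaimed_slab; };
struct task { struct reclaim_state *reclaim_state; };
static struct task current_task;

static unsigned long fake_shrink_slab(void)
{
	/* shrinkers credit freed slab pages to current->reclaim_state */
	if (current_task.reclaim_state)
		current_task.reclaim_state->reclaimed_slab += 32;
	return 32;
}

static unsigned long model_shrink_node(void)
{
	struct reclaim_state reclaim_state = { .reclaimed_slab = 0 };
	unsigned long nr_reclaimed = 0;

	current_task.reclaim_state = &reclaim_state;	/* previously done by each caller */
	fake_shrink_slab();
	nr_reclaimed += reclaim_state.reclaimed_slab;	/* fold slab pages into the total */
	reclaim_state.reclaimed_slab = 0;
	current_task.reclaim_state = NULL;		/* previously done by each caller */
	return nr_reclaimed;
}

int main(void)
{
	printf("reclaimed %lu pages\n", model_shrink_node());
	return 0;
}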

Signed-off-by: Josef Bacik <jbacik@fb.com>
---
 mm/page_alloc.c |  4 ----
 mm/vmscan.c     | 26 +++++++-------------------
 2 files changed, 7 insertions(+), 23 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b896897..2d5b79c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3484,7 +3484,6 @@ static int
 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
 					const struct alloc_context *ac)
 {
-	struct reclaim_state reclaim_state;
 	int progress;
 	unsigned int noreclaim_flag;
 
@@ -3494,13 +3493,10 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
 	cpuset_memory_pressure_bump();
 	noreclaim_flag = memalloc_noreclaim_save();
 	lockdep_set_current_reclaim_state(gfp_mask);
-	reclaim_state.reclaimed_slab = 0;
-	current->reclaim_state = &reclaim_state;
 
 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
 								ac->nodemask);
 
-	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
 	memalloc_noreclaim_restore(noreclaim_flag);
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f84cdd3..cf23de9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2560,10 +2560,13 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 
 static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 {
-	struct reclaim_state *reclaim_state = current->reclaim_state;
+	struct reclaim_state reclaim_state = {
+		.reclaimed_slab = 0,
+	};
 	unsigned long nr_reclaimed, nr_scanned;
 	bool reclaimable = false;
 
+	current->reclaim_state = &reclaim_state;
 	do {
 		struct mem_cgroup *root = sc->target_mem_cgroup;
 		struct mem_cgroup_reclaim_cookie reclaim = {
@@ -2644,10 +2647,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 			   sc->nr_scanned - nr_scanned,
 			   sc->nr_reclaimed - nr_reclaimed);
 
-		if (reclaim_state) {
-			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
-			reclaim_state->reclaimed_slab = 0;
-		}
+		sc->nr_reclaimed += reclaim_state.reclaimed_slab;
+		reclaim_state.reclaimed_slab = 0;
 
 		if (sc->nr_reclaimed - nr_reclaimed)
 			reclaimable = true;
@@ -2664,6 +2665,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 	if (reclaimable)
 		pgdat->kswapd_failures = 0;
 
+	current->reclaim_state = NULL;
 	return reclaimable;
 }
 
@@ -3527,16 +3529,12 @@ static int kswapd(void *p)
 	pg_data_t *pgdat = (pg_data_t*)p;
 	struct task_struct *tsk = current;
 
-	struct reclaim_state reclaim_state = {
-		.reclaimed_slab = 0,
-	};
 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
 	lockdep_set_current_reclaim_state(GFP_KERNEL);
 
 	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
-	current->reclaim_state = &reclaim_state;
 
 	/*
 	 * Tell the memory management that we're a "memory allocator",
@@ -3598,7 +3596,6 @@ static int kswapd(void *p)
 	}
 
 	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
-	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
 
 	return 0;
@@ -3645,7 +3642,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
  */
 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 {
-	struct reclaim_state reclaim_state;
 	struct scan_control sc = {
 		.nr_to_reclaim = nr_to_reclaim,
 		.gfp_mask = GFP_HIGHUSER_MOVABLE,
@@ -3657,18 +3653,14 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 		.hibernation_mode = 1,
 	};
 	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
-	struct task_struct *p = current;
 	unsigned long nr_reclaimed;
 	unsigned int noreclaim_flag;
 
 	noreclaim_flag = memalloc_noreclaim_save();
 	lockdep_set_current_reclaim_state(sc.gfp_mask);
-	reclaim_state.reclaimed_slab = 0;
-	p->reclaim_state = &reclaim_state;
 
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
-	p->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
 	memalloc_noreclaim_restore(noreclaim_flag);
 
@@ -3833,7 +3825,6 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 	/* Minimum pages needed in order to stay on node */
 	const unsigned long nr_pages = 1 << order;
 	struct task_struct *p = current;
-	struct reclaim_state reclaim_state;
 	unsigned int noreclaim_flag;
 	struct scan_control sc = {
 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
@@ -3855,8 +3846,6 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 	noreclaim_flag = memalloc_noreclaim_save();
 	p->flags |= PF_SWAPWRITE;
 	lockdep_set_current_reclaim_state(sc.gfp_mask);
-	reclaim_state.reclaimed_slab = 0;
-	p->reclaim_state = &reclaim_state;
 
 	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
 		/*
@@ -3868,7 +3857,6 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
 	}
 
-	p->reclaim_state = NULL;
 	current->flags &= ~PF_SWAPWRITE;
 	memalloc_noreclaim_restore(noreclaim_flag);
 	lockdep_clear_current_reclaim_state();
-- 
2.7.4


* [PATCH 2/4][v2] vmscan: bailout of slab reclaim once we reach our target
  2017-07-04 12:33 [PATCH 1/4] vmscan: push reclaim_state down to shrink_node() josef
@ 2017-07-04 12:33 ` josef
  2017-07-05  4:27   ` Minchan Kim
  2017-07-04 12:33 ` [PATCH 3/4] mm: use slab size in the slab shrinking ratio calculation josef
  2017-07-04 12:33 ` [PATCH 4/4] mm: make kswapd try harder to keep active pages in cache josef
  2 siblings, 1 reply; 6+ messages in thread
From: josef @ 2017-07-04 12:33 UTC
  To: minchan, akpm, kernel-team, linux-mm, hannes, riel; +Cc: Josef Bacik

From: Josef Bacik <jbacik@fb.com>

Following patches will greatly increase our aggressiveness in slab
reclaim, so we need checks in place to make sure we stop trying to
reclaim slab once we've hit our reclaim target.

Signed-off-by: Josef Bacik <jbacik@fb.com>
---
v1->v2:
- Don't bail out in shrink_slab() so that we always scan at least batch_size
  objects of every slab regardless of whether we've hit our target or not.
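
As a rough illustration (a standalone model with made-up numbers, not the
patched do_shrink_slab()): slab pages freed by each batch are credited to
sc->nr_reclaimed, and the target is only checked after a batch completes,
so every shrinker still scans at least batch_size objects before we bail.

#include <stdio.h>

struct scan_control { unsigned long nr_reclaimed, nr_to_reclaim; };

int main(void)
{
	struct scan_control sc = { .nr_reclaimed = 0, .nr_to_reclaim = 1024 };
	unsigned long total_scan = 100000, batch_size = 128, scanned = 0;

	while (total_scan >= batch_size) {
		unsigned long freed = 96;	/* pretend scan_objects() freed this many pages */

		sc.nr_reclaimed += freed;	/* fold reclaim_state->reclaimed_slab in */
		total_scan -= batch_size;
		scanned += batch_size;
		if (sc.nr_reclaimed >= sc.nr_to_reclaim)	/* the new bailout */
			break;
	}
	printf("scanned %lu objects, reclaimed %lu pages\n",
	       scanned, sc.nr_reclaimed);
	return 0;
}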

 mm/vmscan.c | 33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index cf23de9..78860a6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -305,11 +305,13 @@ EXPORT_SYMBOL(unregister_shrinker);
 
 #define SHRINK_BATCH 128
 
-static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
+static unsigned long do_shrink_slab(struct scan_control *sc,
+				    struct shrink_control *shrinkctl,
 				    struct shrinker *shrinker,
 				    unsigned long nr_scanned,
 				    unsigned long nr_eligible)
 {
+	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long freed = 0;
 	unsigned long long delta;
 	long total_scan;
@@ -394,14 +396,18 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 
 		shrinkctl->nr_to_scan = nr_to_scan;
 		ret = shrinker->scan_objects(shrinker, shrinkctl);
+		if (reclaim_state) {
+			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
+			reclaim_state->reclaimed_slab = 0;
+		}
 		if (ret == SHRINK_STOP)
 			break;
 		freed += ret;
-
 		count_vm_events(SLABS_SCANNED, nr_to_scan);
 		total_scan -= nr_to_scan;
 		scanned += nr_to_scan;
-
+		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
+			break;
 		cond_resched();
 	}
 
@@ -452,7 +458,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
  *
  * Returns the number of reclaimed slab objects.
  */
-static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
+static unsigned long shrink_slab(struct scan_control *sc, int nid,
 				 struct mem_cgroup *memcg,
 				 unsigned long nr_scanned,
 				 unsigned long nr_eligible)
@@ -478,8 +484,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 	}
 
 	list_for_each_entry(shrinker, &shrinker_list, list) {
-		struct shrink_control sc = {
-			.gfp_mask = gfp_mask,
+		struct shrink_control shrinkctl = {
+			.gfp_mask = sc->gfp_mask,
 			.nid = nid,
 			.memcg = memcg,
 		};
@@ -494,9 +500,10 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 			continue;
 
 		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
-			sc.nid = 0;
+			shrinkctl.nid = 0;
 
-		freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
+		freed += do_shrink_slab(sc, &shrinkctl, shrinker, nr_scanned,
+					nr_eligible);
 	}
 
 	up_read(&shrinker_rwsem);
@@ -510,11 +517,15 @@ void drop_slab_node(int nid)
 	unsigned long freed;
 
 	do {
+		struct scan_control sc = {
+			.nr_to_reclaim = -1UL,
+			.gfp_mask = GFP_KERNEL,
+		};
 		struct mem_cgroup *memcg = NULL;
 
 		freed = 0;
 		do {
-			freed += shrink_slab(GFP_KERNEL, nid, memcg,
+			freed += shrink_slab(&sc, nid, memcg,
 					     1000, 1000);
 		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
 	} while (freed > 10);
@@ -2600,7 +2611,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 			node_lru_pages += lru_pages;
 
 			if (memcg)
-				shrink_slab(sc->gfp_mask, pgdat->node_id,
+				shrink_slab(sc, pgdat->node_id,
 					    memcg, sc->nr_scanned - scanned,
 					    lru_pages);
 
@@ -2631,7 +2642,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 		 * the eligible LRU pages were scanned.
 		 */
 		if (global_reclaim(sc))
-			shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
+			shrink_slab(sc, pgdat->node_id, NULL,
 				    sc->nr_scanned - nr_scanned,
 				    node_lru_pages);
 
-- 
2.7.4


* [PATCH 3/4] mm: use slab size in the slab shrinking ratio calculation
  2017-07-04 12:33 [PATCH 1/4] vmscan: push reclaim_state down to shrink_node() josef
  2017-07-04 12:33 ` [PATCH 2/4][v2] vmscan: bailout of slab reclaim once we reach our target josef
@ 2017-07-04 12:33 ` josef
  2017-07-04 12:33 ` [PATCH 4/4] mm: make kswapd try harder to keep active pages in cache josef
  2 siblings, 0 replies; 6+ messages in thread
From: josef @ 2017-07-04 12:33 UTC
  To: minchan, akpm, kernel-team, linux-mm, hannes, riel; +Cc: Josef Bacik

From: Josef Bacik <jbacik@fb.com>

When testing a slab-heavy workload I noticed that we would often barely
reclaim anything at all from slab when kswapd started doing reclaim.
This is because we use the ratio of nr_scanned / nr_lru to determine how
much of slab we should reclaim.  But in a slab-only or mostly-slab
workload we will not have much page cache to reclaim, so this ratio ends
up really low and bears no relation to where the memory on the system
actually is.  Instead, use the ratio of reclaimable slab to the actual
reclaimable space on the system.  That way, if we are slab heavy, we
work harder to reclaim slab.
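
To illustrate, a standalone arithmetic model of the shrinker pressure
calculation, delta = (4 * numerator / seeks) * freeable / (denominator + 1),
with assumed numbers for a slab-heavy node (the figures are made up, not
measurements):

#include <stdio.h>

static unsigned long long shrink_delta(unsigned long long numerator,
				       unsigned long long denominator,
				       unsigned long long freeable,
				       unsigned long long seeks)
{
	unsigned long long delta = (4 * numerator) / seeks;

	return delta * freeable / (denominator + 1);
}

int main(void)
{
	unsigned long long freeable = 1000000;	/* objects in one big shrinker */

	/* old ratio: pages scanned vs. LRU size -- tiny when there is
	 * little page cache, so slab barely gets touched */
	printf("old delta: %llu objects\n",
	       shrink_delta(32, 10000, freeable, 2));

	/* new ratio: reclaimable slab vs. total reclaimable pages on a
	 * slab-heavy node -- pressure scales with where the memory is */
	printf("new delta: %llu objects\n",
	       shrink_delta(800000, 900000, freeable, 2));
	return 0;
}

With the old scanned/LRU ratio the big shrinker is asked for a few thousand
objects at most; with the slab/reclaimable ratio the request scales with how
much of the node's memory is actually slab.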

Signed-off-by: Josef Bacik <jbacik@fb.com>
---
 mm/vmscan.c | 76 ++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 45 insertions(+), 31 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 78860a6..2f05eb7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -308,8 +308,8 @@ EXPORT_SYMBOL(unregister_shrinker);
 static unsigned long do_shrink_slab(struct scan_control *sc,
 				    struct shrink_control *shrinkctl,
 				    struct shrinker *shrinker,
-				    unsigned long nr_scanned,
-				    unsigned long nr_eligible)
+				    unsigned long numerator,
+				    unsigned long denominator)
 {
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long freed = 0;
@@ -335,9 +335,9 @@ static unsigned long do_shrink_slab(struct scan_control *sc,
 	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
 
 	total_scan = nr;
-	delta = (4 * nr_scanned) / shrinker->seeks;
+	delta = (4 * numerator) / shrinker->seeks;
 	delta *= freeable;
-	do_div(delta, nr_eligible + 1);
+	do_div(delta, denominator + 1);
 	total_scan += delta;
 	if (total_scan < 0) {
 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
@@ -371,7 +371,7 @@ static unsigned long do_shrink_slab(struct scan_control *sc,
 		total_scan = freeable * 2;
 
 	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
-				   nr_scanned, nr_eligible,
+				   numerator, denominator,
 				   freeable, delta, total_scan);
 
 	/*
@@ -435,8 +435,8 @@ static unsigned long do_shrink_slab(struct scan_control *sc,
  * @gfp_mask: allocation context
  * @nid: node whose slab caches to target
  * @memcg: memory cgroup whose slab caches to target
- * @nr_scanned: pressure numerator
- * @nr_eligible: pressure denominator
+ * @numerator: pressure numerator
+ * @denominator: pressure denominator
  *
  * Call the shrink functions to age shrinkable caches.
  *
@@ -448,20 +448,16 @@ static unsigned long do_shrink_slab(struct scan_control *sc,
  * objects from the memory cgroup specified. Otherwise, only unaware
  * shrinkers are called.
  *
- * @nr_scanned and @nr_eligible form a ratio that indicate how much of
- * the available objects should be scanned.  Page reclaim for example
- * passes the number of pages scanned and the number of pages on the
- * LRU lists that it considered on @nid, plus a bias in @nr_scanned
- * when it encountered mapped pages.  The ratio is further biased by
- * the ->seeks setting of the shrink function, which indicates the
- * cost to recreate an object relative to that of an LRU page.
+ * @numerator and @denominator form a ratio that indicate how much of
+ * the available objects should be scanned.  Global reclaim for example will do
+ * the ratio of reclaimable slab to the lru sizes.
  *
  * Returns the number of reclaimed slab objects.
  */
 static unsigned long shrink_slab(struct scan_control *sc, int nid,
 				 struct mem_cgroup *memcg,
-				 unsigned long nr_scanned,
-				 unsigned long nr_eligible)
+				 unsigned long numerator,
+				 unsigned long denominator)
 {
 	struct shrinker *shrinker;
 	unsigned long freed = 0;
@@ -469,9 +465,6 @@ static unsigned long shrink_slab(struct scan_control *sc, int nid,
 	if (memcg && (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)))
 		return 0;
 
-	if (nr_scanned == 0)
-		nr_scanned = SWAP_CLUSTER_MAX;
-
 	if (!down_read_trylock(&shrinker_rwsem)) {
 		/*
 		 * If we would return 0, our callers would understand that we
@@ -502,8 +495,8 @@ static unsigned long shrink_slab(struct scan_control *sc, int nid,
 		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
 			shrinkctl.nid = 0;
 
-		freed += do_shrink_slab(sc, &shrinkctl, shrinker, nr_scanned,
-					nr_eligible);
+		freed += do_shrink_slab(sc, &shrinkctl, shrinker, numerator,
+					denominator);
 	}
 
 	up_read(&shrinker_rwsem);
@@ -2569,15 +2562,37 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 	return true;
 }
 
+static unsigned long lruvec_reclaimable_pages(struct lruvec *lruvec)
+{
+	unsigned long nr;
+
+	nr = lruvec_page_state(lruvec, NR_ACTIVE_FILE) +
+	     lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
+	     lruvec_page_state(lruvec, NR_ISOLATED_FILE);
+
+	if (get_nr_swap_pages() > 0)
+		nr += lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
+		      lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
+		      lruvec_page_state(lruvec, NR_ISOLATED_ANON);
+
+	return nr;
+}
+
 static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 {
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
 	unsigned long nr_reclaimed, nr_scanned;
+	unsigned long greclaim = 1, gslab = 1;
 	bool reclaimable = false;
 
 	current->reclaim_state = &reclaim_state;
+	if (global_reclaim(sc)) {
+		gslab = node_page_state(pgdat, NR_SLAB_RECLAIMABLE);
+		greclaim = pgdat_reclaimable_pages(pgdat);
+	}
+
 	do {
 		struct mem_cgroup *root = sc->target_mem_cgroup;
 		struct mem_cgroup_reclaim_cookie reclaim = {
@@ -2592,6 +2607,9 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 
 		memcg = mem_cgroup_iter(root, NULL, &reclaim);
 		do {
+			struct lruvec *lruvec = mem_cgroup_lruvec(pgdat,
+								  memcg);
+			unsigned long nr_slab, nr_reclaim;
 			unsigned long lru_pages;
 			unsigned long reclaimed;
 			unsigned long scanned;
@@ -2606,14 +2624,16 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 
 			reclaimed = sc->nr_reclaimed;
 			scanned = sc->nr_scanned;
+			nr_slab = lruvec_page_state(lruvec,
+						    NR_SLAB_RECLAIMABLE);
+			nr_reclaim = lruvec_reclaimable_pages(lruvec);
 
 			shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
 			node_lru_pages += lru_pages;
 
 			if (memcg)
-				shrink_slab(sc, pgdat->node_id,
-					    memcg, sc->nr_scanned - scanned,
-					    lru_pages);
+				shrink_slab(sc, pgdat->node_id, memcg, nr_slab,
+					    nr_reclaim);
 
 			/* Record the group's reclaim efficiency */
 			vmpressure(sc->gfp_mask, memcg, false,
@@ -2637,14 +2657,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 			}
 		} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
 
-		/*
-		 * Shrink the slab caches in the same proportion that
-		 * the eligible LRU pages were scanned.
-		 */
 		if (global_reclaim(sc))
-			shrink_slab(sc, pgdat->node_id, NULL,
-				    sc->nr_scanned - nr_scanned,
-				    node_lru_pages);
+			shrink_slab(sc, pgdat->node_id, NULL, gslab, greclaim);
 
 		/*
 		 * Record the subtree's reclaim efficiency. The reclaimed
-- 
2.7.4


* [PATCH 4/4] mm: make kswapd try harder to keep active pages in cache
  2017-07-04 12:33 [PATCH 1/4] vmscan: push reclaim_state down to shrink_node() josef
  2017-07-04 12:33 ` [PATCH 2/4][v2] vmscan: bailout of slab reclaim once we reach our target josef
  2017-07-04 12:33 ` [PATCH 3/4] mm: use slab size in the slab shrinking ratio calculation josef
@ 2017-07-04 12:33 ` josef
  2 siblings, 0 replies; 6+ messages in thread
From: josef @ 2017-07-04 12:33 UTC
  To: minchan, akpm, kernel-team, linux-mm, hannes, riel; +Cc: Josef Bacik

From: Josef Bacik <jbacik@fb.com>

While testing slab reclaim I noticed that if we run a workload that uses
most of the system memory for its working set and then put a lot of
reclaimable slab pressure on the system (think find /, or some other
silliness), we will happily evict the active pages in favor of the slab
cache.  This is backwards: we want to do all we can to keep the active
working set in memory and instead evict these short-lived objects.  The
same thing occurs when, say, you do a yum update of a few packages while
your working set takes up most of RAM: the inactive lists end up being
relatively small, so we reclaim active pages even though we could be
reclaiming the short-lived inactive pages instead.

My approach here is twofold.  First, keep track of how much the inactive
and slab page counts have grown since the last time kswapd ran.  On the
first run this will just be the overall counts of inactive and slab
pages, but on each subsequent run we have a good idea of where the new
memory pressure is coming from.  We then use this information to put
pressure on either the inactive lists or the slab caches, depending on
where that pressure originates.

If this optimization does not apply, we fall back to the previous
reclaim behaviour with a slight adjustment.  Instead of using the
overall page cache scan rate to determine the scan rate for slab, we use
the total slab usage compared to the reclaimable page cache on the box.
This allows us to put an appropriate amount of pressure on the slab
shrinkers when we have a mostly-slab workload.
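
As a standalone model of that bookkeeping (made-up counts, not kernel
code), kswapd remembers the inactive and slab counts from its previous run
and uses the growth since then to decide where to apply pressure:

#include <stdio.h>

struct node_counts { unsigned long inactive, slab; };

static void kswapd_diffs(struct node_counts now, struct node_counts *prev,
			 unsigned long *inactive_diff, unsigned long *slab_diff)
{
	*inactive_diff = now.inactive > prev->inactive ?
			 now.inactive - prev->inactive : 0;
	*slab_diff = now.slab > prev->slab ? now.slab - prev->slab : 0;
	*prev = now;				/* remember for the next wakeup */
}

int main(void)
{
	struct node_counts prev = { 0, 0 };
	struct node_counts runs[] = { { 500000, 200000 }, { 510000, 350000 } };
	unsigned long idiff, sdiff;
	int i;

	for (i = 0; i < 2; i++) {
		kswapd_diffs(runs[i], &prev, &idiff, &sdiff);
		/* simplified decision: the real patch also checks the high
		 * watermark and requires inactive growth to be 2x slab growth
		 * before skipping slab entirely */
		printf("run %d: inactive_diff=%lu slab_diff=%lu -> press on %s\n",
		       i, idiff, sdiff,
		       sdiff > idiff ? "slab" : "the inactive list");
	}
	return 0;
}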

I used two tests to watch either side of this problem.  The first test
kept 2 files that took up 3/4 of memory and then started creating a
bunch of empty files.  Without this patch we would have to re-read both
files in their entirety at least 3 times during the run.  With this
patch the active pages are never evicted.

The second test read and stat'ed all the files in a directory, which
again filled about 3/4 of memory with slab cache.  Then I cat'ed a
100GiB file into /dev/null and verified that none of the cached files
were evicted.

Signed-off-by: Josef Bacik <jbacik@fb.com>
---
 mm/vmscan.c | 170 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 156 insertions(+), 14 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2f05eb7..68a3999 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -110,11 +110,20 @@ struct scan_control {
 	/* One of the zones is ready for compaction */
 	unsigned int compaction_ready:1;
 
+	/* Only reclaim inactive page cache or slab. */
+	unsigned int inactive_only:1;
+
 	/* Incremented by the number of inactive pages that were scanned */
 	unsigned long nr_scanned;
 
 	/* Number of pages freed so far during a call to shrink_zones() */
 	unsigned long nr_reclaimed;
+
+	/* Number of inactive pages added since last kswapd run. */
+	unsigned long inactive_diff;
+
+	/* Number of slab pages added since last kswapd run. */
+	unsigned long slab_diff;
 };
 
 #ifdef ARCH_HAS_PREFETCH
@@ -309,7 +318,8 @@ static unsigned long do_shrink_slab(struct scan_control *sc,
 				    struct shrink_control *shrinkctl,
 				    struct shrinker *shrinker,
 				    unsigned long numerator,
-				    unsigned long denominator)
+				    unsigned long denominator,
+				    unsigned long *slab_scanned)
 {
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long freed = 0;
@@ -415,6 +425,9 @@ static unsigned long do_shrink_slab(struct scan_control *sc,
 		next_deferred -= scanned;
 	else
 		next_deferred = 0;
+	if (slab_scanned)
+		(*slab_scanned) += scanned;
+
 	/*
 	 * move the unused scan count back into the shrinker in a
 	 * manner that handles concurrent updates. If we exhausted the
@@ -457,7 +470,8 @@ static unsigned long do_shrink_slab(struct scan_control *sc,
 static unsigned long shrink_slab(struct scan_control *sc, int nid,
 				 struct mem_cgroup *memcg,
 				 unsigned long numerator,
-				 unsigned long denominator)
+				 unsigned long denominator,
+				 unsigned long *slab_scanned)
 {
 	struct shrinker *shrinker;
 	unsigned long freed = 0;
@@ -496,7 +510,7 @@ static unsigned long shrink_slab(struct scan_control *sc, int nid,
 			shrinkctl.nid = 0;
 
 		freed += do_shrink_slab(sc, &shrinkctl, shrinker, numerator,
-					denominator);
+					denominator, slab_scanned);
 	}
 
 	up_read(&shrinker_rwsem);
@@ -519,7 +533,7 @@ void drop_slab_node(int nid)
 		freed = 0;
 		do {
 			freed += shrink_slab(&sc, nid, memcg,
-					     1000, 1000);
+					     1000, 1000, NULL);
 		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
 	} while (freed > 10);
 }
@@ -2150,6 +2164,7 @@ enum scan_balance {
 	SCAN_FRACT,
 	SCAN_ANON,
 	SCAN_FILE,
+	SCAN_INACTIVE,
 };
 
 /*
@@ -2176,6 +2191,11 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	unsigned long ap, fp;
 	enum lru_list lru;
 
+	if (sc->inactive_only) {
+		scan_balance = SCAN_INACTIVE;
+		goto out;
+	}
+
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
 		scan_balance = SCAN_FILE;
@@ -2349,6 +2369,14 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 				scan = 0;
 			}
 			break;
+		case SCAN_INACTIVE:
+			if (file && !is_active_lru(lru)) {
+				scan = max(scan, sc->nr_to_reclaim);
+			} else {
+				size = 0;
+				scan = 0;
+			}
+			break;
 		default:
 			/* Look ma, no brain */
 			BUG();
@@ -2584,13 +2612,60 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 		.reclaimed_slab = 0,
 	};
 	unsigned long nr_reclaimed, nr_scanned;
-	unsigned long greclaim = 1, gslab = 1;
+	unsigned long greclaim = 1, gslab = 1, total_high_wmark = 0, nr_inactive;
 	bool reclaimable = false;
+	bool skip_slab = false;
 
 	current->reclaim_state = &reclaim_state;
 	if (global_reclaim(sc)) {
+		int z;
+		for (z = 0; z < MAX_NR_ZONES; z++) {
+			struct zone *zone = &pgdat->node_zones[z];
+			if (!managed_zone(zone))
+				continue;
+			total_high_wmark += high_wmark_pages(zone);
+		}
+		nr_inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
 		gslab = node_page_state(pgdat, NR_SLAB_RECLAIMABLE);
 		greclaim = pgdat_reclaimable_pages(pgdat);
+	} else {
+		struct lruvec *lruvec =
+			mem_cgroup_lruvec(pgdat, sc->target_mem_cgroup);
+		total_high_wmark = sc->nr_to_reclaim;
+		nr_inactive = lruvec_page_state(lruvec, NR_INACTIVE_FILE);
+		gslab = lruvec_page_state(lruvec, NR_SLAB_RECLAIMABLE);
+	}
+
+	/*
+	 * If we don't have a lot of inactive or slab pages then there's no
+	 * point in trying to free them exclusively, do the normal scan stuff.
+	 */
+	if (nr_inactive + gslab < total_high_wmark)
+		sc->inactive_only = 0;
+
+	/*
+	 * We still want to slightly prefer slab over inactive, so if the
+	 * inactive on this node is large enough and what is pushing us into
+	 * reclaim territory then limit our flushing to the inactive list for
+	 * the first go around.
+	 *
+	 * The idea is that with a memcg configured system we will still reclaim
+	 * memcg aware shrinkers, which includes the super block shrinkers.  So
+	 * if our steady state is keeping fs objects in cache for our workload
+	 * we'll still put a certain amount of pressure on them anyway.  To
+	 * avoid evicting things we actually care about we want to skip slab
+	 * reclaim altogether.
+	 *
+	 * However we still want to account for slab and inactive growing at the
+	 * same rate, so if that is the case just carry on shrinking inactive
+	 * and slab together.
+	 */
+	if (nr_inactive > total_high_wmark &&
+	    sc->inactive_diff > sc->slab_diff) {
+		unsigned long tmp = sc->inactive_diff >> 1;
+
+		if (tmp >= sc->slab_diff)
+			skip_slab = true;
 	}
 
 	do {
@@ -2600,6 +2675,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 			.priority = sc->priority,
 		};
 		unsigned long node_lru_pages = 0;
+		unsigned long slab_reclaimed = 0;
+		unsigned long slab_scanned = 0;
 		struct mem_cgroup *memcg;
 
 		nr_reclaimed = sc->nr_reclaimed;
@@ -2631,9 +2708,18 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 			shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
 			node_lru_pages += lru_pages;
 
-			if (memcg)
-				shrink_slab(sc, pgdat->node_id, memcg, nr_slab,
-					    nr_reclaim);
+			if (memcg && !skip_slab) {
+				unsigned long numerator = nr_slab;
+				unsigned long denominator = nr_reclaim;
+				if (sc->inactive_only) {
+					numerator = sc->slab_diff;
+					denominator = sc->inactive_diff;
+				}
+				slab_reclaimed +=
+					shrink_slab(sc, pgdat->node_id, memcg,
+						    numerator, denominator,
+						    &slab_scanned);
+			}
 
 			/* Record the group's reclaim efficiency */
 			vmpressure(sc->gfp_mask, memcg, false,
@@ -2657,8 +2743,17 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 			}
 		} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
 
-		if (global_reclaim(sc))
-			shrink_slab(sc, pgdat->node_id, NULL, gslab, greclaim);
+		if (!skip_slab && global_reclaim(sc)) {
+			unsigned long numerator = gslab;
+			unsigned long denominator = greclaim;
+			if (sc->inactive_only) {
+				numerator = sc->slab_diff;
+				denominator = sc->inactive_diff;
+			}
+			slab_reclaimed += shrink_slab(sc, pgdat->node_id, NULL,
+						      numerator, denominator,
+						      &slab_scanned);
+		}
 
 		/*
 		 * Record the subtree's reclaim efficiency. The reclaimed
@@ -2675,9 +2770,28 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 		sc->nr_reclaimed += reclaim_state.reclaimed_slab;
 		reclaim_state.reclaimed_slab = 0;
 
-		if (sc->nr_reclaimed - nr_reclaimed)
+		if (sc->nr_reclaimed - nr_reclaimed) {
 			reclaimable = true;
+		} else if (sc->inactive_only && !skip_slab) {
+			unsigned long percent = 0;
 
+			/*
+			 * We didn't reclaim anything this go around, so the
+			 * inactive list is likely spent.  If we're reclaiming
+			 * less than half of the objects in slab that we're
+			 * scanning then just stop doing the inactive only scan.
+			 * Otherwise ramp up the pressure on the slab caches
+			 * hoping that eventually we'll start freeing enough
+			 * objects to reclaim space.
+			 */
+			if (slab_scanned)
+				percent = slab_reclaimed * 100 / slab_scanned;
+			if (percent < 50)
+				sc->inactive_only = 0;
+			else
+				gslab <<= 1;
+		}
+		skip_slab = false;
 	} while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
 					 sc->nr_scanned - nr_scanned, sc));
 
@@ -3321,7 +3435,8 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
  * or lower is eligible for reclaim until at least one usable zone is
  * balanced.
  */
-static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
+static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx,
+			 unsigned long inactive_diff, unsigned long slab_diff)
 {
 	int i;
 	unsigned long nr_soft_reclaimed;
@@ -3334,6 +3449,9 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
 		.may_swap = 1,
+		.inactive_only = 1,
+		.inactive_diff = inactive_diff,
+		.slab_diff = slab_diff,
 	};
 	count_vm_event(PAGEOUTRUN);
 
@@ -3553,7 +3671,7 @@ static int kswapd(void *p)
 	unsigned int classzone_idx = MAX_NR_ZONES - 1;
 	pg_data_t *pgdat = (pg_data_t*)p;
 	struct task_struct *tsk = current;
-
+	unsigned long nr_slab = 0, nr_inactive = 0;
 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
 	lockdep_set_current_reclaim_state(GFP_KERNEL);
@@ -3579,6 +3697,7 @@ static int kswapd(void *p)
 	pgdat->kswapd_order = 0;
 	pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
 	for ( ; ; ) {
+		unsigned long slab_diff, inactive_diff;
 		bool ret;
 
 		alloc_order = reclaim_order = pgdat->kswapd_order;
@@ -3606,6 +3725,23 @@ static int kswapd(void *p)
 			continue;
 
 		/*
+		 * We want to know where we're adding pages so we can make
+		 * smarter decisions about where we're going to put pressure
+		 * when shrinking.
+		 */
+		slab_diff = sum_zone_node_page_state(pgdat->node_id,
+						     NR_SLAB_RECLAIMABLE);
+		inactive_diff = node_page_state(pgdat, NR_INACTIVE_FILE);
+		if (nr_slab > slab_diff)
+			slab_diff = 0;
+		else
+			slab_diff -= nr_slab;
+		if (inactive_diff < nr_inactive)
+			inactive_diff = 0;
+		else
+			inactive_diff -= nr_inactive;
+
+		/*
 		 * Reclaim begins at the requested order but if a high-order
 		 * reclaim fails then kswapd falls back to reclaiming for
 		 * order-0. If that happens, kswapd will consider sleeping
@@ -3615,7 +3751,11 @@ static int kswapd(void *p)
 		 */
 		trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
 						alloc_order);
-		reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
+		reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx,
+					      inactive_diff, slab_diff);
+		nr_inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
+		nr_slab = sum_zone_node_page_state(pgdat->node_id,
+						   NR_SLAB_RECLAIMABLE);
 		if (reclaim_order < alloc_order)
 			goto kswapd_try_sleep;
 	}
@@ -3860,6 +4000,8 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
 		.may_swap = 1,
 		.reclaim_idx = gfp_zone(gfp_mask),
+		.slab_diff = 1,
+		.inactive_diff = 1,
 	};
 
 	cond_resched();
-- 
2.7.4


* Re: [PATCH 2/4][v2] vmscan: bailout of slab reclaim once we reach our target
  2017-07-04 12:33 ` [PATCH 2/4][v2] vmscan: bailout of slab reclaim once we reach our target josef
@ 2017-07-05  4:27   ` Minchan Kim
  2017-07-05 12:57     ` Josef Bacik
  0 siblings, 1 reply; 6+ messages in thread
From: Minchan Kim @ 2017-07-05  4:27 UTC
  To: josef; +Cc: akpm, kernel-team, linux-mm, hannes, riel, Josef Bacik

On Tue, Jul 04, 2017 at 08:33:38AM -0400, josef@toxicpanda.com wrote:
> From: Josef Bacik <jbacik@fb.com>
> 
> Following patches will greatly increase our aggressiveness in slab
> reclaim, so we need checks in place to make sure we stop trying to
> reclaim slab once we've hit our reclaim target.
> 
> Signed-off-by: Josef Bacik <jbacik@fb.com>
> ---
> v1->v2:
> - Don't bail out in shrink_slab() so that we always scan at least batch_size
>   objects of every slab regardless of whether we've hit our target or not.

It's no different from v1 from the aging-fairness point of view.

Imagine you have 3 shrinkers in shrinker_list and A has a lot of objects.

        HEAD-> A -> B -> C

shrink_slab scans/reclaims a lot from shrinker A until it meets
sc->nr_to_reclaim. Then the VM ages B and C with only batch_size, which is
rather small. That breaks fairness.

On the next round of memory pressure it again shrinks A a lot but B and C
only a little bit.


* Re: [PATCH 2/4][v2] vmscan: bailout of slab reclaim once we reach our target
  2017-07-05  4:27   ` Minchan Kim
@ 2017-07-05 12:57     ` Josef Bacik
  0 siblings, 0 replies; 6+ messages in thread
From: Josef Bacik @ 2017-07-05 12:57 UTC
  To: Minchan Kim; +Cc: josef, akpm, kernel-team, linux-mm, hannes, riel, Josef Bacik

On Wed, Jul 05, 2017 at 01:27:04PM +0900, Minchan Kim wrote:
> On Tue, Jul 04, 2017 at 08:33:38AM -0400, josef@toxicpanda.com wrote:
> > From: Josef Bacik <jbacik@fb.com>
> > 
> > Following patches will greatly increase our aggressiveness in slab
> > reclaim, so we need checks in place to make sure we stop trying to
> > reclaim slab once we've hit our reclaim target.
> > 
> > Signed-off-by: Josef Bacik <jbacik@fb.com>
> > ---
> > v1->v2:
> > - Don't bail out in shrink_slab() so that we always scan at least batch_size
> >   objects of every slab regardless of whether we've hit our target or not.
> 
> It's no different from v1 from the aging-fairness point of view.
> 
> Imagine you have 3 shrinkers in shrinker_list and A has a lot of objects.
> 
>         HEAD-> A -> B -> C
> 
> shrink_slab scans/reclaims a lot from shrinker A until it meets
> sc->nr_to_reclaim. Then the VM ages B and C with only batch_size, which is
> rather small. That breaks fairness.
> 
> On the next round of memory pressure it again shrinks A a lot but B and C
> only a little bit.
> 

Oh duh yeah I see what you are saying.  I had a scheme previously to break up
the scanning targets based on overall usage but it meant looping through the
shrinkers twice, as we have to get a total count of objects first to determine
individual ratios.  I suppose since there's relatively low cost to getting
object counts per shrinker and there don't tend to be a lot of shrinkers we
could go with this to make it more fair.  I'll write this up.  Thanks,
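
A hypothetical sketch of that two-pass idea (nothing like this exists in
these patches; the counts are made up): total up the freeable objects of
every shrinker first, then hand each shrinker a share of the scan target
proportional to its size, so one huge cache cannot starve the small ones:

#include <stdio.h>

int main(void)
{
	/* hypothetical per-shrinker object counts for shrinkers A, B, C */
	unsigned long long counts[] = { 900000, 5000, 2000 };
	unsigned long long nr_to_scan = 32768, total = 0;
	int i;

	for (i = 0; i < 3; i++)			/* pass 1: total object count */
		total += counts[i];
	for (i = 0; i < 3; i++)			/* pass 2: proportional scan targets */
		printf("shrinker %c: scan %llu objects\n", 'A' + i,
		       nr_to_scan * counts[i] / total);
	return 0;
}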

Josef


end of thread (newest message: 2017-07-05 12:57 UTC)

Thread overview: 6+ messages
2017-07-04 12:33 [PATCH 1/4] vmscan: push reclaim_state down to shrink_node() josef
2017-07-04 12:33 ` [PATCH 2/4][v2] vmscan: bailout of slab reclaim once we reach our target josef
2017-07-05  4:27   ` Minchan Kim
2017-07-05 12:57     ` Josef Bacik
2017-07-04 12:33 ` [PATCH 3/4] mm: use slab size in the slab shrinking ratio calculation josef
2017-07-04 12:33 ` [PATCH 4/4] mm: make kswapd try harder to keep active pages in cache josef
