From: Vlastimil Babka <vbabka@suse.cz>
To: vbabka@suse.cz
Cc: akpm@linux-foundation.org, bigeasy@linutronix.de, cl@linux.com,
	guro@fb.com, hannes@cmpxchg.org, iamjoonsoo.kim@lge.com,
	jannh@google.com, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, mhocko@kernel.org, minchan@kernel.org,
	penberg@kernel.org, rientjes@google.com, shakeelb@google.com,
	surenb@google.com, tglx@linutronix.de
Subject: [RFC 1/2] mm, vmscan: add priority field to struct shrink_control
Date: Thu, 21 Jan 2021 18:21:53 +0100
Message-ID: <20210121172154.27580-1-vbabka@suse.cz>
In-Reply-To: <aa02cf86-3a83-2e55-3bb6-3ec1c0f71b11@suse.cz>

Slab reclaim is driven by the reclaim priority, which determines how
aggressively to scan, but the priority is currently passed to
do_shrink_slab() as a separate parameter and is not visible to the
individual shrinkers' callbacks. The next patch introduces a slab shrinker
that needs the priority, so add it to struct shrink_control and initialize
it appropriately. We can then also remove the extra parameter from
do_shrink_slab() and trace_mm_shrink_slab_start().
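
To illustrate how a shrinker callback could consume the new field, here is
a minimal sketch that is not part of this series; my_cache_count() and the
threshold policy are made up for illustration, only sc->priority comes from
this patch:

	static unsigned long my_count_objects(struct shrinker *s,
					      struct shrink_control *sc)
	{
		unsigned long cached = my_cache_count();

		/* Only report work once reclaim pressure has built up. */
		if (sc->priority > DEF_PRIORITY / 2)
			return 0;

		return cached;
	}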

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 include/linux/shrinker.h      |  3 +++
 include/trace/events/vmscan.h |  8 +++-----
 mm/vmscan.c                   | 14 ++++++++------
 3 files changed, 14 insertions(+), 11 deletions(-)
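
For context (not part of the change itself): with the field in place,
do_shrink_slab() derives its scan target from sc->priority through the
existing formula delta = (freeable >> priority) * 4 / shrinker->seeks.
A rough worked example, assuming DEF_PRIORITY (12) and the default
DEFAULT_SEEKS value of 2:

	freeable = 4096 objects
	priority = 12:  delta = (4096 >> 12) * 4 / 2 =   2 objects to scan
	priority =  4:  delta = (4096 >>  4) * 4 / 2 = 512 objects to scan

so lower priority values (higher memory pressure) translate into
proportionally larger scan targets.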

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 0f80123650e2..1066f052be4f 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -29,6 +29,9 @@ struct shrink_control {
 	 */
 	unsigned long nr_scanned;
 
+	/* current reclaim priority */
+	int priority;
+
 	/* current memcg being shrunk (for memcg aware shrinkers) */
 	struct mem_cgroup *memcg;
 };
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 2070df64958e..d42e480977c6 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -185,11 +185,9 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
 TRACE_EVENT(mm_shrink_slab_start,
 	TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
 		long nr_objects_to_shrink, unsigned long cache_items,
-		unsigned long long delta, unsigned long total_scan,
-		int priority),
+		unsigned long long delta, unsigned long total_scan),
 
-	TP_ARGS(shr, sc, nr_objects_to_shrink, cache_items, delta, total_scan,
-		priority),
+	TP_ARGS(shr, sc, nr_objects_to_shrink, cache_items, delta, total_scan),
 
 	TP_STRUCT__entry(
 		__field(struct shrinker *, shr)
@@ -212,7 +210,7 @@ TRACE_EVENT(mm_shrink_slab_start,
 		__entry->cache_items = cache_items;
 		__entry->delta = delta;
 		__entry->total_scan = total_scan;
-		__entry->priority = priority;
+		__entry->priority = sc->priority;
 	),
 
 	TP_printk("%pS %p: nid: %d objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d",
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 469016222cdb..bc5157625cec 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -410,7 +410,7 @@ EXPORT_SYMBOL(unregister_shrinker);
 #define SHRINK_BATCH 128
 
 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
-				    struct shrinker *shrinker, int priority)
+				    struct shrinker *shrinker)
 {
 	unsigned long freed = 0;
 	unsigned long long delta;
@@ -439,7 +439,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 
 	total_scan = nr;
 	if (shrinker->seeks) {
-		delta = freeable >> priority;
+		delta = freeable >> shrinkctl->priority;
 		delta *= 4;
 		do_div(delta, shrinker->seeks);
 	} else {
@@ -484,7 +484,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 		total_scan = freeable * 2;
 
 	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
-				   freeable, delta, total_scan, priority);
+				   freeable, delta, total_scan);
 
 	/*
 	 * Normally, we should not scan less than batch_size objects in one
@@ -562,6 +562,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 		struct shrink_control sc = {
 			.gfp_mask = gfp_mask,
 			.nid = nid,
+			.priority = priority,
 			.memcg = memcg,
 		};
 		struct shrinker *shrinker;
@@ -578,7 +579,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 		    !(shrinker->flags & SHRINKER_NONSLAB))
 			continue;
 
-		ret = do_shrink_slab(&sc, shrinker, priority);
+		ret = do_shrink_slab(&sc, shrinker);
 		if (ret == SHRINK_EMPTY) {
 			clear_bit(i, map->map);
 			/*
@@ -597,7 +598,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 			 *   set_bit()          do_shrink_slab()
 			 */
 			smp_mb__after_atomic();
-			ret = do_shrink_slab(&sc, shrinker, priority);
+			ret = do_shrink_slab(&sc, shrinker);
 			if (ret == SHRINK_EMPTY)
 				ret = 0;
 			else
@@ -666,10 +667,11 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 		struct shrink_control sc = {
 			.gfp_mask = gfp_mask,
 			.nid = nid,
+			.priority = priority,
 			.memcg = memcg,
 		};
 
-		ret = do_shrink_slab(&sc, shrinker, priority);
+		ret = do_shrink_slab(&sc, shrinker);
 		if (ret == SHRINK_EMPTY)
 			ret = 0;
 		freed += ret;
-- 
2.30.0



Thread overview: 20+ messages
2021-01-11 23:12 SLUB: percpu partial object count is highly inaccurate, causing some memory wastage and maybe also worse tail latencies? Jann Horn
2021-01-11 23:12 ` Jann Horn
2021-01-12  0:27 ` Roman Gushchin
2021-01-12 16:35 ` Christoph Lameter
2021-01-12 16:35   ` Christoph Lameter
2021-01-14  9:27   ` Vlastimil Babka
2021-01-18 11:03     ` Michal Hocko
2021-01-18 15:46       ` Christoph Lameter
2021-01-18 15:46         ` Christoph Lameter
2021-01-18 16:07         ` Michal Hocko
2021-01-13 19:14 ` Vlastimil Babka
2021-01-13 22:37   ` Jann Horn
2021-01-13 22:37     ` Jann Horn
2021-01-14  9:04     ` Christoph Lameter
2021-01-14  9:04       ` Christoph Lameter
2021-01-21 17:21 ` Vlastimil Babka
2021-01-21 17:21   ` Vlastimil Babka [this message]
2021-01-21 17:21     ` [RFC 2/2] mm, slub: add shrinker to reclaim cached slabs Vlastimil Babka
2021-01-22  0:48       ` Roman Gushchin
2021-01-26 12:06         ` Vlastimil Babka
