From: Yang Shi <shy828301@gmail.com>
To: guro@fb.com, ktkhai@virtuozzo.com, shakeelb@google.com,
david@fromorbit.com, hannes@cmpxchg.org, mhocko@suse.com,
akpm@linux-foundation.org
Cc: shy828301@gmail.com, linux-mm@kvack.org,
linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [v3 PATCH 08/11] mm: vmscan: use per memcg nr_deferred of shrinker
Date: Tue, 5 Jan 2021 14:58:14 -0800
Message-ID: <20210105225817.1036378-9-shy828301@gmail.com>
In-Reply-To: <20210105225817.1036378-1-shy828301@gmail.com>

Use the per-memcg nr_deferred for memcg-aware shrinkers. The shrinker's own
nr_deferred is still used in the following cases (see the sketch after this
list):
1. Non-memcg-aware shrinkers
2. !CONFIG_MEMCG
3. memcg is disabled by boot parameter
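
In other words, a memcg-aware shrinker doing memcg reclaim now reads and
writes the counter hanging off the memcg's shrinker_info, while all other
cases keep using the shrinker's own per-node counter. A rough sketch of the
rule (illustrative only; the real helpers are count_nr_deferred() and
set_nr_deferred() below):

	/*
	 * Sketch only, not part of this patch -- "deferred" is a
	 * hypothetical local; the real code does atomic xchg/add on
	 * these counters directly.
	 */
	if (sc->memcg && (shrinker->flags & SHRINKER_MEMCG_AWARE))
		/* per-memcg, per-node counter indexed by shrinker id */
		deferred = &memcg->nodeinfo[nid]->shrinker_info->nr_deferred[shrinker->id];
	else
		/* cases 1-3 above: the shrinker's own per-node counter */
		deferred = &shrinker->nr_deferred[nid];
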
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
mm/vmscan.c | 81 +++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 69 insertions(+), 12 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 72259253e414..f20ed8e928c2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -372,6 +372,27 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
up_write(&shrinker_rwsem);
}
+static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
+ struct mem_cgroup *memcg)
+{
+ struct memcg_shrinker_info *info;
+
+ info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
+ true);
+ return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
+}
+
+static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
+ struct mem_cgroup *memcg)
+{
+ struct memcg_shrinker_info *info;
+
+ info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
+ true);
+
+ return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
+}
+
static bool cgroup_reclaim(struct scan_control *sc)
{
return sc->target_mem_cgroup;
@@ -410,6 +431,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
}
+static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
+ struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
+static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
+ struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
static bool cgroup_reclaim(struct scan_control *sc)
{
return false;
@@ -421,6 +454,39 @@ static bool writeback_throttling_sane(struct scan_control *sc)
}
#endif
+static long count_nr_deferred(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ int nid = sc->nid;
+
+ if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+ nid = 0;
+
+ if (sc->memcg &&
+ (shrinker->flags & SHRINKER_MEMCG_AWARE))
+ return count_nr_deferred_memcg(nid, shrinker,
+ sc->memcg);
+
+ return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+}
+
+
+static long set_nr_deferred(long nr, struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ int nid = sc->nid;
+
+ if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+ nid = 0;
+
+ if (sc->memcg &&
+ (shrinker->flags & SHRINKER_MEMCG_AWARE))
+ return set_nr_deferred_memcg(nr, nid, shrinker,
+ sc->memcg);
+
+ return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
+}
+
/*
* This misses isolated pages which are not accounted for to save counters.
* As the data only determines if reclaim or compaction continues, it is
@@ -558,14 +624,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
long freeable;
long nr;
long new_nr;
- int nid = shrinkctl->nid;
long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH;
long scanned = 0, next_deferred;
- if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
- nid = 0;
-
freeable = shrinker->count_objects(shrinker, shrinkctl);
if (freeable == 0 || freeable == SHRINK_EMPTY)
return freeable;
@@ -575,7 +637,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
* and zero it so that other concurrent shrinker invocations
* don't also do this scanning work.
*/
- nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+ nr = count_nr_deferred(shrinker, shrinkctl);
total_scan = nr;
if (shrinker->seeks) {
@@ -666,14 +728,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
next_deferred = 0;
/*
* move the unused scan count back into the shrinker in a
- * manner that handles concurrent updates. If we exhausted the
- * scan, there is no need to do an update.
+ * manner that handles concurrent updates.
*/
- if (next_deferred > 0)
- new_nr = atomic_long_add_return(next_deferred,
- &shrinker->nr_deferred[nid]);
- else
- new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
+ new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);
trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
return freed;
--
2.26.2
Thread overview: 43+ messages
2021-01-05 22:58 [RFC v3 PATCH 0/11] Make shrinker's nr_deferred memcg aware Yang Shi
2021-01-05 22:58 ` [v3 PATCH 01/11] mm: vmscan: use nid from shrink_control for tracepoint Yang Shi
2021-01-05 22:58 ` [v3 PATCH 02/11] mm: vmscan: consolidate shrinker_maps handling code Yang Shi
2021-01-07 0:13 ` Roman Gushchin
2021-01-07 17:29 ` Yang Shi
2021-01-11 19:00 ` Yang Shi
2021-01-11 19:37 ` Roman Gushchin
2021-01-11 19:43 ` Yang Shi
2021-01-05 22:58 ` [v3 PATCH 03/11] mm: vmscan: use shrinker_rwsem to protect shrinker_maps allocation Yang Shi
2021-01-06 9:54 ` Kirill Tkhai
2021-01-11 17:08 ` Yang Shi
2021-01-11 17:33 ` Kirill Tkhai
2021-01-11 18:57 ` Yang Shi
2021-01-11 21:33 ` Kirill Tkhai
2021-01-12 21:23 ` Yang Shi
2021-01-13 18:16 ` Yang Shi
2021-01-05 22:58 ` [v3 PATCH 04/11] mm: vmscan: remove memcg_shrinker_map_size Yang Shi
2021-01-06 10:15 ` Kirill Tkhai
2021-01-11 17:44 ` Yang Shi
2021-01-13 23:48 ` Yang Shi
2021-01-05 22:58 ` [v3 PATCH 05/11] mm: vmscan: use a new flag to indicate shrinker is registered Yang Shi
2021-01-06 10:21 ` Kirill Tkhai
2021-01-11 18:17 ` Yang Shi
2021-01-11 21:37 ` Kirill Tkhai
2021-01-12 20:58 ` Yang Shi
2021-01-05 22:58 ` [v3 PATCH 06/11] mm: memcontrol: rename shrinker_map to shrinker_info Yang Shi
2021-01-06 11:38 ` Kirill Tkhai
2021-01-11 18:19 ` Yang Shi
2021-01-05 22:58 ` [v3 PATCH 07/11] mm: vmscan: add per memcg shrinker nr_deferred Yang Shi
2021-01-06 11:06 ` Kirill Tkhai
2021-01-11 18:24 ` Yang Shi
2021-01-13 23:30 ` Yang Shi
2021-01-05 22:58 ` Yang Shi [this message]
2021-01-07 0:17 ` [v3 PATCH 08/11] mm: vmscan: use per memcg nr_deferred of shrinker Roman Gushchin
2021-01-07 17:34 ` Yang Shi
2021-01-05 22:58 ` [v3 PATCH 09/11] mm: vmscan: don't need allocate shrinker->nr_deferred for memcg aware shrinkers Yang Shi
2021-01-06 11:15 ` Kirill Tkhai
2021-01-11 18:40 ` Yang Shi
2021-01-11 21:57 ` Kirill Tkhai
2021-01-05 22:58 ` [v3 PATCH 10/11] mm: memcontrol: reparent nr_deferred when memcg offline Yang Shi
2021-01-06 11:34 ` Kirill Tkhai
2021-01-11 18:43 ` Yang Shi
2021-01-05 22:58 ` [v3 PATCH 11/11] mm: vmscan: shrink deferred objects proportional to priority Yang Shi