From: Roman Gushchin <guro@fb.com>
To: Yang Shi <shy828301@gmail.com>
Cc: <ktkhai@virtuozzo.com>, <shakeelb@google.com>,
	<david@fromorbit.com>, <hannes@cmpxchg.org>, <mhocko@suse.com>,
	<akpm@linux-foundation.org>, <linux-mm@kvack.org>,
	<linux-fsdevel@vger.kernel.org>, <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH 6/9] mm: vmscan: use per memcg nr_deferred of shrinker
Date: Wed, 2 Dec 2020 19:08:41 -0800
Message-ID: <20201203030841.GH1375014@carbon.DHCP.thefacebook.com>
In-Reply-To: <20201202182725.265020-7-shy828301@gmail.com>

On Wed, Dec 02, 2020 at 10:27:22AM -0800, Yang Shi wrote:
> Use the per-memcg nr_deferred for memcg-aware shrinkers.  The shrinker's own
> nr_deferred will still be used in the following cases:
>     1. Non-memcg-aware shrinkers
>     2. !CONFIG_MEMCG
>     3. memcg is disabled by a boot parameter
> 
> Signed-off-by: Yang Shi <shy828301@gmail.com>
> ---
>  mm/vmscan.c | 88 +++++++++++++++++++++++++++++++++++++++++++++++++----
>  1 file changed, 82 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index cba0bc8d4661..d569fdcaba79 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -203,6 +203,12 @@ static DECLARE_RWSEM(shrinker_rwsem);
>  static DEFINE_IDR(shrinker_idr);
>  static int shrinker_nr_max;
>  
> +static inline bool is_deferred_memcg_aware(struct shrinker *shrinker)
> +{
> +	return (shrinker->flags & SHRINKER_MEMCG_AWARE) &&
> +		!mem_cgroup_disabled();
> +}
> +
>  static int prealloc_memcg_shrinker(struct shrinker *shrinker)
>  {
>  	int id, ret = -ENOMEM;
> @@ -271,7 +277,58 @@ static bool writeback_throttling_sane(struct scan_control *sc)
>  #endif
>  	return false;
>  }
> +
> +static inline long count_nr_deferred(struct shrinker *shrinker,
> +				     struct shrink_control *sc)
> +{
> +	bool per_memcg_deferred = is_deferred_memcg_aware(shrinker) && sc->memcg;
> +	struct memcg_shrinker_deferred *deferred;
> +	struct mem_cgroup *memcg = sc->memcg;
> +	int nid = sc->nid;
> +	int id = shrinker->id;
> +	long nr;
> +
> +	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> +		nid = 0;
> +
> +	if (per_memcg_deferred) {
> +		deferred = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_deferred,
> +						     true);
> +		nr = atomic_long_xchg(&deferred->nr_deferred[id], 0);
> +	} else
> +		nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> +
> +	return nr;
> +}
> +
> +static inline long set_nr_deferred(long nr, struct shrinker *shrinker,
> +				   struct shrink_control *sc)
> +{
> +	bool per_memcg_deferred = is_deferred_memcg_aware(shrinker) && sc->memcg;
> +	struct memcg_shrinker_deferred *deferred;
> +	struct mem_cgroup *memcg = sc->memcg;
> +	int nid = sc->nid;
> +	int id = shrinker->id;
> +	long new_nr;
> +
> +	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> +		nid = 0;
> +
> +	if (per_memcg_deferred) {
> +		deferred = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_deferred,
> +						     true);
> +		new_nr = atomic_long_add_return(nr, &deferred->nr_deferred[id]);
> +	} else
> +		new_nr = atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
> +
> +	return new_nr;
> +}
>  #else
> +static inline bool is_deferred_memcg_aware(struct shrinker *shrinker)
> +{
> +	return false;
> +}
> +
>  static int prealloc_memcg_shrinker(struct shrinker *shrinker)
>  {
>  	return 0;
> @@ -290,6 +347,29 @@ static bool writeback_throttling_sane(struct scan_control *sc)
>  {
>  	return true;
>  }
> +
> +static inline long count_nr_deferred(struct shrinker *shrinker,
> +				     struct shrink_control *sc)
> +{
> +	int nid = sc->nid;
> +
> +	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> +		nid = 0;
> +
> +	return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> +}
> +
> +static inline long set_nr_deferred(long nr, struct shrinker *shrinker,
> +				   struct shrink_control *sc)
> +{
> +	int nid = sc->nid;
> +
> +	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> +		nid = 0;
> +
> +	return atomic_long_add_return(nr,
> +				      &shrinker->nr_deferred[nid]);
> +}
>  #endif
>  
>  /*
> @@ -429,13 +509,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>  	long freeable;
>  	long nr;
>  	long new_nr;
> -	int nid = shrinkctl->nid;
>  	long batch_size = shrinker->batch ? shrinker->batch
>  					  : SHRINK_BATCH;
>  	long scanned = 0, next_deferred;
>  
> -	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> -		nid = 0;
>  
>  	freeable = shrinker->count_objects(shrinker, shrinkctl);
>  	if (freeable == 0 || freeable == SHRINK_EMPTY)
> @@ -446,7 +523,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>  	 * and zero it so that other concurrent shrinker invocations
>  	 * don't also do this scanning work.
>  	 */
> -	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> +	nr = count_nr_deferred(shrinker, shrinkctl);
>  
>  	total_scan = nr;
>  	if (shrinker->seeks) {
> @@ -539,8 +616,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>  	 * move the unused scan count back into the shrinker in a
>  	 * manner that handles concurrent updates.
>  	 */
> -	new_nr = atomic_long_add_return(next_deferred,
> -					&shrinker->nr_deferred[nid]);
> +	new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);

Ok, I think patch (1) can just be merged into this one, and then it would make total sense.
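
For readers skimming the diff, here is a condensed, standalone C sketch of the pattern
the new helpers wrap: the deferred count lives either in a per-memcg table or in the
shrinker's own per-node array, is consumed with an atomic exchange, and the unscanned
remainder is added back for the next reclaim pass. This is an illustrative userspace
model, not kernel code: the toy_shrinker/toy_memcg types and the deferred_slot()/
count_deferred()/set_deferred() helpers are made up for the example and only mirror
what count_nr_deferred()/set_nr_deferred() do in the patch above.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_NODES	2
#define NR_SHRINKER_IDS	4

struct toy_memcg {
	/* per-memcg deferred counts, indexed by [node][shrinker id] */
	atomic_long nr_deferred[NR_NODES][NR_SHRINKER_IDS];
};

struct toy_shrinker {
	bool memcg_aware;
	int id;
	/* fallback used when there is no memcg to charge the work to */
	atomic_long nr_deferred[NR_NODES];
};

/* pick the counter: per-memcg slot if possible, shrinker-wide otherwise */
static atomic_long *deferred_slot(struct toy_shrinker *s,
				  struct toy_memcg *memcg, int nid)
{
	if (s->memcg_aware && memcg)
		return &memcg->nr_deferred[nid][s->id];
	return &s->nr_deferred[nid];
}

/* take ownership of the deferred count, zeroing it for other callers */
static long count_deferred(struct toy_shrinker *s, struct toy_memcg *memcg,
			   int nid)
{
	return atomic_exchange(deferred_slot(s, memcg, nid), 0);
}

/* hand the unscanned remainder back; returns the new total */
static long set_deferred(long nr, struct toy_shrinker *s,
			 struct toy_memcg *memcg, int nid)
{
	return atomic_fetch_add(deferred_slot(s, memcg, nid), nr) + nr;
}

int main(void)
{
	struct toy_memcg memcg = { 0 };
	struct toy_shrinker sh = { .memcg_aware = true, .id = 1 };

	set_deferred(100, &sh, &memcg, 0);		/* defer 100 objects   */
	long nr = count_deferred(&sh, &memcg, 0);	/* consume all 100     */
	set_deferred(nr - 30, &sh, &memcg, 0);		/* 30 scanned, 70 back */

	printf("still deferred: %ld\n",
	       atomic_load(&memcg.nr_deferred[0][sh.id]));	/* prints 70 */
	return 0;
}

Any C11 compiler builds this (e.g. gcc -std=c11). The point is only the two-level
dispatch plus the xchg/add-back pattern; the real patch does the same thing with
atomic_long_xchg()/atomic_long_add_return() on memcg_shrinker_deferred.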

Thread overview: 56+ messages
2020-12-02 18:27 [RFC PATCH 0/9] Make shrinker's nr_deferred memcg aware Yang Shi
2020-12-02 18:27 ` [PATCH 1/9] mm: vmscan: simplify nr_deferred update code Yang Shi
2020-12-03  2:56   ` Roman Gushchin
2020-12-02 18:27 ` [PATCH 2/9] mm: vmscan: use nid from shrink_control for tracepoint Yang Shi
2020-12-03  3:13   ` Xiaqing (A)
2020-12-11 19:20     ` Yang Shi
2020-12-02 18:27 ` [PATCH 3/9] mm: memcontrol: rename memcg_shrinker_map_mutex to memcg_shrinker_mutex Yang Shi
2020-12-02 18:27 ` [PATCH 4/9] mm: vmscan: use a new flag to indicate shrinker is registered Yang Shi
2020-12-03  3:01   ` Roman Gushchin
2020-12-03  4:59     ` Yang Shi
2020-12-03 20:08       ` Roman Gushchin
2020-12-03 22:25         ` Yang Shi
2020-12-04 18:52           ` Johannes Weiner
2020-12-04 21:24             ` Yang Shi
2020-12-02 18:27 ` [PATCH 5/9] mm: memcontrol: add per memcg shrinker nr_deferred Yang Shi
2020-12-03  3:06   ` Roman Gushchin
2020-12-03  4:54     ` Yang Shi
2020-12-03 18:03       ` Yang Shi
2020-12-03 20:07         ` Roman Gushchin
2020-12-03 22:49           ` Yang Shi
2020-12-03 23:30             ` Roman Gushchin
2020-12-04  0:22               ` Yang Shi
2020-12-10 15:33   ` Johannes Weiner
2020-12-10 19:12     ` Yang Shi
2020-12-11 17:52       ` Yang Shi
2020-12-10 21:59     ` Yang Shi
2020-12-02 18:27 ` [PATCH 6/9] mm: vmscan: use per memcg nr_deferred of shrinker Yang Shi
2020-12-03  3:08   ` Roman Gushchin [this message]
2020-12-03  5:01     ` Yang Shi
2020-12-03 11:40   ` Kirill Tkhai
2020-12-08 17:13     ` Yang Shi
2020-12-09 15:41       ` Kirill Tkhai
2020-12-09 17:32         ` Yang Shi
2020-12-10 15:13           ` Johannes Weiner
2020-12-10 15:17             ` Kirill Tkhai
2020-12-15 16:44               ` Johannes Weiner
2020-12-02 18:27 ` [PATCH 7/9] mm: vmscan: don't need allocate shrinker->nr_deferred for memcg aware shrinkers Yang Shi
2020-12-02 18:27 ` [PATCH 8/9] mm: memcontrol: reparent nr_deferred when memcg offline Yang Shi
2020-12-02 18:27 ` [PATCH 9/9] mm: vmscan: shrink deferred objects proportional to priority Yang Shi
2020-12-03  2:52 ` [RFC PATCH 0/9] Make shrinker's nr_deferred memcg aware Roman Gushchin
2020-12-03 17:52   ` Yang Shi
