All of lore.kernel.org
 help / color / mirror / Atom feed
From: Qi Zheng <zhengqi.arch@bytedance.com>
To: Kirill Tkhai <tkhai@ya.ru>,
	akpm@linux-foundation.org, hannes@cmpxchg.org,
	shakeelb@google.com, mhocko@kernel.org, roman.gushchin@linux.dev,
	muchun.song@linux.dev, david@redhat.com, shy828301@gmail.com,
	rppt@kernel.org
Cc: sultan@kerneltoast.com, dave@stgolabs.net,
	penguin-kernel@I-love.SAKURA.ne.jp, paulmck@kernel.org,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v4 1/8] mm: vmscan: add a map_nr_max field to shrinker_info
Date: Thu, 9 Mar 2023 14:33:05 +0800	[thread overview]
Message-ID: <b9721532-e7d7-7586-a0da-79ffe519d5f0@bytedance.com> (raw)
In-Reply-To: <eab519de-1222-b097-9eb4-28a444458c28@ya.ru>



On 2023/3/9 06:13, Kirill Tkhai wrote:
> Hi,
> 
> On 07.03.2023 09:55, Qi Zheng wrote:
>> To prepare for the subsequent lockless memcg slab shrink,
>> add a map_nr_max field to struct shrinker_info to record
>> its own real shrinker_nr_max.
>>
>> Suggested-by: Kirill Tkhai <tkhai@ya.ru>
>> Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
>> ---
>>   include/linux/memcontrol.h |  1 +
>>   mm/vmscan.c                | 41 ++++++++++++++++++++++----------------
>>   2 files changed, 25 insertions(+), 17 deletions(-)
>>
>> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
>> index b6eda2ab205d..aa69ea98e2d8 100644
>> --- a/include/linux/memcontrol.h
>> +++ b/include/linux/memcontrol.h
>> @@ -97,6 +97,7 @@ struct shrinker_info {
>>   	struct rcu_head rcu;
>>   	atomic_long_t *nr_deferred;
>>   	unsigned long *map;
>> +	int map_nr_max;
>>   };
>>   
>>   struct lruvec_stats_percpu {
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index 9414226218f0..2dcc01682026 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -224,9 +224,16 @@ static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
>>   					 lockdep_is_held(&shrinker_rwsem));
>>   }
>>   
>> +static inline bool need_expand(int new_nr_max, int old_nr_max)
>> +{
>> +	return round_up(new_nr_max, BITS_PER_LONG) >
>> +	       round_up(old_nr_max, BITS_PER_LONG);
>> +}
>> +
>>   static int expand_one_shrinker_info(struct mem_cgroup *memcg,
>>   				    int map_size, int defer_size,
>> -				    int old_map_size, int old_defer_size)
>> +				    int old_map_size, int old_defer_size,
>> +				    int new_nr_max)
>>   {
>>   	struct shrinker_info *new, *old;
>>   	struct mem_cgroup_per_node *pn;
>> @@ -240,12 +247,17 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg,
>>   		if (!old)
>>   			return 0;
>>   
>> +		/* Already expanded this shrinker_info */
>> +		if (!need_expand(new_nr_max, old->map_nr_max))
> 
> need_expand() looks confusing here. It's strange that we round_up(old->map_nr_max),
> even though old->map may never exceed old->map_nr_max.
> 
> Won't plain
> 
> 	if (new_nr_max <= old->map_nr_max)
> 
> look clearer here?

Yeah, will change to it.

> 
> The rest in patch looks OK for me.
> 
>> +			continue;
>> +
>>   		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
>>   		if (!new)
>>   			return -ENOMEM;
>>   
>>   		new->nr_deferred = (atomic_long_t *)(new + 1);
>>   		new->map = (void *)new->nr_deferred + defer_size;
>> +		new->map_nr_max = new_nr_max;
>>   
>>   		/* map: set all old bits, clear all new bits */
>>   		memset(new->map, (int)0xff, old_map_size);
>> @@ -295,6 +307,7 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
>>   		}
>>   		info->nr_deferred = (atomic_long_t *)(info + 1);
>>   		info->map = (void *)info->nr_deferred + defer_size;
>> +		info->map_nr_max = shrinker_nr_max;
>>   		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
>>   	}
>>   	up_write(&shrinker_rwsem);
>> @@ -302,23 +315,14 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
>>   	return ret;
>>   }
>>   
>> -static inline bool need_expand(int nr_max)
>> -{
>> -	return round_up(nr_max, BITS_PER_LONG) >
>> -	       round_up(shrinker_nr_max, BITS_PER_LONG);
>> -}
>> -
>>   static int expand_shrinker_info(int new_id)
>>   {
>>   	int ret = 0;
>> -	int new_nr_max = new_id + 1;
>> +	int new_nr_max = round_up(new_id + 1, BITS_PER_LONG);
>>   	int map_size, defer_size = 0;
>>   	int old_map_size, old_defer_size = 0;
>>   	struct mem_cgroup *memcg;
>>   
>> -	if (!need_expand(new_nr_max))
>> -		goto out;
>> -
>>   	if (!root_mem_cgroup)
>>   		goto out;
>>   
>> @@ -332,7 +336,8 @@ static int expand_shrinker_info(int new_id)
>>   	memcg = mem_cgroup_iter(NULL, NULL, NULL);
>>   	do {
>>   		ret = expand_one_shrinker_info(memcg, map_size, defer_size,
>> -					       old_map_size, old_defer_size);
>> +					       old_map_size, old_defer_size,
>> +					       new_nr_max);
>>   		if (ret) {
>>   			mem_cgroup_iter_break(NULL, memcg);
>>   			goto out;
>> @@ -352,9 +357,11 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
>>   
>>   		rcu_read_lock();
>>   		info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
>> -		/* Pairs with smp mb in shrink_slab() */
>> -		smp_mb__before_atomic();
>> -		set_bit(shrinker_id, info->map);
>> +		if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) {
>> +			/* Pairs with smp mb in shrink_slab() */
>> +			smp_mb__before_atomic();
>> +			set_bit(shrinker_id, info->map);
>> +		}
>>   		rcu_read_unlock();
>>   	}
>>   }
>> @@ -432,7 +439,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg)
>>   	for_each_node(nid) {
>>   		child_info = shrinker_info_protected(memcg, nid);
>>   		parent_info = shrinker_info_protected(parent, nid);
>> -		for (i = 0; i < shrinker_nr_max; i++) {
>> +		for (i = 0; i < child_info->map_nr_max; i++) {
>>   			nr = atomic_long_read(&child_info->nr_deferred[i]);
>>   			atomic_long_add(nr, &parent_info->nr_deferred[i]);
>>   		}
>> @@ -899,7 +906,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
>>   	if (unlikely(!info))
>>   		goto unlock;
>>   
>> -	for_each_set_bit(i, info->map, shrinker_nr_max) {
>> +	for_each_set_bit(i, info->map, info->map_nr_max) {
>>   		struct shrink_control sc = {
>>   			.gfp_mask = gfp_mask,
>>   			.nid = nid,
> 

-- 
Thanks,
Qi

  reply	other threads:[~2023-03-09  6:33 UTC|newest]

Thread overview: 36+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-03-07  6:55 [PATCH v4 0/8] make slab shrink lockless Qi Zheng
2023-03-07  6:55 ` [PATCH v4 1/8] mm: vmscan: add a map_nr_max field to shrinker_info Qi Zheng
2023-03-08 14:40   ` Vlastimil Babka
2023-03-08 22:13   ` Kirill Tkhai
2023-03-09  6:33     ` Qi Zheng [this message]
2023-03-07  6:55 ` [PATCH v4 2/8] mm: vmscan: make global slab shrink lockless Qi Zheng
2023-03-08 15:02   ` Vlastimil Babka
2023-03-08 22:18   ` Kirill Tkhai
2023-03-07  6:56 ` [PATCH v4 3/8] mm: vmscan: make memcg " Qi Zheng
2023-03-08 22:23   ` Kirill Tkhai
2023-03-08 22:46   ` Vlastimil Babka
2023-03-09  6:47     ` Qi Zheng
2023-03-07  6:56 ` [PATCH v4 4/8] mm: vmscan: add shrinker_srcu_generation Qi Zheng
2023-03-09  9:23   ` Vlastimil Babka
2023-03-09 10:12     ` Qi Zheng
2023-03-07  6:56 ` [PATCH v4 5/8] mm: shrinkers: make count and scan in shrinker debugfs lockless Qi Zheng
2023-03-09  9:36   ` Vlastimil Babka
2023-03-09  9:39   ` Vlastimil Babka
2023-03-09 10:14     ` Qi Zheng
2023-03-09 19:30   ` Kirill Tkhai
2023-03-07  6:56 ` [PATCH v4 6/8] mm: vmscan: hold write lock to reparent shrinker nr_deferred Qi Zheng
2023-03-09  9:36   ` Vlastimil Babka
2023-03-09 19:32   ` Kirill Tkhai
2023-03-07  6:56 ` [PATCH v4 7/8] mm: vmscan: remove shrinker_rwsem from synchronize_shrinkers() Qi Zheng
2023-03-08 22:39   ` Kirill Tkhai
2023-03-09  7:06     ` Qi Zheng
2023-03-09  8:11       ` Christian König
2023-03-09  8:32         ` Qi Zheng
2023-03-09 19:34           ` Kirill Tkhai
2023-03-09  9:40   ` Vlastimil Babka
2023-03-09 19:34   ` Kirill Tkhai
2023-03-07  6:56 ` [PATCH v4 8/8] mm: shrinkers: convert shrinker_rwsem to mutex Qi Zheng
2023-03-09  9:42   ` Vlastimil Babka
2023-03-09 19:49   ` Kirill Tkhai
2023-03-07 22:20 ` [PATCH v4 0/8] make slab shrink lockless Andrew Morton
2023-03-08 11:59   ` Qi Zheng

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=b9721532-e7d7-7586-a0da-79ffe519d5f0@bytedance.com \
    --to=zhengqi.arch@bytedance.com \
    --cc=akpm@linux-foundation.org \
    --cc=dave@stgolabs.net \
    --cc=david@redhat.com \
    --cc=hannes@cmpxchg.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mhocko@kernel.org \
    --cc=muchun.song@linux.dev \
    --cc=paulmck@kernel.org \
    --cc=penguin-kernel@I-love.SAKURA.ne.jp \
    --cc=roman.gushchin@linux.dev \
    --cc=rppt@kernel.org \
    --cc=shakeelb@google.com \
    --cc=shy828301@gmail.com \
    --cc=sultan@kerneltoast.com \
    --cc=tkhai@ya.ru \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.