From: Michal Hocko <mhocko@suse.com>
To: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Tejun Heo <tj@kernel.org>, Roman Gushchin <guro@fb.com>,
	linux-mm@kvack.org, cgroups@vger.kernel.org,
	linux-kernel@vger.kernel.org, kernel-team@fb.com
Subject: Re: [PATCH 2/7] mm: memcontrol: kill mem_cgroup_nodeinfo()
Date: Thu, 4 Feb 2021 14:29:21 +0100
Message-ID: <YBv2sYiMmPhYmW3h@dhcp22.suse.cz>
In-Reply-To: <20210202184746.119084-3-hannes@cmpxchg.org>

On Tue 02-02-21 13:47:41, Johannes Weiner wrote:
> No need to encapsulate a simple struct member access.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

Acked-by: Michal Hocko <mhocko@suse.com>

> ---
>  include/linux/memcontrol.h |  8 +-------
>  mm/memcontrol.c            | 21 +++++++++++----------
>  2 files changed, 12 insertions(+), 17 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 7a38a1517a05..c7f387a6233e 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -602,12 +602,6 @@ void mem_cgroup_uncharge_list(struct list_head *page_list);
>  
>  void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
>  
> -static struct mem_cgroup_per_node *
> -mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
> -{
> -	return memcg->nodeinfo[nid];
> -}
> -
>  /**
>   * mem_cgroup_lruvec - get the lru list vector for a memcg & node
>   * @memcg: memcg of the wanted lruvec
> @@ -631,7 +625,7 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
>  	if (!memcg)
>  		memcg = root_mem_cgroup;
>  
> -	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
> +	mz = memcg->nodeinfo[pgdat->node_id];
>  	lruvec = &mz->lruvec;
>  out:
>  	/*
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 8120d565dd79..7e05a4ebf80f 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -414,13 +414,14 @@ static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
>  					 int size, int old_size)
>  {
>  	struct memcg_shrinker_map *new, *old;
> +	struct mem_cgroup_per_node *pn;
>  	int nid;
>  
>  	lockdep_assert_held(&memcg_shrinker_map_mutex);
>  
>  	for_each_node(nid) {
> -		old = rcu_dereference_protected(
> -			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
> +		pn = memcg->nodeinfo[nid];
> +		old = rcu_dereference_protected(pn->shrinker_map, true);
>  		/* Not yet online memcg */
>  		if (!old)
>  			return 0;
> @@ -433,7 +434,7 @@ static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
>  		memset(new->map, (int)0xff, old_size);
>  		memset((void *)new->map + old_size, 0, size - old_size);
>  
> -		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
> +		rcu_assign_pointer(pn->shrinker_map, new);
>  		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
>  	}
>  
> @@ -450,7 +451,7 @@ static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
>  		return;
>  
>  	for_each_node(nid) {
> -		pn = mem_cgroup_nodeinfo(memcg, nid);
> +		pn = memcg->nodeinfo[nid];
>  		map = rcu_dereference_protected(pn->shrinker_map, true);
>  		kvfree(map);
>  		rcu_assign_pointer(pn->shrinker_map, NULL);
> @@ -713,7 +714,7 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
>  	int nid;
>  
>  	for_each_node(nid) {
> -		mz = mem_cgroup_nodeinfo(memcg, nid);
> +		mz = memcg->nodeinfo[nid];
>  		mctz = soft_limit_tree_node(nid);
>  		if (mctz)
>  			mem_cgroup_remove_exceeded(mz, mctz);
> @@ -796,7 +797,7 @@ parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
>  	parent = parent_mem_cgroup(pn->memcg);
>  	if (!parent)
>  		return NULL;
> -	return mem_cgroup_nodeinfo(parent, nid);
> +	return parent->nodeinfo[nid];
>  }
>  
>  void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
> @@ -1163,7 +1164,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
>  	if (reclaim) {
>  		struct mem_cgroup_per_node *mz;
>  
> -		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
> +		mz = root->nodeinfo[reclaim->pgdat->node_id];
>  		iter = &mz->iter;
>  
>  		if (prev && reclaim->generation != iter->generation)
> @@ -1265,7 +1266,7 @@ static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
>  	int nid;
>  
>  	for_each_node(nid) {
> -		mz = mem_cgroup_nodeinfo(from, nid);
> +		mz = from->nodeinfo[nid];
>  		iter = &mz->iter;
>  		cmpxchg(&iter->position, dead_memcg, NULL);
>  	}
> @@ -2438,7 +2439,7 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
>  				struct mem_cgroup_per_node *pn;
>  				long x;
>  
> -				pn = mem_cgroup_nodeinfo(memcg, nid);
> +				pn = memcg->nodeinfo[nid];
>  				lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu);
>  
>  				x = lstatc->count[i];
> @@ -4145,7 +4146,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
>  		unsigned long file_cost = 0;
>  
>  		for_each_online_pgdat(pgdat) {
> -			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
> +			mz = memcg->nodeinfo[pgdat->node_id];
>  
>  			anon_cost += mz->lruvec.anon_cost;
>  			file_cost += mz->lruvec.file_cost;
> -- 
> 2.30.0
> 

-- 
Michal Hocko
SUSE Labs

Thread overview: 82+ messages

2021-02-02 18:47 [PATCH 0/7]: mm: memcontrol: switch to rstat Johannes Weiner
2021-02-02 18:47 ` [PATCH 1/7] mm: memcontrol: fix cpuhotplug statistics flushing Johannes Weiner
2021-02-02 22:23   ` Shakeel Butt
2021-02-02 23:07   ` Roman Gushchin
2021-02-03  2:28     ` Roman Gushchin
2021-02-04 19:29       ` Johannes Weiner
2021-02-04 19:34         ` Roman Gushchin
2021-02-05 17:50           ` Johannes Weiner
2021-02-04 13:28   ` Michal Hocko
2021-02-02 18:47 ` [PATCH 2/7] mm: memcontrol: kill mem_cgroup_nodeinfo() Johannes Weiner
2021-02-02 22:24   ` Shakeel Butt
2021-02-02 23:13   ` Roman Gushchin
2021-02-04 13:29   ` Michal Hocko [this message]
2021-02-02 18:47 ` [PATCH 3/7] mm: memcontrol: privatize memcg_page_state query functions Johannes Weiner
2021-02-02 22:26   ` Shakeel Butt
2021-02-02 23:17   ` Roman Gushchin
2021-02-04 13:30   ` Michal Hocko
2021-02-02 18:47 ` [PATCH 4/7] cgroup: rstat: support cgroup1 Johannes Weiner
2021-02-03  1:16   ` Roman Gushchin
2021-02-04 13:39   ` Michal Hocko
2021-02-04 16:01     ` Johannes Weiner
2021-02-04 16:42       ` Michal Hocko
2021-02-02 18:47 ` [PATCH 5/7] cgroup: rstat: punt root-level optimization to individual controllers Johannes Weiner
2021-02-02 18:47 ` [PATCH 6/7] mm: memcontrol: switch to rstat Johannes Weiner
2021-02-03  1:47   ` Roman Gushchin
2021-02-04 16:26     ` Johannes Weiner
2021-02-04 18:45       ` Roman Gushchin
2021-02-04 20:05         ` Johannes Weiner
2021-02-04 14:19   ` Michal Hocko
2021-02-04 16:15     ` Johannes Weiner
2021-02-04 16:44       ` Michal Hocko
2021-02-04 20:28         ` Johannes Weiner
2021-02-05 15:05   ` Michal Hocko
2021-02-05 16:34     ` Johannes Weiner
2021-02-08 14:07       ` Michal Hocko
2021-02-02 18:47 ` [PATCH 7/7] mm: memcontrol: consolidate lruvec stat flushing Johannes Weiner
2021-02-03  2:25   ` Roman Gushchin
2021-02-04 21:44     ` Johannes Weiner
2021-02-04 21:47       ` Roman Gushchin
2021-02-05 15:17   ` Michal Hocko
2021-02-05 17:10     ` Johannes Weiner